From f37add86b3c2d54f3b1a62609f70a98e508a8e21 Mon Sep 17 00:00:00 2001 From: Martin Krasser Date: Tue, 28 Jan 2025 07:07:57 +0100 Subject: [PATCH] Support for Gemini 2 Flash Thinking (#35) - plus documentation enhancements --- README.md | 4 +- docs/cli.md | 37 +++++-- docs/evaluation.md | 2 +- docs/index.md | 10 +- docs/integration.md | 10 +- docs/models.md | 96 ++++++++++++++++--- docs/tutorials/basics.md | 2 +- freeact/cli/__main__.py | 10 +- freeact/model/gemini/model/chat.py | 63 +++++++++--- freeact/model/gemini/model/live.py | 2 +- freeact/model/gemini/prompt/__init__.py | 0 .../gemini/{prompt.py => prompt/default.py} | 0 freeact/model/gemini/prompt/thinking.py | 42 ++++++++ mkdocs.yml | 4 +- poetry.lock | 8 +- pyproject.toml | 2 +- 16 files changed, 235 insertions(+), 57 deletions(-) create mode 100644 freeact/model/gemini/prompt/__init__.py rename freeact/model/gemini/{prompt.py => prompt/default.py} (100%) create mode 100644 freeact/model/gemini/prompt/thinking.py diff --git a/README.md b/README.md index 2308181..f217b17 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,7 @@ https://github.com/user-attachments/assets/83cec179-54dc-456c-b647-ea98ec99600b ## Evaluation -We [evaluated](evaluation) `freeact` using five state-of-the-art models: +We [evaluated](evaluation) `freeact` with these models: - Claude 3.5 Sonnet (`claude-3-5-sonnet-20241022`) - Claude 3.5 Haiku (`claude-3-5-haiku-20241022`) @@ -114,4 +114,4 @@ Interestingly, these results were achieved using zero-shot prompting in `freeact ## Supported models -In addition to the models we [evaluated](#evaluation), `freeact` also supports the [integration](https://gradion-ai.github.io/freeact/integration/) of new models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python), including open models deployed locally with [ollama](https://ollama.com/) or [TGI](https://huggingface.co/docs/text-generation-inference/index), for example. 
+In addition to all [supported models](https://gradion-ai.github.io/freeact/models/), `freeact` also supports the [integration](https://gradion-ai.github.io/freeact/integration/) of new models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python), including open models deployed locally with [ollama](https://ollama.com/) or [TGI](https://huggingface.co/docs/text-generation-inference/index), for example. diff --git a/docs/cli.md b/docs/cli.md index 6f74082..96eb3d8 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -1,12 +1,14 @@ -# Command-line interface +# Command line interface -`freeact` provides a minimalistic command-line interface (CLI) for running agents. It is currently intended for demonstration purposes only. [Install `freeact`](installation.md) and run the following command to see all available options: +`freeact` provides a minimalistic command line interface (CLI) for running agents. It is currently intended for demonstration purposes only. [Install `freeact`](installation.md) and run the following command to see all available options: ```bash python -m freeact.cli --help ``` -or check [quickstart](quickstart.md) and [tutorials](tutorials/index.md) for usage examples. +!!! Tip + + Check [quickstart](quickstart.md), [tutorials](tutorials/index.md) or [supported models](models.md) for usage examples. ## Multiline input @@ -21,7 +23,22 @@ To submit a multiline message, simply press `Enter`. ## Environment variables -The CLI reads environment variables from a `.env` file in the current directory and passes them to the [execution environment](environment.md#execution-environment). API keys required for an agent's code action model must be either defined in the `.env` file, passed as command-line arguments, or directly set as variables in the shell. +Environment variables may be required for two purposes: + +1. 
Running skill modules in the [execution environment](environment.md#execution-environment), including: + - [Predefined skills](https://gradion-ai.github.io/freeact-skills/) + - [Custom skills](tutorials/skills.md) +2. Running code action models by `freeact` agents + +There are three ways to provide these environment variables: + +- For skill modules (purpose 1): Variables must be defined in a `.env` file in your current working directory +- For code action models (purpose 2): Variables can be provided through: + - A `.env` file in the current working directory + - Command-line arguments + - Shell environment variables + +This is shown by example in the following two subsections. ### Example 1 @@ -29,10 +46,10 @@ The [quickstart](quickstart.md) example requires `ANTHROPIC_API_KEY` and `GOOGLE ```env title=".env" # Required for Claude 3.5 Sonnet -ANTHROPIC_API_KEY=your-anthropic-api-key +ANTHROPIC_API_KEY=... # Required for generative Google Search via Gemini 2 -GOOGLE_API_KEY=your-google-api-key +GOOGLE_API_KEY=... ``` the following command will launch an agent with `claude-3-5-sonnet-20241022` as code action model configured with a generative Google search skill implemented by module `freeact_skills.search.google.stream.api`: @@ -49,7 +66,7 @@ The API key can alternatively be passed as command-line argument: ```bash python -m freeact.cli \ --model-name=claude-3-5-sonnet-20241022 \ - --api-key=your-anthropic-api-key \ + --api-key=$ANTHROPIC_API_KEY \ --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \ --skill-modules=freeact_skills.search.google.stream.api ``` @@ -61,10 +78,10 @@ To use models from other providers, such as [accounts/fireworks/models/deepseek- ```env title=".env" # Required for DeepSeek V3 hosted by Fireworks DEEPSEEK_BASE_URL=https://api.fireworks.ai/inference/v1 -DEEPSEEK_API_KEY=your-deepseek-api-key +DEEPSEEK_API_KEY=... # Required for generative Google Search via Gemini 2 -GOOGLE_API_KEY=your-google-api-key +GOOGLE_API_KEY=... 
``` and launch the agent with @@ -82,7 +99,7 @@ or pass the base URL and API key directly as command-line arguments: python -m freeact.cli \ --model-name=accounts/fireworks/models/deepseek-v3 \ --base-url=https://api.fireworks.ai/inference/v1 \ - --api-key=your-deepseek-api-key \ + --api-key=$DEEPSEEK_API_KEY \ --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \ --skill-modules=freeact_skills.search.google.stream.api ``` diff --git a/docs/evaluation.md b/docs/evaluation.md index c310838..24c5596 100644 --- a/docs/evaluation.md +++ b/docs/evaluation.md @@ -1,6 +1,6 @@ # Evaluation results -We [evaluated](https://github.com/gradion-ai/freeact/tree/main/evaluation) `freeact` using four state-of-the-art models: +We [evaluated](https://github.com/gradion-ai/freeact/tree/main/evaluation) `freeact` with these models: - Claude 3.5 Sonnet (`claude-3-5-sonnet-20241022`) - Claude 3.5 Haiku (`claude-3-5-haiku-20241022`) diff --git a/docs/index.md b/docs/index.md index a78dfa5..e190102 100644 --- a/docs/index.md +++ b/docs/index.md @@ -19,15 +19,15 @@ The library's architecture emphasizes extensibility and transparency, avoiding t ## Next steps - [Quickstart](quickstart.md) - Launch your first `freeact` agent and interact with it on the command line -- [Installation](installation.md) - Installation instructions and configuration of execution environments - [Building blocks](blocks.md) - Learn about the essential components of a `freeact` agent system -- [Tutorials](tutorials/index.md) - Tutorials demonstrating the `freeact` building blocks +- [Tutorials](tutorials/index.md) - Tutorials demonstrating the usage of `freeact` building blocks +- [Command line](cli.md) - Guide to using `freeact` agents from the command line +- [Supported models](models.md) - Overview of models [evaluated](evaluation.md) with `freeact` ## Further reading -- [Command line interface](cli.md) - Guide to using `freeact` agents on the command line -- [Supported models](models.md) - Overview of models 
[evaluated](evaluation.md) with `freeact` -- [Model integration](integration.md) - Guidelines for integrating new models into `freeact` +- [Model integration](integration.md) - Guide for integrating new models into `freeact` +- [Execution environment](environment.md) - Overview of prebuilt and custom execution environments - [Streaming protocol](streaming.md) - Specification for streaming model responses and execution results ## Status diff --git a/docs/integration.md b/docs/integration.md index 408de1f..21e25e9 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -11,7 +11,7 @@ The low-level API is not further described here. For implementation examples, se ### High-level API -The high-level API supports usage of models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python). To use a model, you need to provide prompt templates that guide it to generate code actions. You can either reuse existing templates or create your own. Then, you can either create an instance of `GenericModel` or subclass it. +The high-level API supports usage of models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python). To use a model, you need to provide prompt templates that guide it to generate code actions. You can either reuse existing templates or create your own. The following subsections demonstrate this using Qwen 2.5 Coder 32B Instruct as an example, showing how to use it both via the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index) and locally with [ollama](https://ollama.com/). @@ -23,10 +23,6 @@ Start with model-specific prompt templates that guide Qwen 2.5 Coder Instruct mo --8<-- "freeact/model/qwen/prompt.py" ``` -!!! Note - - These prompt templates are still experimental. They work reasonably well for larger Qwen 2.5 Coder models, but need optimization for smaller ones. - !!! 
Tip While tested with Qwen 2.5 Coder Instruct, these prompt templates can also serve as a good starting point for other models (as we did for DeepSeek V3, for example). @@ -54,7 +50,7 @@ Here's a Python example that uses `QwenCoder` as code action model in a `freeact Run it with: ```bash -HF_TOKEN= python -m freeact.examples.qwen +HF_TOKEN=... python -m freeact.examples.qwen ``` Alternatively, use the `freeact` [CLI](cli.md) directly: @@ -63,7 +59,7 @@ Alternatively, use the `freeact` [CLI](cli.md) directly: python -m freeact.cli \ --model-name=Qwen/Qwen2.5-Coder-32B-Instruct \ --base-url=https://api-inference.huggingface.co/v1/ \ - --api-key= \ + --api-key=$HF_TOKEN \ --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \ --skill-modules=freeact_skills.search.google.stream.api ``` diff --git a/docs/models.md b/docs/models.md index b707325..5dc2784 100644 --- a/docs/models.md +++ b/docs/models.md @@ -1,19 +1,93 @@ # Supported models -The following models have been [evaluated](evaluation.md) with `freeact`: +For the following models, `freeact` provides model-specific prompt templates. -- Claude 3.5 Sonnet (20241022) -- Claude 3.5 Haiku (20241022) -- Gemini 2.0 Flash (experimental) -- Qwen 2.5 Coder 32B Instruct -- DeepSeek V3 +| Model | Release | [Evaluation](evaluation.md) | Prompt | +|-----------------------------|------------|-----------|--------------| +| Claude 3.5 Sonnet | 2024-10-22 | ✓ | optimized | +| Claude 3.5 Haiku | 2024-10-22 | ✓ | optimized | +| Gemini 2.0 Flash | 2024-12-11 | ✓ | experimental | +| Gemini 2.0 Flash Thinking | 2025-01-21 | ✗ | experimental | +| Qwen 2.5 Coder 32B Instruct | | ✓ | experimental | +| DeepSeek V3 | | ✓ | experimental | -For these models, `freeact` provides model-specific prompt templates. +!!! Info -!!! 
Note + `freeact` additionally supports the [integration](integration.md) of new models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python), including open models deployed locally with [ollama](https://ollama.com/) or [TGI](https://huggingface.co/docs/text-generation-inference/index), for example. - In addition to the models we evaluated, `freeact` also supports the [integration](integration.md) of new models from any provider that is compatible with the [OpenAI Python SDK](https://github.com/openai/openai-python), including open models deployed locally with [ollama](https://ollama.com/) or [TGI](https://huggingface.co/docs/text-generation-inference/index), for example. +## Command line -!!! Tip +This section demonstrates how you can launch `freeact` agents with these models from the [command line](cli.md). All agents use the [predefined](https://gradion-ai.github.io/freeact-skills/) `freeact_skills.search.google.stream.api` skill module for generative Google search. The required [Gemini](https://aistudio.google.com/app/apikey) API key for that skill must be defined in a `.env` file in the current working directory: - For best performance, we recommend Claude 3.5 Sonnet, with DeepSeek V3 as a close second. Support for Gemini 2.0 Flash, Qwen 2.5 Coder, and DeepSeek V3 remains experimental as we continue to optimize their prompt templates. +```env title=".env" +# Required for `freeact_skills.search.google.stream.api` +GOOGLE_API_KEY=... +``` + +API keys and base URLs for code action models are provided as `--api-key` and `--base-url` arguments, respectively. Code actions are executed in a Docker container created from the [prebuilt](environment.md#prebuilt-docker-images) `ghcr.io/gradion-ai/ipybox:basic` image, passed as `--ipybox-tag` argument. + +!!! Info + + The [CLI documentation](cli.md) covers more details how environment variables can be passed to `freeact` agent systems. 
+
+### Claude 3.5 Sonnet
+
+```bash
+python -m freeact.cli \
+    --model-name=claude-3-5-sonnet-20241022 \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --api-key=$ANTHROPIC_API_KEY
+```
+
+### Claude 3.5 Haiku
+
+```bash
+python -m freeact.cli \
+    --model-name=claude-3-5-haiku-20241022 \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --api-key=$ANTHROPIC_API_KEY
+```
+
+### Gemini 2.0 Flash
+
+```bash
+python -m freeact.cli \
+    --model-name=gemini-2.0-flash-exp \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --api-key=$GOOGLE_API_KEY
+```
+
+### Gemini 2.0 Flash Thinking
+
+```bash
+python -m freeact.cli \
+    --model-name=gemini-2.0-flash-thinking-exp-01-21 \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --api-key=$GOOGLE_API_KEY
+```
+
+### Qwen 2.5 Coder 32B Instruct
+
+```bash
+python -m freeact.cli \
+    --model-name=Qwen/Qwen2.5-Coder-32B-Instruct \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --base-url=https://api-inference.huggingface.co/v1/ \
+    --api-key=$HF_TOKEN
+```
+
+### DeepSeek V3
+
+```bash
+python -m freeact.cli \
+    --model-name=accounts/fireworks/models/deepseek-v3 \
+    --ipybox-tag=ghcr.io/gradion-ai/ipybox:basic \
+    --skill-modules=freeact_skills.search.google.stream.api \
+    --base-url=https://api.fireworks.ai/inference/v1 \
+    --api-key=$FIREWORKS_API_KEY
+```
diff --git a/docs/tutorials/basics.md b/docs/tutorials/basics.md
index f171c15..a7d561d 100644
--- a/docs/tutorials/basics.md
+++ b/docs/tutorials/basics.md
@@ -81,7 +81,7 @@ To use Gemini instead of Claude, run:
 --8<-- "freeact/examples/commands.txt:cli-basics-gemini"
 ```
 
-See also [this example](../cli.md#example-2) for running DeepSeek V3 or [this 
example](../integration.md#model-usage) for Qwen 2.5 Coder. +See also [Supported models](../models.md) for other CLI examples. ### Example conversation diff --git a/freeact/cli/__main__.py b/freeact/cli/__main__.py index 9511d2f..0640501 100644 --- a/freeact/cli/__main__.py +++ b/freeact/cli/__main__.py @@ -6,7 +6,15 @@ from dotenv import load_dotenv from rich.console import Console -from freeact import Claude, CodeActAgent, CodeActModel, DeepSeek, Gemini, QwenCoder, execution_environment +from freeact import ( + Claude, + CodeActAgent, + CodeActModel, + DeepSeek, + Gemini, + QwenCoder, + execution_environment, +) from freeact.cli.utils import read_file, stream_conversation app = typer.Typer() diff --git a/freeact/model/gemini/model/chat.py b/freeact/model/gemini/model/chat.py index 19fa8bf..3afacfa 100644 --- a/freeact/model/gemini/model/chat.py +++ b/freeact/model/gemini/model/chat.py @@ -4,20 +4,22 @@ from google import genai from google.genai.chats import AsyncChat -from google.genai.types import GenerateContentConfig +from google.genai.types import GenerateContentConfig, ThinkingConfig from freeact.model.base import CodeActModel, CodeActModelResponse, CodeActModelTurn, StreamRetry -from freeact.model.gemini.prompt import ( - EXECUTION_ERROR_TEMPLATE, - EXECUTION_OUTPUT_TEMPLATE, - SYSTEM_TEMPLATE, -) +from freeact.model.gemini.prompt import default, thinking -GeminiModelName = Literal["gemini-2.0-flash-exp",] +GeminiModelName = Literal[ + "gemini-2.0-flash-exp", + "gemini-2.0-flash-thinking-exp", + "gemini-2.0-flash-thinking-exp-01-21", +] @dataclass class GeminiResponse(CodeActModelResponse): + thoughts: str = "" + @property def tool_use_id(self) -> str | None: return None @@ -46,6 +48,7 @@ def __init__(self, chat: AsyncChat, message: str): self.chat = chat self.message = message + self._thoughts: str = "" self._response: str = "" self._stream_consumed = False @@ -54,7 +57,7 @@ async def response(self) -> GeminiResponse: async for _ in self.stream(): pass # 
TODO: include token usage data into response object - return GeminiResponse(text=self._response, is_error=False) + return GeminiResponse(text=self._response, thoughts=self._thoughts, is_error=False) async def stream(self, emit_retry: bool = False) -> AsyncIterator[str | StreamRetry]: async for chunk in self.chat.send_message_stream(self.message): @@ -66,6 +69,27 @@ async def stream(self, emit_retry: bool = False) -> AsyncIterator[str | StreamRe self._stream_consumed = True +class GeminiThinkingTurn(GeminiTurn): + async def stream(self, emit_retry: bool = False) -> AsyncIterator[str | StreamRetry]: + thinking = True + yield "\n" + + async for chunk in self.chat.send_message_stream(self.message): + for part in chunk.candidates[0].content.parts: + text = part.text + if part.thought: + self._thoughts += text + yield text + else: + if thinking: + thinking = False + yield "\n\n\n" + yield text + self._response += text + + self._stream_consumed = True + + class Gemini(CodeActModel): """A `CodeActModel` implementation based on Google's Gemini 2 chat API. 
@@ -93,7 +117,8 @@ def __init__( temperature=temperature, max_output_tokens=max_tokens, response_modalities=["TEXT"], - system_instruction=SYSTEM_TEMPLATE.format(python_modules=skill_sources or ""), + thinking_config=self.thinking_config, + system_instruction=self.system_template.format(python_modules=skill_sources or ""), ), ) @@ -103,5 +128,21 @@ def request(self, user_query: str, **kwargs) -> GeminiTurn: def feedback( self, feedback: str, is_error: bool, tool_use_id: str | None, tool_use_name: str | None, **kwargs ) -> GeminiTurn: - template = EXECUTION_OUTPUT_TEMPLATE if not is_error else EXECUTION_ERROR_TEMPLATE - return GeminiTurn(self._chat, template.format(execution_feedback=feedback)) + if self.thinking: + feedback_template = thinking.EXECUTION_ERROR_TEMPLATE if is_error else thinking.EXECUTION_OUTPUT_TEMPLATE + return GeminiThinkingTurn(self._chat, feedback_template.format(execution_feedback=feedback)) + else: + feedback_template = default.EXECUTION_ERROR_TEMPLATE if is_error else default.EXECUTION_OUTPUT_TEMPLATE + return GeminiTurn(self._chat, feedback_template.format(execution_feedback=feedback)) + + @property + def system_template(self) -> str: + return thinking.SYSTEM_TEMPLATE if self.thinking else default.SYSTEM_TEMPLATE + + @property + def thinking_config(self) -> ThinkingConfig | None: + return ThinkingConfig(include_thoughts=True) if self.thinking else None + + @property + def thinking(self) -> bool: + return "thinking" in self._model_name.lower() diff --git a/freeact/model/gemini/model/live.py b/freeact/model/gemini/model/live.py index 45e8854..a2f085a 100644 --- a/freeact/model/gemini/model/live.py +++ b/freeact/model/gemini/model/live.py @@ -6,7 +6,7 @@ from freeact.model.base import CodeActModel, CodeActModelTurn, StreamRetry from freeact.model.gemini.model.chat import GeminiModelName, GeminiResponse -from freeact.model.gemini.prompt import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE +from 
freeact.model.gemini.prompt.default import EXECUTION_ERROR_TEMPLATE, EXECUTION_OUTPUT_TEMPLATE, SYSTEM_TEMPLATE class GeminiLiveTurn(CodeActModelTurn): diff --git a/freeact/model/gemini/prompt/__init__.py b/freeact/model/gemini/prompt/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/freeact/model/gemini/prompt.py b/freeact/model/gemini/prompt/default.py similarity index 100% rename from freeact/model/gemini/prompt.py rename to freeact/model/gemini/prompt/default.py diff --git a/freeact/model/gemini/prompt/thinking.py b/freeact/model/gemini/prompt/thinking.py new file mode 100644 index 0000000..7755a09 --- /dev/null +++ b/freeact/model/gemini/prompt/thinking.py @@ -0,0 +1,42 @@ +SYSTEM_TEMPLATE = """You are a ReAct agent that acts by writing tool_code in Python. +Return tool_code such that it can be executed in an IPython notebook cell. +I will execute the code for you and provide feedback. + +Avoid generating tool_code when thinking. Only generate tool_code in your response at each step. +Your final response must not contain tool_code but a direct answer to the user question. + +You can use any Python packages from pypi.org and install them with !pip install ... +You can use code enclosed in the following tags: + + +{python_modules} + + +Before using these , you must import them. + +Prefer using specialized REST APIs, that can be accessed with the requests package, over general internet search. Examples include: +- the open-meteo API for weather data +- the geocoding API of open-meteo for obtaining coordinates of a location +- ... + +Alternatively, install and use specialized Python packages instead of using general internet search. Examples include: +- the PyGithub package for information about code repositories +- the yfinance package for financial data +- ... 
+""" + + +EXECUTION_OUTPUT_TEMPLATE = """Here are the execution results of the code you generated: + + +{execution_feedback} + +""" + + +EXECUTION_ERROR_TEMPLATE = """The code you generated produced an error during execution: + + +{execution_feedback} + +""" diff --git a/mkdocs.yml b/mkdocs.yml index c8ded24..d79f39c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -87,9 +87,8 @@ nav: - Quickstart: quickstart.md - Installation: installation.md - Building blocks: blocks.md + - Command line: cli.md - Supported models: models.md - - Execution environment: environment.md - - CLI: cli.md - Tutorials: - Overview: tutorials/index.md - Basic usage: tutorials/basics.md @@ -97,6 +96,7 @@ nav: - System extensions: tutorials/extend.md - Advanced topics: - Model integration: integration.md + - Execution environment: environment.md - Streaming protocol: streaming.md - Evaluation results: evaluation.md - API Documentation: diff --git a/poetry.lock b/poetry.lock index 6dcef50..c49cfd0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -867,13 +867,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "google-genai" -version = "0.3.0" +version = "0.6.0" description = "GenAI Python SDK" optional = false python-versions = ">=3.9" files = [ - {file = "google_genai-0.3.0-py3-none-any.whl", hash = "sha256:e45eb1732cf5b1f7c5a4103fe64cc4b577981d7e592e3edfba6d34ec734f56be"}, - {file = "google_genai-0.3.0.tar.gz", hash = "sha256:1e080ca381268912dacc8e0cb15eb203eb4e38f51f6ae35cc5768e5af323c8cc"}, + {file = "google_genai-0.6.0-py3-none-any.whl", hash = "sha256:93a250998b03d7665b257d5e0d0c642194e94213327bab24edc74ece43e14c24"}, + {file = "google_genai-0.6.0.tar.gz", hash = "sha256:8fff8e0eab073fca4bdbd0608a1600ecaad73782b99ac2e353fc98960cdc3dc8"}, ] [package.dependencies] @@ -3267,4 +3267,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.0" python-versions = "^3.11,<3.14" -content-hash = "c79051e2648aa472eec2fb06afe528c1656abdd2e467fabc90a37edef7373394" +content-hash = 
"2ae1bc94c135952ad5979b1b23050396c1ffceaf5ba7ce0edd666b0341127f51" diff --git a/pyproject.toml b/pyproject.toml index 1a24573..7bb2250 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ packages = [ aioconsole = "^0.8.1" aiofiles = "^24.1" anthropic = "^0.43.0" -google-genai = "^0.3.0" +google-genai = "^0.6.0" ipybox = "^0.3.1" openai = "^1.59" prompt_toolkit = "^3.0"