diff --git a/README.md b/README.md
index 8bfaa9377..1e65a0b1c 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,8 @@ Start building LLM-empowered multi-agent applications in an easier way.
## News
+- **[2025-01-04]** AgentScope supports Anthropic API now.
+
- **[2024-12-12]** We have updated the [roadmap of AgentScope](https://github.com/modelscope/agentscope/blob/main/docs/ROADMAP.md).
- **[2024-09-06]** AgentScope version 0.1.0 is released now.
@@ -95,6 +97,7 @@ services and third-party model APIs.
| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... |
| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... |
| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
+| Anthropic API | Chat | [`AnthropicChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/anthropic_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#anthropic-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/anthropic_chat_model_config_template.json) | claude-3-5-sonnet-20241022, ... |
**Supported Local Model Deployment**
diff --git a/README_JA.md b/README_JA.md
index 71f8500d9..8f5c26575 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -42,6 +42,8 @@ LLMを活用したマルチエージェントアプリケーションをより
## ニュース
+- **[2025-01-04]** AgentScopeが現在Anthropic APIをサポートしています。
+
- **[2024-12-12]** AgentScopeの[ロードマップ](https://github.com/modelscope/agentscope/blob/main/docs/ROADMAP.md)を更新しました。
- **[2024-09-06]** AgentScopeバージョン0.1.0がリリースされました。
@@ -90,6 +92,7 @@ AgentScopeは、ローカルモデルサービスとサードパーティのモ
| LiteLLM API | チャット | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [litellmがサポートするモデル](https://docs.litellm.ai/docs/)... |
| Yi API | チャット | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... |
| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
+| Anthropic API | チャット | [`AnthropicChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/anthropic_model.py) | [ガイダンス](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#anthropic-api)
[テンプレート](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/anthropic_chat_model_config_template.json) | claude-3-5-sonnet-20241022, ... |
**サポートされているローカルモデルのデプロイ**
diff --git a/README_ZH.md b/README_ZH.md
index e1cf48158..54e57196f 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -42,6 +42,8 @@
## 新闻
+- **[2025-01-04]** AgentScope 已支持 Anthropic API
+
- **[2024-12-12]** [AgentScope 开发路线图](https://github.com/modelscope/agentscope/blob/main/docs/ROADMAP.md) 已更新
- **[2024-09-06]** AgentScope v0.1.0 版本已上线
@@ -70,26 +72,26 @@ AgentScope是一个创新的多智能体开发平台,旨在赋予开发人员
AgentScope提供了一系列`ModelWrapper`来支持本地模型服务和第三方模型API。
-| API | Task | Model Wrapper | Configuration | Some Supported Models |
-|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|
-| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... |
-| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
-| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
-| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
-| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
-| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
-| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
-| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
-| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
-| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... |
-| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... |
-| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... |
-| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
-| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
-| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... |
-| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... |
-| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
-
+| API | Task | Model Wrapper | Configuration | Some Supported Models |
+|------------------------|-----------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|
+| OpenAI API | Chat | [`OpenAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_chat_template.json) | gpt-4o, gpt-4, gpt-3.5-turbo, ... |
+| | Embedding | [`OpenAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_embedding_template.json) | text-embedding-ada-002, ... |
+| | DALL·E | [`OpenAIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/openai_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#openai-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/openai_dall_e_template.json) | dall-e-2, dall-e-3 |
+| DashScope API | Chat | [`DashScopeChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_chat_template.json) | qwen-plus, qwen-max, ... |
+| | Image Synthesis | [`DashScopeImageSynthesisWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_image_synthesis_template.json) | wanx-v1 |
+| | Text Embedding | [`DashScopeTextEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_text_embedding_template.json) | text-embedding-v1, text-embedding-v2, ... |
+| | Multimodal | [`DashScopeMultiModalWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/dashscope_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#dashscope-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/dashscope_multimodal_template.json) | qwen-vl-max, qwen-vl-chat-v1, qwen-audio-chat |
+| Gemini API | Chat | [`GeminiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_chat_template.json) | gemini-pro, ... |
+| | Embedding | [`GeminiEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/gemini_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#gemini-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/gemini_embedding_template.json) | models/embedding-001, ... |
+| ZhipuAI API | Chat | [`ZhipuAIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_chat_template.json) | glm-4, ... |
+| | Embedding | [`ZhipuAIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/zhipu_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#zhipu-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/zhipu_embedding_template.json) | embedding-2, ... |
+| ollama | Chat | [`OllamaChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_chat_template.json) | llama3, llama2, Mistral, ... |
+| | Embedding | [`OllamaEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_embedding_template.json) | llama2, Mistral, ... |
+| | Generation | [`OllamaGenerationWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/ollama_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#ollama-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/ollama_generate_template.json) | llama2, Mistral, ... |
+| LiteLLM API | Chat | [`LiteLLMChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#litellm-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/litellm_chat_template.json) | [models supported by litellm](https://docs.litellm.ai/docs/)... |
+| Yi API | Chat | [`YiChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/yi_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/yi_chat_template.json) | yi-large, yi-medium, ... |
+| Post Request based API | - | [`PostAPIModelWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#post-request-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/postapi_model_config_template.json) | - |
+| Anthropic API | Chat | [`AnthropicChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/anthropic_model.py) | [guidance](https://modelscope.github.io/agentscope/en/tutorial/203-model.html#anthropic-api)
[template](https://github.com/modelscope/agentscope/blob/main/examples/model_configs_template/anthropic_chat_model_config_template.json) | claude-3-5-sonnet-20241022, ... |
**支持的本地模型部署**
AgentScope支持使用以下库快速部署本地模型服务。
diff --git a/docs/ROADMAP.md b/docs/ROADMAP.md
index 79e2f445f..bfe0716fc 100644
--- a/docs/ROADMAP.md
+++ b/docs/ROADMAP.md
@@ -23,6 +23,8 @@ Offering **agent-oriented programming (AOP)** as a new programming model to orga
2. Tools Calling
+ - ✅ Add Support for Anthropic API.
+
- 🚧 Support tools calling in user-assistant conversations.
- OpenAI API
- DashScope API
diff --git a/docs/sphinx_doc/en/source/tutorial/203-model.md b/docs/sphinx_doc/en/source/tutorial/203-model.md
index 1a69b0c49..06316b241 100644
--- a/docs/sphinx_doc/en/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/en/source/tutorial/203-model.md
@@ -20,6 +20,7 @@ Currently, AgentScope supports the following model service APIs:
- Post Request API, model inference services based on Post
requests, including Huggingface/ModelScope Inference API and various
post request based model APIs.
+- Anthropic Chat API.
## Configuration
@@ -94,6 +95,7 @@ In the current AgentScope, the supported `model_type` types, the corresponding
| | Chat | [`PostAPIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api_chat"` | meta-llama/Meta-Llama-3-8B-Instruct, ... |
| | Image Synthesis | [`PostAPIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_dall_e` | - | |
| | Embedding | [`PostAPIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_embedding` | - |
+| Anthropic API | Chat | [`AnthropicChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/anthropic_model.py) | `"anthropic_chat"` | claude-3-5-sonnet-20241022, ... |
#### Detailed Parameters
@@ -538,6 +540,30 @@ com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py">agen
+
+
+#### Anthropic API
+
+
+
+Anthropic Chat API (agentscope.models.AnthropicChatWrapper
)
+
+
+```python
+{
+    "config_name": "my_anthropic_chat_config",
+ "model_type": "anthropic_chat",
+ "model_name": "claude-3-5-sonnet-20241022",
+
+ # Required parameters
+ "api_key": "{your_api_key}",
+
+ # Optional parameters
+ "temperature": 0.5
+}
+```
+
+
diff --git a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
index 43eecc7fe..8ce88f7e3 100644
--- a/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
+++ b/docs/sphinx_doc/zh_CN/source/tutorial/203-model.md
@@ -16,6 +16,7 @@ AgentScope中,模型的部署和调用是通过`ModelWrapper`来解耦开的
- LiteLLM API, 包括对话(Chat), 支持各种模型的API.
- Post请求API,基于Post请求实现的模型推理服务,包括Huggingface/ModelScope
Inference API和各种符合Post请求格式的API。
+- Anthropic 对话 API。
## 配置方式
@@ -114,6 +115,7 @@ API如下:
| | Chat | [`PostAPIChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `"post_api_chat"` | meta-llama/Meta-Llama-3-8B-Instruct, ... |
| | Image Synthesis | [`PostAPIDALLEWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_dall_e` | - | |
| | Embedding | [`PostAPIEmbeddingWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/post_model.py) | `post_api_embedding` | - |
+| Anthropic API | Chat | [`AnthropicChatWrapper`](https://github.com/modelscope/agentscope/blob/main/src/agentscope/models/anthropic_model.py) | `"anthropic_chat"` | claude-3-5-sonnet-20241022, ... |
#### 详细参数
@@ -560,8 +562,29 @@ com/modelscope/agentscope/blob/main/src/agentscope/models/litellm_model.py">agen
+
+
+#### Anthropic API
+
+
+
+Anthropic Chat API (agentscope.models.AnthropicChatWrapper
)
+
+
+```python
+{
+    "config_name": "my_anthropic_chat_config",
+ "model_type": "anthropic_chat",
+ "model_name": "claude-3-5-sonnet-20241022",
+ # 必要参数
+ "api_key": "{your_api_key}",
+ # 可选参数
+ "temperature": 0.5
+}
+```
+
diff --git a/examples/model_configs_template/anthropic_chat_model_config_template.json b/examples/model_configs_template/anthropic_chat_model_config_template.json
new file mode 100644
index 000000000..f697c0d83
--- /dev/null
+++ b/examples/model_configs_template/anthropic_chat_model_config_template.json
@@ -0,0 +1,9 @@
+[
+ {
+ "config_name": "anthropic_chat-claude-3-5-sonnet-20241022",
+ "model_type": "anthropic_chat",
+ "model_name": "claude-3-5-sonnet-20241022",
+
+ "api_key": "{your_api_key}"
+ }
+]
diff --git a/examples/model_configs_template/postapi_model_config_template.json b/examples/model_configs_template/postapi_model_config_template.json
index 3f4586621..a2bfbe0c1 100644
--- a/examples/model_configs_template/postapi_model_config_template.json
+++ b/examples/model_configs_template/postapi_model_config_template.json
@@ -1,6 +1,7 @@
[{
"config_name": "post_api_chat-flask_llama2-7b-chat-hf",
"model_type": "post_api_chat",
+ "model_name": "llama2",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
@@ -10,6 +11,7 @@
{
"config_name": "post_api_chat-flask_llama2-7b-chat-ms",
"model_type": "post_api_chat",
+ "model_name": "llama2-7b",
"api_url": "http://127.0.0.1:8000/llm/",
"json_args": {
"max_length": 4096,
@@ -19,6 +21,7 @@
{
"config_name": "post_api-fastchat_llama2-7b-chat-hf",
"model_type": "openai_chat",
+ "model_name": "llama2-7b",
"api_key": "EMPTY",
"client_args": {
"base_url": "http://127.0.0.1:8000/v1/"
@@ -30,6 +33,7 @@
{
"config_name": "post_api-vllm_llama2-7b-chat-hf",
"model_type": "openai_chat",
+ "model_name": "llama2-7b",
"api_key": "EMPTY",
"client_args": {
"base_url": "http://127.0.0.1:8000/v1/"
@@ -41,6 +45,7 @@
{
"config_name": "post_api_chat-model-inference-api-gpt2",
"model_type": "post_api_chat",
+ "model_name": "gpt2",
"headers": {
"Authorization": "Bearer {YOUR_API_TOKEN}"
},
@@ -49,6 +54,7 @@
{
"config_name": "post_api_img_syn_config",
"model_type": "post_api_dall_e",
+ "model_name": "{YOUR_MODEL}",
"api_url": "http://xxx.xxx.xxx.xxx:xxxx/xxx",
"headers": {
"Content-Type": "application/json",
@@ -67,6 +73,7 @@
{
"config_name": "post_api_embedding_config",
"model_type": "post_api_embedding",
+ "model_name": "{YOUR_MODEL}",
"api_url": "http://xxx.xxx.xxx.xxx:xxxx/xxx",
"headers": {
"Content-Type": "application/json",
diff --git a/setup.py b/setup.py
index ecf690b6e..3b090c2f8 100644
--- a/setup.py
+++ b/setup.py
@@ -93,6 +93,7 @@
extra_litellm_requires = ["litellm"]
extra_zhipuai_requires = ["zhipuai"]
extra_ollama_requires = ["ollama>=0.1.7"]
+extra_anthropic_requires = ["anthropic"]
# Full requires
extra_full_requires = (
@@ -105,6 +106,7 @@
+ extra_litellm_requires
+ extra_zhipuai_requires
+ extra_ollama_requires
+ + extra_anthropic_requires
)
# For online workstation
@@ -143,6 +145,7 @@
"litellm": extra_litellm_requires,
"zhipuai": extra_zhipuai_requires,
"gemini": extra_gemini_requires,
+ "anthropic": extra_anthropic_requires,
# For service functions
"service": extra_service_requires,
# For distribution mode
diff --git a/src/agentscope/models/__init__.py b/src/agentscope/models/__init__.py
index 60de46f44..9aad55d9a 100644
--- a/src/agentscope/models/__init__.py
+++ b/src/agentscope/models/__init__.py
@@ -38,6 +38,7 @@
from .yi_model import (
YiChatWrapper,
)
+from .anthropic_model import AnthropicChatWrapper
_BUILD_IN_MODEL_WRAPPERS = [
"PostAPIChatWrapper",
@@ -57,6 +58,7 @@
"ZhipuAIEmbeddingWrapper",
"LiteLLMChatWrapper",
"YiChatWrapper",
+ "AnthropicChatWrapper",
]
__all__ = [
@@ -81,4 +83,5 @@
"ZhipuAIEmbeddingWrapper",
"LiteLLMChatWrapper",
"YiChatWrapper",
+ "AnthropicChatWrapper",
]
diff --git a/src/agentscope/models/anthropic_model.py b/src/agentscope/models/anthropic_model.py
new file mode 100644
index 000000000..5c042be29
--- /dev/null
+++ b/src/agentscope/models/anthropic_model.py
@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+"""The Anthropic model wrapper for AgentScope."""
+from typing import Optional, Union, Generator, Any, Sequence
+
+from ..manager import FileManager
+from ..message import Msg
+from .model import ModelWrapperBase, ModelResponse
+from ..utils.common import (
+ _guess_type_by_extension,
+ _is_web_url,
+ _get_base64_from_image_path,
+)
+
+
+class AnthropicChatWrapper(ModelWrapperBase):
+ """The Anthropic model wrapper for AgentScope."""
+
+ model_type: str = "anthropic_chat"
+
+ _supported_image_format: list[str] = ["jpeg", "png", "gif", "webp"]
+
+ def __init__(
+ self,
+ model_name: str,
+ config_name: Optional[str] = None,
+ api_key: Optional[str] = None,
+ stream: bool = False,
+ client_kwargs: Optional[dict] = None,
+ ) -> None:
+ """Initialize the Anthropic model wrapper.
+
+ Args:
+ model_name (`str`):
+ The name of the used model, e.g. `claude-3-5-sonnet-20241022`.
+ config_name (`Optional[str]`, defaults to `None`):
+ The name of the model configuration.
+ api_key (`Optional[str]`, defaults to `None`):
+ The API key for the Anthropic API.
+ stream (`bool`, defaults to `False`):
+ Enable streaming mode or not.
+ client_kwargs (`Optional[dict]`, defaults to `None`):
+ The additional keyword arguments for the anthropic client.
+ """
+ super().__init__(config_name, model_name)
+
+ try:
+ import anthropic
+ except ImportError as e:
+ raise ImportError(
+ "Please install the `anthropic` package by running "
+ "`pip install anthropic`.",
+ ) from e
+
+ client_kwargs = client_kwargs or {}
+
+ self.client = anthropic.Anthropic(
+ api_key=api_key,
+ **client_kwargs,
+ )
+ self.stream = stream
+
+ def format(
+ self,
+ *args: Union[Msg, Sequence[Msg]],
+ ) -> list[dict[str, object]]:
+ """Format the messages for anthropic model input.
+
+ TODO: Add support for multimodal input.
+
+ Args:
+ *args (`Union[Msg, list[Msg]]`):
+ The message(s) to be formatted.
+
+ Returns:
+ `list[dict[str, object]]`:
+ A list of formatted messages.
+ """
+ return ModelWrapperBase.format_for_common_chat_models(*args)
+
+    @staticmethod
+    def _format_msg_with_url(
+        msg: Msg,
+    ) -> dict[str, Union[str, list[dict]]]:
+        """Format a message with image urls into the format that anthropic
+        LLM requires.
+
+        Refer to https://docs.anthropic.com/en/api/messages-examples
+
+        Args:
+            msg (`Msg`):
+                The message object to be formatted
+
+        Returns:
+            `dict[str, Union[str, list[dict]]]`:
+                The message in the required format.
+        """
+        urls = [msg.url] if isinstance(msg.url, str) else msg.url
+
+        image_urls = []
+        for url in urls:
+            if _guess_type_by_extension(url) == "image":
+                image_urls.append(url)
+
+        content = []
+        for image_url in image_urls:
+            extension = image_url.split(".")[-1].lower()
+            extension = "jpeg" if extension == "jpg" else extension
+            if extension not in AnthropicChatWrapper._supported_image_format:
+                raise TypeError(
+                    "Anthropic model only supports image formats "
+                    f"{AnthropicChatWrapper._supported_image_format}, "
+                    f"got {extension}",
+                )
+
+            if _is_web_url(image_url):
+                # Download the image locally
+                file_manager = FileManager.get_instance()
+                image_url = file_manager.save_image(image_url)
+
+            data_base64 = _get_base64_from_image_path(image_url)
+
+            # NOTE: the Anthropic Messages API expects ``source`` to be a
+            # single object per image content block (one block per image),
+            # not a list of source objects.
+            content.append(
+                {
+                    "type": "image",
+                    "source": {
+                        "type": "base64",
+                        "media_type": f"image/{extension}",
+                        "data": data_base64,
+                    },
+                },
+            )
+
+        if msg.content is not None:
+            content.append(
+                {
+                    "type": "text",
+                    "text": msg.content,
+                },
+            )
+        return {
+            "role": msg.role,
+            "content": content,
+        }
+
+ def __call__( # pylint: disable=too-many-branches
+ self,
+ messages: list[dict[str, Union[str, list[dict]]]],
+ stream: Optional[bool] = None,
+ max_tokens: int = 2048,
+ **kwargs: Any,
+ ) -> ModelResponse:
+ """Call the Anthropic model.
+
+ .. note:: The official Anthropic API supports system prompt by a
+ separate argument "system". For the convenience of the users, we
+ allow the system prompt to be the first message in the input messages.
+
+ Args:
+ messages (`list[dict[str, Union[str, list[dict]]]]`):
+ A list of message dictionaries. Each dictionary should have
+ 'role' and 'content' keys.
+ stream (`Optional[bool]`, defaults to `None`):
+ Enable streaming mode or not.
+ max_tokens (`int`, defaults to `2048`):
+ The max tokens in generation.
+ **kwargs (`Any`):
+ The additional keyword arguments for the model.
+
+ Returns:
+ `ModelResponse`:
+ The model response.
+ """
+ # Check the input messages
+ if isinstance(messages, list):
+ if len(messages) == 0:
+ raise ValueError("The input messages should not be empty.")
+
+ for msg in messages:
+ if not isinstance(msg, dict):
+ raise ValueError(
+ "The input messages should be a list of dictionaries, "
+ f"got {type(msg)}",
+ )
+ if "role" not in msg or "content" not in msg:
+ raise ValueError(
+ f"Each message should have 'role' and 'content' keys, "
+ f"got {msg}",
+ )
+ if msg["role"] not in ["assistant", "user", "system"]:
+ raise ValueError(
+ f"Invalid role {msg['role']}. The role must be one of "
+ f"['assistant', 'user', 'system']",
+ )
+
+ else:
+ raise ValueError(
+ "The input messages should be a list of dictionaries, "
+ f"got {type(messages)}",
+ )
+
+ # Check the stream
+ if stream is None:
+ stream = stream or self.stream
+
+ # Prepare the keyword arguments
+ kwargs.update(
+ {
+ "model": self.model_name,
+ "stream": stream,
+ "max_tokens": max_tokens,
+ },
+ )
+
+ # Extract the system message
+ if messages[0]["role"] == "system":
+ if not isinstance(messages[0]["content"], str):
+ raise ValueError(
+ "The content of the system message should be a string, "
+ f"got {type(messages[0]['content'])}",
+ )
+
+ kwargs["system"] = messages[0]["content"]
+ messages = messages[1:]
+
+ kwargs["messages"] = messages
+
+ # Call the model
+ response = self.client.messages.create(**kwargs)
+
+ # Get the response according to the stream
+ if stream:
+
+ def generator() -> Generator[str, None, None]:
+ # Used in model invocation recording
+ gathered_response = {}
+
+ text = ""
+ current_block = {}
+ for chunk in response:
+ chunk = chunk.model_dump()
+ chunk_type = chunk.get("type", None)
+
+ if chunk_type == "message_start":
+ gathered_response.update(**chunk["message"])
+
+ if chunk_type == "message_delta":
+ for key, cost in chunk.get("usage", {}).items():
+ gathered_response["usage"][key] = (
+ gathered_response["usage"].get(key, 0) + cost
+ )
+
+ if chunk_type == "content_block_start":
+ # Refresh the current block
+ current_block = chunk["content_block"]
+
+ if chunk_type == "content_block_delta":
+ delta = chunk.get("delta", {})
+ if delta.get("type", None) == "text_delta":
+ # To recover the complete response with multiple
+ # blocks in its content field
+ current_block["text"] = current_block.get(
+ "text",
+ "",
+ ) + delta.get("text", "")
+ # Used for feedback
+ text += delta.get("text", "")
+ yield text
+
+ # TODO: Support tool calls in streaming mode
+
+ if chunk_type == "content_block_stop":
+ gathered_response["content"].append(current_block)
+
+ self._save_model_invocation_and_update_monitor(
+ kwargs,
+ gathered_response,
+ )
+
+ return ModelResponse(
+ stream=generator(),
+ )
+
+ else:
+ response = response.model_dump()
+
+ # Save the model invocation and update the monitor
+ self._save_model_invocation_and_update_monitor(
+ kwargs,
+ response,
+ )
+
+ texts = []
+ # Gather text from content blocks
+ for block in response.get("content", []):
+ if (
+ isinstance(block, dict)
+ and block.get("type", None) == "text"
+ ):
+ texts.append(block.get("text", ""))
+
+ # Return the response
+ return ModelResponse(
+ text="\n".join(texts),
+ raw=response,
+ )
+
+ def _save_model_invocation_and_update_monitor(
+ self,
+ kwargs: dict,
+ response: dict,
+ ) -> None:
+ self._save_model_invocation(
+ arguments=kwargs,
+ response=response,
+ )
+
+ usage = response.get("usage", None)
+ if usage is not None:
+ self.monitor.update_text_and_embedding_tokens(
+ model_name=self.model_name,
+ prompt_tokens=usage.get("input_tokens", 0),
+ completion_tokens=usage.get("output_tokens", 0),
+ )
diff --git a/src/agentscope/models/model.py b/src/agentscope/models/model.py
index 0f2cca403..afc484269 100644
--- a/src/agentscope/models/model.py
+++ b/src/agentscope/models/model.py
@@ -5,7 +5,7 @@
import inspect
import time
from functools import wraps
-from typing import Sequence, Any, Callable, Union, List
+from typing import Sequence, Any, Callable, Union, List, Optional
from loguru import logger
@@ -102,8 +102,8 @@ class in model configuration."""
def __init__(
self, # pylint: disable=W0613
- config_name: str,
- model_name: str,
+ config_name: Optional[str] = None,
+ model_name: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Base class for model wrapper.
@@ -112,17 +112,25 @@ def __init__(
`__call__` function.
Args:
- config_name (`str`):
+ config_name (`Optional[str]`, defaults to `None`):
The id of the model, which is used to extract configuration
from the config file.
- model_name (`str`):
+ model_name (`Optional[str]`, defaults to `None`):
The name of the model.
"""
self.monitor = MonitorManager.get_instance()
self.config_name = config_name
+
+ if model_name is None:
+ raise ValueError(
+ "Model name should be provided for model "
+ f"configuration [{config_name}].",
+ )
+
self.model_name = model_name
- logger.info(f"Initialize model by configuration [{config_name}]")
+
+ logger.debug(f"Initialize model by configuration [{config_name}]")
def __call__(self, *args: Any, **kwargs: Any) -> ModelResponse:
"""Processing input with the model."""
diff --git a/src/agentscope/models/post_model.py b/src/agentscope/models/post_model.py
index 48797627c..63f8e11f9 100644
--- a/src/agentscope/models/post_model.py
+++ b/src/agentscope/models/post_model.py
@@ -3,7 +3,7 @@
import json
import time
from abc import ABC
-from typing import Any, Union, Sequence, List
+from typing import Any, Union, Sequence, List, Optional
import requests
from loguru import logger
@@ -25,6 +25,7 @@ def __init__(
self,
config_name: str,
api_url: str,
+ model_name: Optional[str] = None,
headers: dict = None,
max_length: int = 2048,
timeout: int = 30,
@@ -42,6 +43,9 @@ def __init__(
The id of the model.
api_url (`str`):
The url of the post request api.
+ model_name (`Optional[str]`, defaults to `None`):
+ The name of the model. If `None`, the model name will be
+ extracted from the `json_args`.
headers (`dict`, defaults to `None`):
The headers of the api. Defaults to None.
max_length (`int`, defaults to `2048`):
@@ -76,13 +80,14 @@ def __init__(
**post_args
)
"""
- if json_args is not None:
- model_name = json_args.get(
- "model",
- json_args.get("model_name", None),
- )
- else:
- model_name = None
+ if model_name is None:
+ if json_args is not None:
+ model_name = json_args.get(
+ "model",
+ json_args.get("model_name", None),
+ )
+ else:
+ model_name = None
super().__init__(config_name=config_name, model_name=model_name)
diff --git a/src/agentscope/utils/common.py b/src/agentscope/utils/common.py
index bfdadb4e8..9ff974de1 100644
--- a/src/agentscope/utils/common.py
+++ b/src/agentscope/utils/common.py
@@ -390,6 +390,22 @@ def _is_web_url(url: str) -> bool:
return parsed_url.scheme in ["http", "https", "ftp", "oss"]
+def _get_base64_from_image_path(image_path: str) -> str:
+ """Get a base64 data URL string from a local image file.
+
+ Args:
+ image_path (`str`):
+ The local path of the image.
+ """
+ with open(image_path, "rb") as image_file:
+ base64_image = base64.b64encode(image_file.read()).decode(
+ "utf-8",
+ )
+ extension = image_path.lower().split(".")[-1]
+ mime_type = f"image/{extension}"
+ return f"data:{mime_type};base64,{base64_image}"
+
+
def _is_json_serializable(obj: Any) -> bool:
"""Check if the given object is json serializable."""
try:
diff --git a/tests/minimal.py b/tests/minimal.py
index affad7cb7..cfdcb9981 100644
--- a/tests/minimal.py
+++ b/tests/minimal.py
@@ -26,6 +26,7 @@
{
"model_type": "post_api_chat",
"config_name": "my_post_api",
+ "model_name": "llama",
"api_url": "https://xxx",
"headers": {},
"json_args": {},
diff --git a/tests/model_anthropic_test.py b/tests/model_anthropic_test.py
new file mode 100644
index 000000000..9c9ec44b8
--- /dev/null
+++ b/tests/model_anthropic_test.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+"""Unittests for anthropic model."""
+import unittest
+from unittest.mock import patch, MagicMock
+
+import agentscope
+from agentscope.manager import ModelManager
+from agentscope.models import ModelResponse
+
+
+class AnthropicModelWrapperTest(unittest.TestCase):
+ """Anthropic model wrapper unittests."""
+
+ def setUp(self) -> None:
+ """Set up the Anthropic model configuration for the tests."""
+ agentscope.init(
+ model_configs={
+ "config_name": "claude-3-5",
+ "model_type": "anthropic_chat",
+ "model_name": "claude-3-5-sonnet-20241022",
+ "api_key": "xxx",
+ "stream": False,
+ },
+ save_api_invoke=False,
+ )
+ self.mock_response = {
+ "id": "msg_018zRB2VgEx2hS5TGxMhLz6Y",
+ "content": [
+ {
+ "text": "你好!我是 Bob。今天天气不错,你觉得呢?",
+ "type": "text",
+ },
+ ],
+ "model": "claude-3-5-sonnet-20241022",
+ "role": "assistant",
+ "stop_reason": "end_turn",
+ "stop_sequence": None,
+ "type": "message",
+ "usage": {
+ "cache_creation_input_tokens": 0,
+ "cache_read_input_tokens": 0,
+ "input_tokens": 21,
+ "output_tokens": 26,
+ },
+ }
+
+ self.model_response_gt = ModelResponse(
+ text="你好!我是 Bob。今天天气不错,你觉得呢?",
+ raw=self.mock_response,
+ )
+
+ @patch("anthropic.Anthropic")
+ def test_model_calling(self, mock_anthropic: MagicMock) -> None:
+ """Test if the model is called successfully."""
+ mock_response = MagicMock()
+ mock_response.model_dump.return_value = {
+ "id": "msg_018zRB2VgEx2hS5TGxMhLz6Y",
+ "content": [
+ {
+ "text": "你好!我是 Bob。今天天气不错,你觉得呢?",
+ "type": "text",
+ },
+ ],
+ "model": "claude-3-5-sonnet-20241022",
+ "role": "assistant",
+ "stop_reason": "end_turn",
+ "stop_sequence": None,
+ "type": "message",
+ "usage": {
+ "cache_creation_input_tokens": 0,
+ "cache_read_input_tokens": 0,
+ "input_tokens": 21,
+ "output_tokens": 26,
+ },
+ }
+
+ mock_client = mock_anthropic.return_value
+
+ mock_client.messages.create.return_value = mock_response
+
+ model = ModelManager.get_instance().get_model_by_config_name(
+ "claude-3-5",
+ )
+ response = model([{"role": "user", "content": "你好"}])
+
+ self.assertEqual(response.raw, self.model_response_gt.raw)
+ self.assertEqual(response.text, self.model_response_gt.text)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/model_test.py b/tests/model_test.py
index 2ec1d8e14..dafc02c79 100644
--- a/tests/model_test.py
+++ b/tests/model_test.py
@@ -27,6 +27,7 @@
OpenAIDALLEWrapper,
OpenAIChatWrapper,
PostAPIChatWrapper,
+ AnthropicChatWrapper,
)
@@ -42,6 +43,8 @@ def format(
self,
*args: Union[Msg, Sequence[Msg]],
) -> Union[List[dict], str]:
+ """Format the input for the model"""
+ print(*args)
return ""
@@ -75,6 +78,7 @@ def test_build_in_model_wrapper_classes(self) -> None:
"zhipuai_embedding": ZhipuAIEmbeddingWrapper,
"litellm_chat": LiteLLMChatWrapper,
"yi_chat": YiChatWrapper,
+ "anthropic_chat": AnthropicChatWrapper,
},
)
@@ -93,6 +97,7 @@ def test_load_model_configs(self, mock_logging: MagicMock) -> None:
{
"model_type": "post_api_chat",
"config_name": "my_post_api",
+ "model_name": "llama",
"api_url": "https://xxx",
"headers": {},
"json_args": {},