From e2985d70a4b57ffc4c87a6a89d235092d5168100 Mon Sep 17 00:00:00 2001
From: zeeland <zeeland4work@gmail.com>
Date: Wed, 17 Apr 2024 19:41:24 +0800
Subject: [PATCH 1/4] docs: update README

fix: lf to crlf
---
 Makefile                                | 212 +++----
 README_zh.md                            |   2 +-
 docs/README.md                          |   2 +-
 docs/index.html                         | 220 +++----
 promptulate/__init__.py                 | 154 ++---
 promptulate/agents/tool_agent/agent.py  | 434 ++++++-------
 promptulate/agents/tool_agent/prompt.py | 198 +++---
 promptulate/chat.py                     | 548 ++++++++---------
 promptulate/schema.py                   | 604 +++++++++----------
 promptulate/tools/base.py               | 770 ++++++++++++------------
 promptulate/tools/file/toolkit.py       | 102 ++--
 promptulate/tools/manager.py            | 220 +++----
 pyproject.toml                          | 242 ++++----
 13 files changed, 1854 insertions(+), 1854 deletions(-)

diff --git a/Makefile b/Makefile
index 0d76b4a7..09c6b3f1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,106 +1,106 @@
-SHELL := /usr/bin/env bash
-OS := $(shell python -c "import sys; print(sys.platform)")
-
-# all test files are defined here
-DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py
-DEV_TEST_HOOK_FILES := ./tests/hook/test_llm.py ./tests/hook/test_tool_hook.py
-
-DEV_TEST_LLM_FILES := ./tests/llms/test_openai.py ./tests/llms/test_factory.py
-DEV_TEST_AGENT_FILES := ./tests/agents/test_tool_agent.py ./tests/agents/test_assistant_agent.py
-DEV_TEST_FILES := $(DEV_TEST_TOOL_FILES) $(DEV_TEST_HOOK_FILES) $(DEV_TEST_LLM_FILES) $(DEV_TEST_AGENT_FILES) ./tests/test_chat.py ./tests/output_formatter ./tests/test_import.py ./tests/utils/test_string_template.py
-
-
-ifeq ($(OS),win32)
-	PYTHONPATH := $(shell python -c "import os; print(os.getcwd())")
-    TEST_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
-	TEST_PROD_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
-else
-	PYTHONPATH := `pwd`
-    TEST_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
-	TEST_PROD_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
-endif
-
-.PHONY: lock
-lock:
-	poetry lock -n && poetry export --without-hashes > requirements.txt
-
-.PHONY: install
-install:
-	poetry install --with dev
-
-.PHONY: install-integration
-install-integration:
-	poetry install --with dev,test_integration
-
-.PHONY: install-docs
-install-docs:
-	npm i docsify-cli -g
-
-.PHONY: pre-commit-install
-pre-commit-install:
-	poetry run pre-commit install
-
-.PHONY: polish-codestyle
-polish-codestyle:
-	poetry run ruff format --config pyproject.toml promptulate tests example
-	poetry run ruff check --fix --config pyproject.toml promptulate tests example
-
-.PHONY: formatting
-formatting: polish-codestyle
-
-.PHONY: test
-test:
-	$(TEST_COMMAND)
-
-.PHONY: test-prod
-test-prod:
-	$(TEST_PROD_COMMAND)
-	poetry run coverage-badge -o docs/images/coverage.svg -f
-
-.PHONY: check-codestyle
-check-codestyle:
-	poetry run ruff format --check --config pyproject.toml promptulate tests example
-	poetry run ruff check --config pyproject.toml promptulate tests example
-
-.PHONY: lint
-lint: check-codestyle test
-
-# https://github.com/Maxlinn/linn-jupyter-site-template/blob/main/.github/workflows/linn-jupyter-site-template-deploy.yml
-# All notebooks are converted here.
-# If any notebooks have changed, they are converted to Markdown and pushed to the repo.
-.PHONY: build-docs
-build-docs:
-	jupyter nbconvert ./example/chat_usage.ipynb --to markdown --output-dir ./docs/use_cases/
-	jupyter nbconvert ./example/tools/custom_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
-	jupyter nbconvert ./example/llm/custom_llm.ipynb --to markdown --output-dir ./docs/modules/llm
-	jupyter nbconvert ./example/tools/langchain_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
-	jupyter nbconvert ./example/agent/assistant_agent_usage.ipynb --to markdown --output-dir ./docs/modules/agents
-
-
-.PHONY: start-docs
-start-docs:
-	docsify serve docs
-
-#* Cleaning
-.PHONY: pycache-remove
-pycache-remove:
-	find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
-
-.PHONY: dsstore-remove
-dsstore-remove:
-	find . | grep -E "\.DS_Store" | xargs rm -rf
-
-.PHONY: ipynbcheckpoints-remove
-ipynbcheckpoints-remove:
-	find . | grep -E "\.ipynb_checkpoints" | xargs rm -rf
-
-.PHONY: pytestcache-remove
-pytestcache-remove:
-	find . | grep -E "\.pytest_cache" | xargs rm -rf
-
-.PHONY: build-remove
-build-remove:
-	rm -rf build/
-
-.PHONY: cleanup
-cleanup: pycache-remove dsstore-remove ipynbcheckpoints-remove pytestcache-remove
+SHELL := /usr/bin/env bash
+OS := $(shell python -c "import sys; print(sys.platform)")
+
+# all test files are defined here
+DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py
+DEV_TEST_HOOK_FILES := ./tests/hook/test_llm.py ./tests/hook/test_tool_hook.py
+
+DEV_TEST_LLM_FILES := ./tests/llms/test_openai.py ./tests/llms/test_factory.py
+DEV_TEST_AGENT_FILES := ./tests/agents/test_tool_agent.py ./tests/agents/test_assistant_agent.py
+DEV_TEST_FILES := $(DEV_TEST_TOOL_FILES) $(DEV_TEST_HOOK_FILES) $(DEV_TEST_LLM_FILES) $(DEV_TEST_AGENT_FILES) ./tests/test_chat.py ./tests/output_formatter ./tests/test_import.py ./tests/utils/test_string_template.py
+
+
+ifeq ($(OS),win32)
+	PYTHONPATH := $(shell python -c "import os; print(os.getcwd())")
+    TEST_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
+	TEST_PROD_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
+else
+	PYTHONPATH := `pwd`
+    TEST_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
+	TEST_PROD_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
+endif
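+
+# Usage sketch (assumes Poetry and the dev dependency group are installed):
+#   make install && make test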
+
+.PHONY: lock
+lock:
+	poetry lock -n && poetry export --without-hashes > requirements.txt
+
+.PHONY: install
+install:
+	poetry install --with dev
+
+.PHONY: install-integration
+install-integration:
+	poetry install --with dev,test_integration
+
+.PHONY: install-docs
+install-docs:
+	npm i docsify-cli -g
+
+.PHONY: pre-commit-install
+pre-commit-install:
+	poetry run pre-commit install
+
+.PHONY: polish-codestyle
+polish-codestyle:
+	poetry run ruff format --config pyproject.toml promptulate tests example
+	poetry run ruff check --fix --config pyproject.toml promptulate tests example
+
+.PHONY: formatting
+formatting: polish-codestyle
+
+.PHONY: test
+test:
+	$(TEST_COMMAND)
+
+.PHONY: test-prod
+test-prod:
+	$(TEST_PROD_COMMAND)
+	poetry run coverage-badge -o docs/images/coverage.svg -f
+
+.PHONY: check-codestyle
+check-codestyle:
+	poetry run ruff format --check --config pyproject.toml promptulate tests example
+	poetry run ruff check --config pyproject.toml promptulate tests example
+
+.PHONY: lint
+lint: check-codestyle test
+
+# https://github.com/Maxlinn/linn-jupyter-site-template/blob/main/.github/workflows/linn-jupyter-site-template-deploy.yml
+# All notebooks are converted here.
+# If any notebooks have changed, they are converted to Markdown and pushed to the repo.
+.PHONY: build-docs
+build-docs:
+	jupyter nbconvert ./example/chat_usage.ipynb --to markdown --output-dir ./docs/use_cases/
+	jupyter nbconvert ./example/tools/custom_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
+	jupyter nbconvert ./example/llm/custom_llm.ipynb --to markdown --output-dir ./docs/modules/llm
+	jupyter nbconvert ./example/tools/langchain_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
+	jupyter nbconvert ./example/agent/assistant_agent_usage.ipynb --to markdown --output-dir ./docs/modules/agents
+
+
+.PHONY: start-docs
+start-docs:
+	docsify serve docs
+
+#* Cleaning
+.PHONY: pycache-remove
+pycache-remove:
+	find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
+
+.PHONY: dsstore-remove
+dsstore-remove:
+	find . | grep -E "\.DS_Store" | xargs rm -rf
+
+.PHONY: ipynbcheckpoints-remove
+ipynbcheckpoints-remove:
+	find . | grep -E "\.ipynb_checkpoints" | xargs rm -rf
+
+.PHONY: pytestcache-remove
+pytestcache-remove:
+	find . | grep -E "\.pytest_cache" | xargs rm -rf
+
+.PHONY: build-remove
+build-remove:
+	rm -rf build/
+
+.PHONY: cleanup
+cleanup: pycache-remove dsstore-remove ipynbcheckpoints-remove pytestcache-remove
diff --git a/README_zh.md b/README_zh.md
index b0431b11..836e9945 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -216,7 +216,7 @@ pne 框架的设计原则包括:模块化、可扩展性、互操作性、鲁
 欢迎加入群聊一起交流讨论 LLM & AI Agent 相关的话题,群里会不定期进行技术分享,链接过期了可以 issue 或 email 提醒一下作者。
 
 <div style="width: 250px;margin: 0 auto;">
-    <img src="https://zeeland-bucket.oss-cn-beijing.aliyuncs.com/images/20240331035352.png"/>
+    <img src="https://zeeland-bucket.oss-cn-beijing.aliyuncs.com/pne_group.png"/>
 </div>
 
 For more information please contact: [zeeland4work@gmail.com](zeeland4work@gmail.com)
diff --git a/docs/README.md b/docs/README.md
index 36e6ff03..66849e81 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -195,7 +195,7 @@ Following these principles and applying the latest artificial intelligence techn
 Feel free to join the group chat to discuss topics related to LLM & AI Agents. There will be occasional technical shares in the group. If the link expires, please remind the author via issue or email.
 
 <div style="width: 250px;margin: 0 auto;">
-    <img src="https://zeeland-bucket.oss-cn-beijing.aliyuncs.com/images/20240331035352.png"/>
+    <img src="https://zeeland-bucket.oss-cn-beijing.aliyuncs.com/pne_group.png"/>
 </div>
 
 For more information, please contact: [zeeland4work@gmail.com](mailto:zeeland4work@gmail.com)
diff --git a/docs/index.html b/docs/index.html
index 6a87c4d9..1bcd2bcf 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -1,110 +1,110 @@
-<!DOCTYPE html>
-<html lang="zh">
-<head>
-    <meta charset="UTF-8">
-    <title>Document</title>
-    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
-    <meta name="description" content="Description">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
-    <link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify@4/lib/themes/vue.css">
-    <script src="//cdn.jsdelivr.net/npm/docsify-edit-on-github"></script>
-    <style>
-    </style>
-</head>
-<body>
-<nav>
-    <a href="#/">文档 v1.11.2</a>
-    <a href="#/">示例</a>
-</nav>
-<div id="app"></div>
-<script>
-    window.$docsify = {
-        name: 'Promptulate',
-        repo: 'https://github.com/Undertone0809/promptulate',
-        // logo: '/images/logo.svg',
-        loadNavbar: true,
-        loadSidebar: true,
-        subMaxLevel: 3,
-        coverpage: true,
-        search: {
-            paths: 'auto',
-            placeholder: 'Search',
-            noData: 'Not Found',
-            depth: 3,
-        },
-        count: {
-            countable: true,
-            fontsize: '0.9em',
-            color: 'rgb(90,90,90)',
-            language: 'english'
-        },
-        plugins: [
-            EditOnGithubPlugin.create(
-                'https://github.com/Undertone0809/promptulate/tree/main/docs/',
-                null,
-                function (file) {
-                    if (file.indexOf('en') === -1) {
-                        return '编辑文档'
-                    } else {
-                        return 'edit on github'
-                    }
-                }
-            ),
-            function (hook) {
-                var footer = [
-                    '<hr/>',
-                    '<footer>',
-                    '<span><a href="https://github.com/Undertone0809/">Promptulate/Zeeland</a> &copy;2023.</span>',
-                    '<span>Published with <a href="https://github.com/QingWei-Li/docsify" target="_blank">docsify</a>.</span>',
-                    '</footer>'
-                ].join('')
-
-                hook.afterEach(function (html) {
-                    return html + footer
-                })
-            },
-        ]
-
-    }
-</script>
-<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.css">
-
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/gitalk.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.min.js"></script>
-<script>
-    var gitalk = new Gitalk({
-        clientID: '0b1cb7e36ac5fa3233ba',
-        clientSecret: '4148c415d7ed173e3dcffc861b9ac3eea79fbdbb',
-        repo: 'promptulate',
-        owner: 'Undertone0809',
-        admin: ['Undertone0809'],
-        title: `Document comment ${location.hash.match(/#(.*?)([?]|$)/)[1]}`,
-        id: location.hash.match(/#(.*?)([?]|$)/)[1],
-    })
-    // Listen for hash changes in the URL. If the target MD file has changed,
-    // refresh the page, so the whole website does not share a single comment issue.
-    window.onhashchange = function (event) {
-        if (event.newURL.split('?')[0] !== event.oldURL.split('?')[0]) {
-            location.reload()
-        }
-    }
-</script>
-<!-- Docsify v4 -->
-<script src="//cdn.jsdelivr.net/npm/docsify@4"></script>
-<!-- search plugin -->
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/search.min.js"></script>
-<!-- image zoom plugin -->
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/zoom-image.min.js"></script>
-<!-- copy code plugin -->
-<script src="//cdn.jsdelivr.net/npm/docsify-copy-code"></script>
-<script src="//unpkg.com/docsify-count/dist/countable.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-bash.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-python.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-json.min.js"></script>
-
-<script id="embedai" src="https://embedai.thesamur.ai/embedai.js" data-id="pne-docs"></script>
-
-<!-- sidebar plugin -->
-<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify-sidebar-collapse/dist/sidebar.min.css" />
-</body>
-</html>
+<!DOCTYPE html>
+<html lang="zh">
+<head>
+    <meta charset="UTF-8">
+    <title>Document</title>
+    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
+    <meta name="description" content="Description">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
+    <link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify@4/lib/themes/vue.css">
+    <script src="//cdn.jsdelivr.net/npm/docsify-edit-on-github"></script>
+    <style>
+    </style>
+</head>
+<body>
+<nav>
+    <a href="#/">文档 v1.11.2</a>
+    <a href="#/">示例</a>
+</nav>
+<div id="app"></div>
+<script>
+    window.$docsify = {
+        name: 'Promptulate',
+        repo: 'https://github.com/Undertone0809/promptulate',
+        // logo: '/images/logo.svg',
+        loadNavbar: true,
+        loadSidebar: true,
+        subMaxLevel: 3,
+        coverpage: true,
+        search: {
+            paths: 'auto',
+            placeholder: 'Search',
+            noData: 'Not Found',
+            depth: 3,
+        },
+        count: {
+            countable: true,
+            fontsize: '0.9em',
+            color: 'rgb(90,90,90)',
+            language: 'english'
+        },
+        plugins: [
+            EditOnGithubPlugin.create(
+                'https://github.com/Undertone0809/promptulate/tree/main/docs/',
+                null,
+                function (file) {
+                    if (file.indexOf('en') === -1) {
+                        return '编辑文档'
+                    } else {
+                        return 'edit on github'
+                    }
+                }
+            ),
+            function (hook) {
+                var footer = [
+                    '<hr/>',
+                    '<footer>',
+                    '<span><a href="https://github.com/Undertone0809/">Promptulate/Zeeland</a> &copy;2023.</span>',
+                    '<span>Published with <a href="https://github.com/QingWei-Li/docsify" target="_blank">docsify</a>.</span>',
+                    '</footer>'
+                ].join('')
+
+                hook.afterEach(function (html) {
+                    return html + footer
+                })
+            },
+        ]
+
+    }
+</script>
+<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.css">
+
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/gitalk.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.min.js"></script>
+<script>
+    var gitalk = new Gitalk({
+        clientID: '0b1cb7e36ac5fa3233ba',
+        clientSecret: '4148c415d7ed173e3dcffc861b9ac3eea79fbdbb',
+        repo: 'promptulate',
+        owner: 'Undertone0809',
+        admin: ['Undertone0809'],
+        title: `Document comment ${location.hash.match(/#(.*?)([?]|$)/)[1]}`,
+        id: location.hash.match(/#(.*?)([?]|$)/)[1],
+    })
+    // Listen for hash changes in the URL. If the target MD file has changed,
+    // refresh the page, so the whole website does not share a single comment issue.
+    window.onhashchange = function (event) {
+        if (event.newURL.split('?')[0] !== event.oldURL.split('?')[0]) {
+            location.reload()
+        }
+    }
+</script>
+<!-- Docsify v4 -->
+<script src="//cdn.jsdelivr.net/npm/docsify@4"></script>
+<!-- search plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/search.min.js"></script>
+<!-- image zoom plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/zoom-image.min.js"></script>
+<!-- copy code plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify-copy-code"></script>
+<script src="//unpkg.com/docsify-count/dist/countable.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-bash.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-python.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-json.min.js"></script>
+
+<script id="embedai" src="https://embedai.thesamur.ai/embedai.js" data-id="pne-docs"></script>
+
+<!-- sidebar plugin -->
+<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify-sidebar-collapse/dist/sidebar.min.css" />
+</body>
+</html>
diff --git a/promptulate/__init__.py b/promptulate/__init__.py
index 8404d819..805a79b9 100644
--- a/promptulate/__init__.py
+++ b/promptulate/__init__.py
@@ -1,77 +1,77 @@
-# Copyright (c) 2023 promptulate
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Copyright Owner: Zeeland
-# GitHub Link: https://github.com/Undertone0809/
-# Project Link: https://github.com/Undertone0809/promptulate
-# Contact Email: zeeland@foxmail.com
-
-import warnings
-
-from promptulate.agents.base import BaseAgent
-from promptulate.agents.tool_agent.agent import ToolAgent
-from promptulate.agents.web_agent.agent import WebAgent
-from promptulate.chat import AIChat, chat
-from promptulate.llms.base import BaseLLM
-from promptulate.llms.factory import LLMFactory
-from promptulate.llms.openai.openai import ChatOpenAI
-from promptulate.output_formatter import OutputFormatter
-from promptulate.schema import (
-    AssistantMessage,
-    BaseMessage,
-    MessageSet,
-    SystemMessage,
-    UserMessage,
-)
-from promptulate.tools.base import BaseTool, Tool, define_tool
-from promptulate.utils.logger import enable_log
-from promptulate.utils.string_template import StringTemplate
-
-_util_fields = [
-    "enable_log",
-    "OutputFormatter",
-    "StringTemplate",
-]
-
-_schema_fields = [
-    "AssistantMessage",
-    "SystemMessage",
-    "UserMessage",
-    "BaseMessage",
-    "MessageSet",
-]
-
-_llm_fields = ["chat", "AIChat", "BaseLLM", "ChatOpenAI", "LLMFactory"]
-
-_tool_fields = [
-    "Tool",
-    "define_tool",
-    "BaseTool",
-]
-
-_agent_fields = [
-    "BaseAgent",
-    "WebAgent",
-    "ToolAgent",
-]
-
-__all__ = [
-    *_util_fields,
-    *_schema_fields,
-    *_llm_fields,
-    *_tool_fields,
-    *_agent_fields,
-]
-
-warnings.filterwarnings("always", category=DeprecationWarning)
+# Copyright (c) 2023 promptulate
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Copyright Owner: Zeeland
+# GitHub Link: https://github.com/Undertone0809/
+# Project Link: https://github.com/Undertone0809/promptulate
+# Contact Email: zeeland@foxmail.com
+
+import warnings
+
+from promptulate.agents.base import BaseAgent
+from promptulate.agents.tool_agent.agent import ToolAgent
+from promptulate.agents.web_agent.agent import WebAgent
+from promptulate.chat import AIChat, chat
+from promptulate.llms.base import BaseLLM
+from promptulate.llms.factory import LLMFactory
+from promptulate.llms.openai.openai import ChatOpenAI
+from promptulate.output_formatter import OutputFormatter
+from promptulate.schema import (
+    AssistantMessage,
+    BaseMessage,
+    MessageSet,
+    SystemMessage,
+    UserMessage,
+)
+from promptulate.tools.base import BaseTool, Tool, define_tool
+from promptulate.utils.logger import enable_log
+from promptulate.utils.string_template import StringTemplate
+
+_util_fields = [
+    "enable_log",
+    "OutputFormatter",
+    "StringTemplate",
+]
+
+_schema_fields = [
+    "AssistantMessage",
+    "SystemMessage",
+    "UserMessage",
+    "BaseMessage",
+    "MessageSet",
+]
+
+_llm_fields = ["chat", "AIChat", "BaseLLM", "ChatOpenAI", "LLMFactory"]
+
+_tool_fields = [
+    "Tool",
+    "define_tool",
+    "BaseTool",
+]
+
+_agent_fields = [
+    "BaseAgent",
+    "WebAgent",
+    "ToolAgent",
+]
+
+__all__ = [
+    *_util_fields,
+    *_schema_fields,
+    *_llm_fields,
+    *_tool_fields,
+    *_agent_fields,
+]
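+
+# Example top-level usage (a sketch; assumes an OpenAI API key is configured):
+#   import promptulate as pne
+#   reply: str = pne.chat(messages="What is promptulate?", model="gpt-3.5-turbo")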
+
+warnings.filterwarnings("always", category=DeprecationWarning)
diff --git a/promptulate/agents/tool_agent/agent.py b/promptulate/agents/tool_agent/agent.py
index da9a38a6..f21ae2e2 100644
--- a/promptulate/agents/tool_agent/agent.py
+++ b/promptulate/agents/tool_agent/agent.py
@@ -1,217 +1,217 @@
-import json
-import time
-from typing import Callable, List, Optional, TypedDict, Union
-
-from promptulate.agents import BaseAgent
-from promptulate.agents.tool_agent.prompt import (
-    PREFIX_TEMPLATE,
-    REACT_SYSTEM_PROMPT_TEMPLATE,
-)
-from promptulate.hook import Hook, HookTable
-from promptulate.llms.base import BaseLLM
-from promptulate.llms.openai.openai import ChatOpenAI
-from promptulate.schema import ToolTypes
-from promptulate.tools.manager import ToolManager
-from promptulate.utils.logger import logger
-from promptulate.utils.string_template import StringTemplate
-
-
-class ActionResponse(TypedDict):
-    thought: str
-    action_name: str
-    action_parameters: Union[dict, str]
-
-
-class ToolAgent(BaseAgent):
-    """
-    An agent that is proficient at using tools, following the ReAct paradigm.
-
-    Attributes:
-        llm (BaseLLM): The language model driver. Default is ChatOpenAI with model
-            "gpt-3.5-turbo-16k".
-        stop_sequences (List[str]): The sequences that, when met, will stop the output
-            of the llm.
-        system_prompt_template (StringTemplate): The preset system prompt template.
-        prefix_prompt_template (StringTemplate): The prefix system prompt template.
-        tool_manager (ToolManager): Used to manage all tools.
-        conversation_prompt (str): Stores all conversation messages during a
-            conversation.
-        max_iterations (Optional[int]): The maximum number of executions. Default is 15.
-        max_execution_time (Optional[float]): The longest running time. No default
-            value.
-        enable_role (bool): Flag to enable role. Default is False.
-        agent_name (str): The name of the agent. Default is "tool-agent".
-        agent_identity (str): The identity of the agent. Default is "tool-agent".
-        agent_goal (str): The goal of the agent. Default is "provides better assistance
-            and services for humans.".
-        agent_constraints (str): The constraints of the agent. Default is "none".
-    """
-
-    def __init__(
-        self,
-        *,
-        llm: BaseLLM = None,
-        tools: Optional[List[ToolTypes]] = None,
-        prefix_prompt_template: StringTemplate = StringTemplate(PREFIX_TEMPLATE),
-        hooks: Optional[List[Callable]] = None,
-        enable_role: bool = False,
-        agent_name: str = "tool-agent",
-        agent_identity: str = "tool-agent",
-        agent_goal: str = "provides better assistance and services for humans.",
-        agent_constraints: str = "none",
-        tool_manager: Optional[ToolManager] = None,
-        _from: Optional[str] = None,
-    ):
-        if tools is not None and tool_manager is not None:
-            raise ValueError(
-                "Please provide either 'tools' or 'tool_manager', but not both simultaneously."  # noqa
-            )
-
-        super().__init__(hooks=hooks, agent_type="Tool Agent", _from=_from)
-        self.llm: BaseLLM = llm or ChatOpenAI(model="gpt-4-1106-preview")
-        """llm provider"""
-        self.tool_manager: ToolManager = (
-            tool_manager if tool_manager is not None else ToolManager(tools or [])
-        )
-        """Used to manage all tools, Only create a new ToolManager if 'tool_manager' is
-        not provided."""
-        self.system_prompt_template: StringTemplate = REACT_SYSTEM_PROMPT_TEMPLATE
-        """Preset system prompt template."""
-        self.prefix_prompt_template: StringTemplate = prefix_prompt_template
-        """Prefix system prompt template."""
-        self.conversation_prompt: str = ""
-        """Store all conversation message when conversation. ToolAgent use dynamic
-        system prompt."""
-        self.max_iterations: Optional[int] = 15
-        """The maximum number of executions."""
-        self.max_execution_time: Optional[float] = None
-        """The longest running time. """
-        self.enable_role: bool = enable_role
-        self.agent_name: str = agent_name
-        self.agent_identity: str = agent_identity
-        self.agent_goal: str = agent_goal
-        self.agent_constraints: str = agent_constraints
-
-    def get_llm(self) -> BaseLLM:
-        return self.llm
-
-    def _build_system_prompt(self, instruction: str) -> str:
-        """Build the system prompt."""
-        prefix_prompt = (
-            self.prefix_prompt_template.format(
-                agent_identity=self.agent_identity,
-                agent_name=self.agent_name,
-                agent_goal=self.agent_goal,
-                agent_constraints=self.agent_constraints,
-            )
-            if self.enable_role
-            else ""
-        )
-
-        return prefix_prompt + self.system_prompt_template.format(
-            question=instruction,
-            tool_descriptions=self.tool_manager.tool_descriptions,
-        )
-
-    @property
-    def current_date(self) -> str:
-        """Get the current date."""
-        return f"Current date: {time.strftime('%Y-%m-%d %H:%M:%S')}"
-
-    def _run(
-        self, instruction: str, return_raw_data: bool = False, **kwargs
-    ) -> Union[str, ActionResponse]:
-        """Run the tool agent. The tool agent will interact with the LLM and the tool.
-
-        Args:
-            instruction(str): The instruction to the tool agent.
-            return_raw_data(bool): Whether to return raw data. Default is False.
-
-        Returns:
-            The output of the tool agent.
-        """
-        self.conversation_prompt = self._build_system_prompt(instruction)
-        logger.info(f"[pne] ToolAgent system prompt: {self.conversation_prompt}")
-
-        iterations = 0
-        used_time = 0.0
-        start_time = time.time()
-
-        while self._should_continue(iterations, used_time):
-            llm_resp: str = self.llm(
-                instruction=self.conversation_prompt + self.current_date
-            )
-            while llm_resp == "":
-                llm_resp = self.llm(
-                    instruction=self.conversation_prompt + self.current_date
-                )
-
-            action_resp: ActionResponse = self._parse_llm_response(llm_resp)
-            self.conversation_prompt += f"{llm_resp}\n"
-            logger.info(
-                f"[pne] tool agent <{iterations}> current prompt: {self.conversation_prompt}"  # noqa
-            )
-
-            if "finish" in action_resp["action_name"]:
-                if return_raw_data:
-                    return action_resp
-
-                return action_resp["action_parameters"]["content"]
-
-            Hook.call_hook(
-                HookTable.ON_AGENT_ACTION,
-                self,
-                thought=action_resp["thought"],
-                action=action_resp["action_name"],
-                action_input=action_resp["action_parameters"],
-            )
-
-            tool_result = self.tool_manager.run_tool(
-                action_resp["action_name"], action_resp["action_parameters"]
-            )
-            Hook.call_hook(
-                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
-            )
-            self.conversation_prompt += f"Observation: {tool_result}\n"
-
-            iterations += 1
-            used_time = time.time() - start_time
-
-    def _should_continue(self, current_iteration: int, current_time_elapsed) -> bool:
-        """Determine whether to stop, both timeout and exceeding the maximum number of
-        iterations will stop.
-
-        Args:
-            current_iteration: current iteration times.
-            current_time_elapsed: current running time.
-
-        Returns:
-            Whether to continue.
-        """
-        if self.max_iterations and current_iteration >= self.max_iterations:
-            return False
-        if self.max_execution_time and current_time_elapsed >= self.max_execution_time:
-            return False
-        return True
-
-    def _parse_llm_response(self, llm_resp: str) -> ActionResponse:
-        """Parse next instruction of LLM output.
-
-        Args:
-            llm_resp(str): output of LLM
-
-        Returns:
-            An ActionResponse containing the thought, the tool name (action_name),
-            and the tool parameters (action_parameters).
-        """
-        llm_resp: str = (
-            llm_resp.replace("```json", "").replace("```JSON", "").replace("```", "")
-        )
-        data: dict = json.loads(llm_resp)
-
-        return ActionResponse(
-            thought=data["thought"],
-            action_name=data["action"]["name"],
-            action_parameters=data["action"]["args"],
-        )
+import json
+import time
+from typing import Callable, List, Optional, TypedDict, Union
+
+from promptulate.agents import BaseAgent
+from promptulate.agents.tool_agent.prompt import (
+    PREFIX_TEMPLATE,
+    REACT_SYSTEM_PROMPT_TEMPLATE,
+)
+from promptulate.hook import Hook, HookTable
+from promptulate.llms.base import BaseLLM
+from promptulate.llms.openai.openai import ChatOpenAI
+from promptulate.schema import ToolTypes
+from promptulate.tools.manager import ToolManager
+from promptulate.utils.logger import logger
+from promptulate.utils.string_template import StringTemplate
+
+
+class ActionResponse(TypedDict):
+    thought: str
+    action_name: str
+    action_parameters: Union[dict, str]
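+
+
+# Example instance (illustrative values only):
+#   ActionResponse(thought="Search the weather first.", action_name="websearch",
+#                  action_parameters={"query": "Beijing weather tomorrow"})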
+
+
+class ToolAgent(BaseAgent):
+    """
+    An agent that is proficient at using tools, following the ReAct paradigm.
+
+    Attributes:
+        llm (BaseLLM): The language model driver. Default is ChatOpenAI with model
+            "gpt-3.5-turbo-16k".
+        stop_sequences (List[str]): The sequences that, when met, will stop the output
+            of the llm.
+        system_prompt_template (StringTemplate): The preset system prompt template.
+        prefix_prompt_template (StringTemplate): The prefix system prompt template.
+        tool_manager (ToolManager): Used to manage all tools.
+        conversation_prompt (str): Stores all conversation messages during a
+            conversation.
+        max_iterations (Optional[int]): The maximum number of executions. Default is 15.
+        max_execution_time (Optional[float]): The longest running time. No default
+            value.
+        enable_role (bool): Flag to enable role. Default is False.
+        agent_name (str): The name of the agent. Default is "tool-agent".
+        agent_identity (str): The identity of the agent. Default is "tool-agent".
+        agent_goal (str): The goal of the agent. Default is "provides better assistance
+            and services for humans.".
+        agent_constraints (str): The constraints of the agent. Default is "none".
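+
+    Example:
+        A minimal sketch; assumes `my_tool` is a tool function defined elsewhere:
+
+            agent = ToolAgent(tools=[my_tool])
+            answer: str = agent.run("What is the weather in Beijing tomorrow?")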
+    """
+
+    def __init__(
+        self,
+        *,
+        llm: BaseLLM = None,
+        tools: Optional[List[ToolTypes]] = None,
+        prefix_prompt_template: StringTemplate = StringTemplate(PREFIX_TEMPLATE),
+        hooks: Optional[List[Callable]] = None,
+        enable_role: bool = False,
+        agent_name: str = "tool-agent",
+        agent_identity: str = "tool-agent",
+        agent_goal: str = "provides better assistance and services for humans.",
+        agent_constraints: str = "none",
+        tool_manager: Optional[ToolManager] = None,
+        _from: Optional[str] = None,
+    ):
+        if tools is not None and tool_manager is not None:
+            raise ValueError(
+                "Please provide either 'tools' or 'tool_manager', but not both simultaneously."  # noqa
+            )
+
+        super().__init__(hooks=hooks, agent_type="Tool Agent", _from=_from)
+        self.llm: BaseLLM = llm or ChatOpenAI(model="gpt-4-1106-preview")
+        """llm provider"""
+        self.tool_manager: ToolManager = (
+            tool_manager if tool_manager is not None else ToolManager(tools or [])
+        )
+        """Used to manage all tools, Only create a new ToolManager if 'tool_manager' is
+        not provided."""
+        self.system_prompt_template: StringTemplate = REACT_SYSTEM_PROMPT_TEMPLATE
+        """Preset system prompt template."""
+        self.prefix_prompt_template: StringTemplate = prefix_prompt_template
+        """Prefix system prompt template."""
+        self.conversation_prompt: str = ""
+        """Store all conversation message when conversation. ToolAgent use dynamic
+        system prompt."""
+        self.max_iterations: Optional[int] = 15
+        """The maximum number of executions."""
+        self.max_execution_time: Optional[float] = None
+        """The longest running time. """
+        self.enable_role: bool = enable_role
+        self.agent_name: str = agent_name
+        self.agent_identity: str = agent_identity
+        self.agent_goal: str = agent_goal
+        self.agent_constraints: str = agent_constraints
+
+    def get_llm(self) -> BaseLLM:
+        return self.llm
+
+    def _build_system_prompt(self, instruction: str) -> str:
+        """Build the system prompt."""
+        prefix_prompt = (
+            self.prefix_prompt_template.format(
+                agent_identity=self.agent_identity,
+                agent_name=self.agent_name,
+                agent_goal=self.agent_goal,
+                agent_constraints=self.agent_constraints,
+            )
+            if self.enable_role
+            else ""
+        )
+
+        return prefix_prompt + self.system_prompt_template.format(
+            question=instruction,
+            tool_descriptions=self.tool_manager.tool_descriptions,
+        )
+
+    @property
+    def current_date(self) -> str:
+        """Get the current date."""
+        return f"Current date: {time.strftime('%Y-%m-%d %H:%M:%S')}"
+
+    def _run(
+        self, instruction: str, return_raw_data: bool = False, **kwargs
+    ) -> Union[str, ActionResponse]:
+        """Run the tool agent. The tool agent will interact with the LLM and the tool.
+
+        Args:
+            instruction(str): The instruction to the tool agent.
+            return_raw_data(bool): Whether to return raw data. Default is False.
+
+        Returns:
+            The output of the tool agent.
+        """
+        self.conversation_prompt = self._build_system_prompt(instruction)
+        logger.info(f"[pne] ToolAgent system prompt: {self.conversation_prompt}")
+
+        iterations = 0
+        used_time = 0.0
+        start_time = time.time()
+
+        while self._should_continue(iterations, used_time):
+            llm_resp: str = self.llm(
+                instruction=self.conversation_prompt + self.current_date
+            )
+            while llm_resp == "":
+                llm_resp = self.llm(
+                    instruction=self.conversation_prompt + self.current_date
+                )
+
+            action_resp: ActionResponse = self._parse_llm_response(llm_resp)
+            self.conversation_prompt += f"{llm_resp}\n"
+            logger.info(
+                f"[pne] tool agent <{iterations}> current prompt: {self.conversation_prompt}"  # noqa
+            )
+
+            if "finish" in action_resp["action_name"]:
+                if return_raw_data:
+                    return action_resp
+
+                return action_resp["action_parameters"]["content"]
+
+            Hook.call_hook(
+                HookTable.ON_AGENT_ACTION,
+                self,
+                thought=action_resp["thought"],
+                action=action_resp["action_name"],
+                action_input=action_resp["action_parameters"],
+            )
+
+            tool_result = self.tool_manager.run_tool(
+                action_resp["action_name"], action_resp["action_parameters"]
+            )
+            Hook.call_hook(
+                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
+            )
+            self.conversation_prompt += f"Observation: {tool_result}\n"
+
+            iterations += 1
+            used_time = time.time() - start_time
+
+    def _should_continue(self, current_iteration: int, current_time_elapsed) -> bool:
+        """Determine whether to stop, both timeout and exceeding the maximum number of
+        iterations will stop.
+
+        Args:
+            current_iteration: current iteration times.
+            current_time_elapsed: current running time.
+
+        Returns:
+            Whether to continue.
+        """
+        if self.max_iterations and current_iteration >= self.max_iterations:
+            return False
+        if self.max_execution_time and current_time_elapsed >= self.max_execution_time:
+            return False
+        return True
+
+    def _parse_llm_response(self, llm_resp: str) -> ActionResponse:
+        """Parse next instruction of LLM output.
+
+        Args:
+            llm_resp(str): output of LLM
+
+        Returns:
+            An ActionResponse containing the thought, the tool name (action_name),
+            and the tool parameters (action_parameters).
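+
+        Example of a parsable LLM response (illustrative only):
+            '{"thought": "...", "self_criticism": "...",
+            "action": {"name": "websearch", "args": {"query": "..."}}}'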
+        """
+        llm_resp: str = (
+            llm_resp.replace("```json", "").replace("```JSON", "").replace("```", "")
+        )
+        data: dict = json.loads(llm_resp)
+
+        return ActionResponse(
+            thought=data["thought"],
+            action_name=data["action"]["name"],
+            action_parameters=data["action"]["args"],
+        )
diff --git a/promptulate/agents/tool_agent/prompt.py b/promptulate/agents/tool_agent/prompt.py
index d9b0564a..6acb130d 100644
--- a/promptulate/agents/tool_agent/prompt.py
+++ b/promptulate/agents/tool_agent/prompt.py
@@ -1,99 +1,99 @@
-from promptulate.utils.string_template import StringTemplate
-
-SYSTEM_PROMPT_TEMPLATE = StringTemplate(
-    template_format="jinja2",
-    template="""As a diligent Task Agent, you goal is to effectively accomplish the provided task or question as best as you can.
-
-## Tools
-You have access to the following tools; their information is provided by the following schema:
-{{tool_descriptions}}
-
-## Task
-Currently, you are working on the following task:
-{{task}}
-
-To achieve your goals, you need to choose the appropriate tools for reasoning.
-For example, if the user wants to check the weather in Beijing tomorrow, the first step is to use websearch to query the weather in Beijing. After obtaining the results, the second step is to use the finish command to return the results.
-
-## Constraints
-- Choose only ONE tool in one step.
-- Choose tools carefully, as they are critical to accomplishing the task.
-- Your final answer output language should be consistent with the language used by the user. Intermediate step output is in English.
-
-{{current_process}}
-
-{{output_format}}
-""",  # noqa: E501
-)
-
-REACT_SYSTEM_PROMPT_TEMPLATE = StringTemplate(
-    template_format="jinja2",
-    template="""
-As a diligent Task Agent, your goal is to accomplish the provided task or question as effectively as you can.
-
-## Tools
-You have access to the following tools; their information is provided by the following schema:
-{{tool_descriptions}}
-
-## Output Format
-To answer the question, use the following JSON format. JSON only, no explanation. Otherwise, you will be punished.
-The output should be formatted as a JSON instance that conforms to the format below.
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": # the action to take, must be one of provided tools
-    {
-    "name": "tool name",
-    "args": "tool input parameters, json type data"
-    }
-}
-```
-
-If this format is used, the user will respond in the following format:
-
-```
-Observation: tool response
-```
-
-You should keep repeating the above format until you have enough information
-to answer the question without using any more tools. At that point, you MUST respond
-in one of the following two formats:
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": {
-    "name": "finish",
-    "args": {"content": "You answer here."}
-    }
-}
-```
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": {
-    "name": "finish",
-    "args": {"content": "Sorry, I cannot answer your query, because (Summary all the upper steps, and explain)"}
-    }
-}
-```
-
-## Attention
-- Your output is JSON only and no explanation.
-- Choose only ONE tool per step; you cannot complete a step without using a tool.
-- Your final answer output language should be consistent with the language used by the user. Intermediate step output is in English.
-- Whether the action input is JSON or str depends on the definition of the tool.
-
-## User question
-{{question}}
-
-## Current Conversation
-Below is the current conversation consisting of interleaving human and assistant history.
-""",  # noqa: E501
-)
-PREFIX_TEMPLATE = """You are a {agent_identity}, named {agent_name}, your goal is {agent_goal}, and the constraint is {agent_constraints}. """  # noqa
+from promptulate.utils.string_template import StringTemplate
+
+SYSTEM_PROMPT_TEMPLATE = StringTemplate(
+    template_format="jinja2",
+    template="""As a diligent Task Agent, you goal is to effectively accomplish the provided task or question as best as you can.
+
+## Tools
+You have access to the following tools; their information is provided by the following schema:
+{{tool_descriptions}}
+
+## Task
+Currently, you are working on the following task:
+{{task}}
+
+To achieve your goals, you need to choose the appropriate tools for reasoning.
+For example, if the user wants to check the weather in Beijing tomorrow, the first step is to use websearch to query the weather in Beijing. After obtaining the results, the second step is to use the finish command to return the results.
+
+## Constraints
+- Choose only ONE tool in one step.
+- Choose tools carefully, as they are critical to accomplishing the task.
+- Your final answer output language should be consistent with the language used by the user. Intermediate step output is in English.
+
+{{current_process}}
+
+{{output_format}}
+""",  # noqa: E501
+)
+
+REACT_SYSTEM_PROMPT_TEMPLATE = StringTemplate(
+    template_format="jinja2",
+    template="""
+As a diligent Task Agent, your goal is to accomplish the provided task or question as effectively as you can.
+
+## Tools
+You have access to the following tools; their information is provided by the following schema:
+{{tool_descriptions}}
+
+## Output Format
+To answer the question, use the following JSON format. JSON only, no explanation. Otherwise, you will be punished.
+The output should be formatted as a JSON instance that conforms to the format below.
+
+```json
+{
+"thought": "The thought of what to do and why.",
+"self_criticism":"Constructive self-criticism of the thought",
+"action": # the action to take, must be one of provided tools
+    {
+    "name": "tool name",
+    "args": "tool input parameters, json type data"
+    }
+}
+```
+
+If this format is used, the user will respond in the following format:
+
+```
+Observation: tool response
+```
+
+You should keep repeating the above format until you have enough information
+to answer the question without using any more tools. At that point, you MUST respond
+in one of the following two formats:
+
+```json
+{
+"thought": "The thought of what to do and why.",
+"self_criticism":"Constructive self-criticism of the thought",
+"action": {
+    "name": "finish",
+    "args": {"content": "You answer here."}
+    }
+}
+```
+
+```json
+{
+"thought": "The thought of what to do and why.",
+"self_criticism":"Constructive self-criticism of the thought",
+"action": {
+    "name": "finish",
+    "args": {"content": "Sorry, I cannot answer your query, because (Summary all the upper steps, and explain)"}
+    }
+}
+```
+
+## Attention
+- Your output is JSON only and no explanation.
+- Choose only ONE tool per step; you cannot complete a step without using a tool.
+- Your final answer output language should be consistent with the language used by the user. Intermediate step output is in English.
+- Whether the action input is JSON or str depends on the definition of the tool.
+
+## User question
+{{question}}
+
+## Current Conversation
+Below is the current conversation consisting of interleaving human and assistant history.
+""",  # noqa: E501
+)
+PREFIX_TEMPLATE = """You are a {agent_identity}, named {agent_name}, your goal is {agent_goal}, and the constraint is {agent_constraints}. """  # noqa
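+
+# Formatted example, using the ToolAgent defaults (illustrative):
+#   "You are a tool-agent, named tool-agent, your goal is provides better
+#   assistance and services for humans., and the constraint is none. "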
diff --git a/promptulate/chat.py b/promptulate/chat.py
index caaffa9f..e763bd93 100644
--- a/promptulate/chat.py
+++ b/promptulate/chat.py
@@ -1,274 +1,274 @@
-import json
-from typing import Dict, List, Optional, Tuple, TypeVar, Union
-
-import litellm
-
-from promptulate.agents.base import BaseAgent
-from promptulate.agents.tool_agent.agent import ToolAgent
-from promptulate.beta.agents.assistant_agent import AssistantAgent
-from promptulate.llms import BaseLLM
-from promptulate.output_formatter import formatting_result, get_formatted_instructions
-from promptulate.pydantic_v1 import BaseModel
-from promptulate.schema import (
-    AssistantMessage,
-    BaseMessage,
-    MessageSet,
-    StreamIterator,
-    ToolTypes,
-)
-from promptulate.tools.base import BaseTool
-from promptulate.utils.logger import logger
-
-T = TypeVar("T", bound=BaseModel)
-
-
-def parse_content(chunk) -> Tuple[str, dict]:
-    """Parse the litellm chunk.
-
-    Args:
-        chunk: litellm chunk.
-
-    Returns:
-        content: The content of the chunk.
-        ret_data: The additional data of the chunk.
-    """
-    content = chunk.choices[0].delta.content
-    ret_data = json.loads(chunk.json())
-    return content, ret_data
-
-
-class _LiteLLM(BaseLLM):
-    def __init__(
-        self, model: str, model_config: Optional[dict] = None, *args, **kwargs
-    ):
-        logger.info(f"[pne chat] init LiteLLM, model: {model} config: {model_config}")
-        super().__init__(*args, **kwargs)
-        self._model: str = model
-        self._model_config: dict = model_config or {}
-
-    def _predict(
-        self, messages: MessageSet, stream: bool = False, *args, **kwargs
-    ) -> Union[AssistantMessage, StreamIterator]:
-        logger.info(f"[pne chat] prompts: {messages.string_messages}")
-        temp_response = litellm.completion(
-            model=self._model, messages=messages.listdict_messages, **self._model_config
-        )
-
-        if stream:
-            return StreamIterator(
-                response_stream=temp_response,
-                parse_content=parse_content,
-                return_raw_response=False,
-            )
-
-        response = AssistantMessage(
-            content=temp_response.choices[0].message.content,
-            additional_kwargs=temp_response.json()
-            if isinstance(temp_response.json(), dict)
-            else json.loads(temp_response.json()),
-        )
-        logger.debug(
-            f"[pne chat] response: {json.dumps(response.additional_kwargs, indent=2)}"
-        )
-        return response
-
-    def __call__(self, instruction: str, *args, **kwargs) -> str:
-        return self._predict(
-            MessageSet.from_listdict_data(
-                [
-                    {"content": "You are a helpful assistant.", "role": "system"},
-                    {"content": instruction, "role": "user"},
-                ]
-            )
-        ).content
-
-
-def _convert_message(messages: Union[List, MessageSet, str]) -> MessageSet:
-    """Convert str or List[Dict] to MessageSet.
-
-    Args:
-        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
-            API type data(List[Dict]) or MessageSet type.
-
-    Returns:
-        Return MessageSet type data.
-    """
-    if isinstance(messages, str):
-        messages: List[Dict] = [
-            {"content": "You are a helpful assistant", "role": "system"},
-            {"content": messages, "role": "user"},
-        ]
-    if isinstance(messages, list):
-        messages: MessageSet = MessageSet.from_listdict_data(messages)
-
-    return messages
-
-
-def _get_llm(
-    model: str = "gpt-3.5-turbo",
-    model_config: Optional[dict] = None,
-    custom_llm: Optional[BaseLLM] = None,
-) -> BaseLLM:
-    """Get LLM instance.
-
-    Args:
-        model(str): LLM model.
-        model_config(dict): LLM model config.
-        custom_llm(BaseLLM): custom LLM instance.
-
-    Returns:
-        Return LLM instance.
-    """
-    if custom_llm:
-        return custom_llm
-
-    return _LiteLLM(model=model, model_config=model_config)
-
-
-class AIChat:
-    def __init__(
-        self,
-        model: str = "gpt-3.5-turbo",
-        model_config: Optional[dict] = None,
-        tools: Optional[List[ToolTypes]] = None,
-        custom_llm: Optional[BaseLLM] = None,
-        enable_plan: bool = False,
-    ):
-        """Initialize the AIChat.
-
-        Args:
-            model(str): LLM model name, eg: "gpt-3.5-turbo".
-            model_config(Optional[dict]): LLM model config.
-            tools(Optional[List[ToolTypes]]): specified tools for llm, if exists, AIChat
-                will use Agent to run.
-            custom_llm(Optional[BaseLLM]): custom LLM instance.
-            enable_plan(bool): use Agent with plan ability if True.
-        """
-        self.llm: BaseLLM = _get_llm(model, model_config, custom_llm)
-        self.tools: Optional[List[ToolTypes]] = tools
-        self.agent: Optional[BaseAgent] = None
-
-        if tools:
-            if enable_plan:
-                self.agent = AssistantAgent(tools=self.tools, llm=self.llm)
-                logger.info("[pne chat] invoke AssistantAgent with plan ability.")
-            else:
-                self.agent = ToolAgent(tools=self.tools, llm=self.llm)
-                logger.info("[pne chat] invoke ToolAgent.")
-
-    def run(
-        self,
-        messages: Union[List, MessageSet, str],
-        output_schema: Optional[type(BaseModel)] = None,
-        examples: Optional[List[BaseModel]] = None,
-        return_raw_response: bool = False,
-        stream: bool = False,
-        **kwargs,
-    ) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
-        """Run the AIChat.
-
-        Args:
-            messages(Union[List, MessageSet, str]): chat messages. It can be str,
-                OpenAI API type data (List[Dict]), or MessageSet type.
-            output_schema(BaseModel): specified return type. See detail on in
-                OutputFormatter module.
-            examples(List[BaseModel]): examples for output_schema. See detail
-                on: OutputFormatter.
-            return_raw_response(bool): return OpenAI completion result if true,
-                otherwise return string type data.
-            stream(bool): return stream iterator if True.
-
-        Returns:
-            Return a string normally, since enable_original_return defaults to False.
-                If tools are provided, the agent returns string type data.
-            Return BaseMessage if enable_original_return is True and not in agent mode.
-            Return List[BaseMessage] if stream is True.
-            Return T if output_schema is provided.
-        """
-        if stream and (output_schema or self.tools):
-            raise ValueError(
-                "stream, tools and output_schema can't be True at the same time, "
-                "because stream is used to return Iterator[BaseMessage]."
-            )
-
-        if self.agent:
-            return self.agent.run(messages, output_schema=output_schema)
-
-        messages: MessageSet = _convert_message(messages)
-
-        # add output format into the last prompt if provide
-        if output_schema:
-            instruction: str = get_formatted_instructions(
-                json_schema=output_schema, examples=examples
-            )
-            messages.messages[-1].content += f"\n{instruction}"
-
-        logger.info(f"[pne chat] messages: {messages}")
-
-        response: AssistantMessage = self.llm.predict(messages, stream=stream, **kwargs)
-
-        logger.info(f"[pne chat] response: {response.additional_kwargs}")
-
-        # return output format if provide
-        if output_schema:
-            logger.info("[pne chat] return formatted response.")
-            return formatting_result(
-                pydantic_obj=output_schema, llm_output=response.content
-            )
-
-        return response if return_raw_response else response.content
-
-
-def chat(
-    messages: Union[List, MessageSet, str],
-    *,
-    model: str = "gpt-3.5-turbo",
-    model_config: Optional[dict] = None,
-    tools: Optional[List[ToolTypes]] = None,
-    output_schema: Optional[type(BaseModel)] = None,
-    examples: Optional[List[BaseModel]] = None,
-    return_raw_response: bool = False,
-    custom_llm: Optional[BaseLLM] = None,
-    enable_plan: bool = False,
-    stream: bool = False,
-    **kwargs,
-) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
-    """A universal chat method, you can chat any model like OpenAI completion.
-    It should be noted that chat() is only support chat model currently.
-
-    Args:
-        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
-            API type data(List[Dict]) or MessageSet type.
-        model(str): LLM model. Currently only support chat model.
-        model_config(Optional[dict]): LLM model config.
-        tools(List[BaseTool] | None): specified tools for llm.
-        output_schema(BaseModel): specified return type. See detail on: OutputFormatter.
-        examples(List[BaseModel]): examples for output_schema. See detail
-            on: OutputFormatter.
-        return_raw_response(bool): return OpenAI completion result if true, otherwise
-            return string type data.
-        custom_llm(BaseLLM): You can use custom LLM if you have.
-        enable_plan(bool): use Agent with plan ability if True.
-        stream(bool): return stream iterator if True.
-        **kwargs: litellm kwargs
-
-    Returns:
-        Return string normally, it means enable_original_return is default False.
-        Return BaseMessage if enable_original_return is True.
-        Return List[BaseMessage] if stream is True.
-        Return T if output_schema is provided.
-    """
-    return AIChat(
-        model=model,
-        model_config=model_config,
-        tools=tools,
-        custom_llm=custom_llm,
-        enable_plan=enable_plan,
-    ).run(
-        messages=messages,
-        output_schema=output_schema,
-        examples=examples,
-        return_raw_response=return_raw_response,
-        stream=stream,
-        **kwargs,
-    )
+import json
+from typing import Dict, List, Optional, Tuple, Type, TypeVar, Union
+
+import litellm
+
+from promptulate.agents.base import BaseAgent
+from promptulate.agents.tool_agent.agent import ToolAgent
+from promptulate.beta.agents.assistant_agent import AssistantAgent
+from promptulate.llms import BaseLLM
+from promptulate.output_formatter import formatting_result, get_formatted_instructions
+from promptulate.pydantic_v1 import BaseModel
+from promptulate.schema import (
+    AssistantMessage,
+    BaseMessage,
+    MessageSet,
+    StreamIterator,
+    ToolTypes,
+)
+from promptulate.tools.base import BaseTool
+from promptulate.utils.logger import logger
+
+T = TypeVar("T", bound=BaseModel)
+
+
+def parse_content(chunk) -> Tuple[str, dict]:
+    """Parse the litellm chunk.
+    Args:
+        chunk: litellm chunk.
+
+    Returns:
+        content: The content of the chunk.
+        ret_data: The additional data of the chunk.
+    """
+    content = chunk.choices[0].delta.content
+    ret_data = json.loads(chunk.json())
+    return content, ret_data
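+
+# A minimal sketch of what parse_content receives and returns. The chunk shape
+# below is an assumption based on litellm's OpenAI-compatible streaming format:
+#
+# >>> stream = litellm.completion(model="gpt-3.5-turbo",
+# ...                             messages=[{"role": "user", "content": "hi"}],
+# ...                             stream=True)
+# >>> content, ret_data = parse_content(next(iter(stream)))
+# >>> isinstance(ret_data, dict)
+# True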
+
+
+class _LiteLLM(BaseLLM):
+    def __init__(
+        self, model: str, model_config: Optional[dict] = None, *args, **kwargs
+    ):
+        logger.info(f"[pne chat] init LiteLLM, model: {model} config: {model_config}")
+        super().__init__(*args, **kwargs)
+        self._model: str = model
+        self._model_config: dict = model_config or {}
+
+    def _predict(
+        self, messages: MessageSet, stream: bool = False, *args, **kwargs
+    ) -> Union[AssistantMessage, StreamIterator]:
+        logger.info(f"[pne chat] prompts: {messages.string_messages}")
+        # Pass stream through so litellm returns an iterable of delta chunks
+        # that StreamIterator can parse.
+        temp_response = litellm.completion(
+            model=self._model,
+            messages=messages.listdict_messages,
+            stream=stream,
+            **self._model_config
+        )
+
+        if stream:
+            return StreamIterator(
+                response_stream=temp_response,
+                parse_content=parse_content,
+                return_raw_response=False,
+            )
+
+        raw_response = temp_response.json()
+        response = AssistantMessage(
+            content=temp_response.choices[0].message.content,
+            additional_kwargs=raw_response
+            if isinstance(raw_response, dict)
+            else json.loads(raw_response),
+        )
+        logger.debug(
+            f"[pne chat] response: {json.dumps(response.additional_kwargs, indent=2)}"
+        )
+        return response
+
+    def __call__(self, instruction: str, *args, **kwargs) -> str:
+        return self._predict(
+            MessageSet.from_listdict_data(
+                [
+                    {"content": "You are a helpful assistant.", "role": "system"},
+                    {"content": instruction, "role": "user"},
+                ]
+            )
+        ).content
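+
+# Usage sketch for _LiteLLM (illustrative only; assumes a valid provider key,
+# e.g. OPENAI_API_KEY, is configured in the environment):
+#
+# >>> llm = _LiteLLM(model="gpt-3.5-turbo", model_config={"temperature": 0})
+# >>> llm("Reply with the single word: pong")
+# 'pong'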
+
+
+def _convert_message(messages: Union[List, MessageSet, str]) -> MessageSet:
+    """Convert str or List[Dict] to MessageSet.
+
+    Args:
+        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
+            API type data(List[Dict]) or MessageSet type.
+
+    Returns:
+        Return MessageSet type data.
+    """
+    if isinstance(messages, str):
+        messages: List[Dict] = [
+            {"content": "You are a helpful assistant", "role": "system"},
+            {"content": messages, "role": "user"},
+        ]
+    if isinstance(messages, list):
+        messages: MessageSet = MessageSet.from_listdict_data(messages)
+
+    return messages
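+
+# Example of the conversion above (a sketch; shown via listdict_messages):
+#
+# >>> _convert_message("hello").listdict_messages
+# [{'role': 'system', 'content': 'You are a helpful assistant'},
+#  {'role': 'user', 'content': 'hello'}]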
+
+
+def _get_llm(
+    model: str = "gpt-3.5-turbo",
+    model_config: Optional[dict] = None,
+    custom_llm: Optional[BaseLLM] = None,
+) -> BaseLLM:
+    """Get LLM instance.
+
+    Args:
+        model(str): LLM model.
+        model_config(dict): LLM model config.
+        custom_llm(BaseLLM): custom LLM instance.
+
+    Returns:
+        Return LLM instance.
+    """
+    if custom_llm:
+        return custom_llm
+
+    return _LiteLLM(model=model, model_config=model_config)
+
+
+class AIChat:
+    def __init__(
+        self,
+        model: str = "gpt-3.5-turbo",
+        model_config: Optional[dict] = None,
+        tools: Optional[List[ToolTypes]] = None,
+        custom_llm: Optional[BaseLLM] = None,
+        enable_plan: bool = False,
+    ):
+        """Initialize the AIChat.
+
+        Args:
+            model(str): LLM model name, eg: "gpt-3.5-turbo".
+            model_config(Optional[dict]): LLM model config.
+            tools(Optional[List[ToolTypes]]): specified tools for llm, if exists, AIChat
+                will use Agent to run.
+            custom_llm(Optional[BaseLLM]): custom LLM instance.
+            enable_plan(bool): use Agent with plan ability if True.
+        """
+        self.llm: BaseLLM = _get_llm(model, model_config, custom_llm)
+        self.tools: Optional[List[ToolTypes]] = tools
+        self.agent: Optional[BaseAgent] = None
+
+        if tools:
+            if enable_plan:
+                self.agent = AssistantAgent(tools=self.tools, llm=self.llm)
+                logger.info("[pne chat] invoke AssistantAgent with plan ability.")
+            else:
+                self.agent = ToolAgent(tools=self.tools, llm=self.llm)
+                logger.info("[pne chat] invoke ToolAgent.")
+
+    def run(
+        self,
+        messages: Union[List, MessageSet, str],
+        output_schema: Optional[Type[BaseModel]] = None,
+        examples: Optional[List[BaseModel]] = None,
+        return_raw_response: bool = False,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
+        """Run the AIChat.
+
+        Args:
+            messages(Union[List, MessageSet, str]): chat messages. It can be str,
+                OpenAI API type data(List[Dict]) or MessageSet type.
+            output_schema(Optional[Type[BaseModel]]): specified return type. See
+                details in the OutputFormatter module.
+            examples(List[BaseModel]): examples for output_schema. See details in
+                the OutputFormatter module.
+            return_raw_response(bool): return the raw OpenAI completion result if
+                True, otherwise return str data.
+            stream(bool): return a stream iterator if True.
+
+        Returns:
+            Return str normally, i.e. when return_raw_response keeps its default
+                False. If tools are provided, the agent also returns str data.
+            Return BaseMessage if return_raw_response is True and not in agent mode.
+            Return StreamIterator if stream is True.
+            Return T if output_schema is provided.
+        """
+        if stream and (output_schema or self.tools):
+            raise ValueError(
+                "stream, tools and output_schema can't be True at the same time, "
+                "because stream is used to return Iterator[BaseMessage]."
+            )
+
+        if self.agent:
+            return self.agent.run(messages, output_schema=output_schema)
+
+        messages: MessageSet = _convert_message(messages)
+
+        # add output format into the last prompt if provided
+        if output_schema:
+            instruction: str = get_formatted_instructions(
+                json_schema=output_schema, examples=examples
+            )
+            messages.messages[-1].content += f"\n{instruction}"
+
+        logger.info(f"[pne chat] messages: {messages}")
+
+        response: AssistantMessage = self.llm.predict(messages, stream=stream, **kwargs)
+
+        logger.info(f"[pne chat] response: {response.additional_kwargs}")
+
+        # return output format if provided
+        if output_schema:
+            logger.info("[pne chat] return formatted response.")
+            return formatting_result(
+                pydantic_obj=output_schema, llm_output=response.content
+            )
+
+        return response if return_raw_response else response.content
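+
+# Usage sketch for AIChat (illustrative; assumes a configured provider key, and
+# the answer shown is a stand-in for real model output):
+#
+# >>> ai = AIChat(model="gpt-3.5-turbo")
+# >>> ai.run("What is the capital of France?")  # returns str by default
+# 'The capital of France is Paris.'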
+
+
+def chat(
+    messages: Union[List, MessageSet, str],
+    *,
+    model: str = "gpt-3.5-turbo",
+    model_config: Optional[dict] = None,
+    tools: Optional[List[ToolTypes]] = None,
+    output_schema: Optional[Type[BaseModel]] = None,
+    examples: Optional[List[BaseModel]] = None,
+    return_raw_response: bool = False,
+    custom_llm: Optional[BaseLLM] = None,
+    enable_plan: bool = False,
+    stream: bool = False,
+    **kwargs,
+) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
+    """A universal chat method, you can chat any model like OpenAI completion.
+    It should be noted that chat() is only support chat model currently.
+
+    Args:
+        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
+            API type data(List[Dict]) or MessageSet type.
+        model(str): LLM model. Currently only chat models are supported.
+        model_config(Optional[dict]): LLM model config.
+        tools(Optional[List[ToolTypes]]): specified tools for llm.
+        output_schema(Optional[Type[BaseModel]]): specified return type. See details
+            in the OutputFormatter module.
+        examples(List[BaseModel]): examples for output_schema. See details in the
+            OutputFormatter module.
+        return_raw_response(bool): return the raw OpenAI completion result if True,
+            otherwise return str data.
+        custom_llm(Optional[BaseLLM]): use a custom LLM instance if you have one.
+        enable_plan(bool): use Agent with plan ability if True.
+        stream(bool): return stream iterator if True.
+        **kwargs: litellm kwargs
+
+    Returns:
+        Return str normally, i.e. when return_raw_response keeps its default False.
+        Return BaseMessage if return_raw_response is True.
+        Return StreamIterator if stream is True.
+        Return T if output_schema is provided.
+    """
+    return AIChat(
+        model=model,
+        model_config=model_config,
+        tools=tools,
+        custom_llm=custom_llm,
+        enable_plan=enable_plan,
+    ).run(
+        messages=messages,
+        output_schema=output_schema,
+        examples=examples,
+        return_raw_response=return_raw_response,
+        stream=stream,
+        **kwargs,
+    )
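+
+# Usage sketch for chat() with a structured output. LLMResponse is a
+# hypothetical schema defined only for this example, and a provider key is
+# assumed to be configured:
+#
+# >>> from promptulate.pydantic_v1 import BaseModel
+# >>> class LLMResponse(BaseModel):
+# ...     city: str
+# >>> chat("What is the capital of France?", output_schema=LLMResponse)
+# LLMResponse(city='Paris')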
diff --git a/promptulate/schema.py b/promptulate/schema.py
index 56eb2cf3..38a7f817 100644
--- a/promptulate/schema.py
+++ b/promptulate/schema.py
@@ -1,302 +1,302 @@
-from abc import abstractmethod
-from enum import Enum
-from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Union
-
-from promptulate.pydantic_v1 import BaseModel, Field
-
-__all__ = [
-    "LLMType",
-    "BaseMessage",
-    "CompletionMessage",
-    "SystemMessage",
-    "UserMessage",
-    "AssistantMessage",
-    "MessageSet",
-    "init_chat_message_history",
-    "ToolTypes",
-    "StreamIterator",
-]
-
-if TYPE_CHECKING:
-    from langchain.tools.base import BaseTool as LangchainBaseToolType  # noqa
-    from promptulate.tools.base import BaseTool, Tool  # noqa
-
-ToolTypes = Union["BaseTool", "Tool", Callable, "LangchainBaseToolType"]
-
-
-class BaseMessage(BaseModel):
-    """Message basic object."""
-
-    content: str
-    additional_kwargs: dict = Field(default_factory=dict)
-
-    @property
-    @abstractmethod
-    def type(self) -> str:
-        """Type of the message, used for serialization."""
-
-
-class StreamIterator:
-    """
-    This class is an iterator for the response stream from the LLM model.
-
-    Attributes:
-        response_stream: The stream of responses from the LLM model.
-        parse_content: The callback function to parse the chunk.
-        return_raw_response: A boolean indicating whether to return the raw response
-        or not.
-    """
-
-    def __init__(
-        self,
-        response_stream,
-        parse_content: callable([[Any], [str, str]]),
-        return_raw_response: bool = False,
-    ):
-        """
-        The constructor for BaseStreamIterator class.
-
-        Parameters:
-            response_stream: The stream of responses from the LLM model.
-            return_raw_response (bool): A flag indicating whether to return the raw
-            response or not.
-        """
-        self.response_stream = response_stream
-        self.return_raw_response = return_raw_response
-        self.parse_content = parse_content
-
-    def __iter__(self) -> Union[Iterator[BaseMessage], Iterator[str]]:
-        """
-        The iterator method for the BaseStreamIterator class.
-
-        Returns:
-            self: An instance of the BaseStreamIterator class.
-        """
-        return self
-
-    def parse_chunk(self, chunk) -> Optional[Union[str, BaseMessage]]:
-        """
-        This method is used to parse a chunk from the response stream. It returns
-        None if the chunk is empty, otherwise it returns the parsed chunk.
-
-        Parameters:
-            chunk: The chunk to be parsed.
-
-        Returns:
-            Optional: The parsed chunk or None if the chunk is empty.
-        """
-        content, ret_data = self.parse_content(chunk)
-        if content is None:
-            return None
-        if self.return_raw_response:
-            additional_kwargs: dict = ret_data
-            message = AssistantMessage(
-                content=content,
-                additional_kwargs=additional_kwargs,
-            )
-            return message
-
-        return content
-
-    def __next__(self) -> Union[str, BaseMessage]:
-        """
-        The next method for the BaseStreamIterator class.
-
-        This method is used to get the next response from the LLM model. It iterates
-        over the response stream and parses each chunk using the parse_chunk method.
-        If the parsed chunk is not None, it returns the parsed chunk as the next
-        response. If there are no more messages in the response stream, it raises a
-        StopIteration exception.
-
-        Returns:
-            Union[str, BaseMessage]: The next response from the LLM model. If
-            return_raw_response is True, it returns an AssistantMessage instance,
-            otherwise it returns the content of the response as a string.
-        """
-        for chunk in self.response_stream:
-            message = self.parse_chunk(chunk)
-            if message is not None:
-                return message
-
-        # If there are no more messages, stop the iteration
-        raise StopIteration
-
-
-class CompletionMessage(BaseMessage):
-    """Type of completion message. Used in OpenAI currently"""
-
-    @property
-    def type(self) -> str:
-        return "completion"
-
-
-class SystemMessage(BaseMessage):
-    """Type of message that is a system message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        """Type of the message, used for serialization."""
-        return "system"
-
-
-class UserMessage(BaseMessage):
-    """Type of message that is a user message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        return "user"
-
-
-class AssistantMessage(BaseMessage):
-    """Type of message that is an assistant message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        return "assistant"
-
-
-MESSAGE_TYPE = {
-    "completion": CompletionMessage,
-    "system": SystemMessage,
-    "user": UserMessage,
-    "assistant": AssistantMessage,
-}
-
-
-class LLMType(str, Enum):
-    """All LLM type here"""
-
-    OpenAI = "OpenAI"
-    ChatOpenAI = "ChatOpenAI"
-    ErnieBot = "ErnieBot"
-    QianFan = "QianFan"
-    ZhiPu = "ZhiPu"
-
-
-class MessageSet:
-    """MessageSet can be used in Memory, LLMs, Framework and some else.
-    It's a universal chat message format in promptulate.
-    """
-
-    def __init__(
-        self,
-        messages: List[BaseMessage],
-        conversation_id: Optional[str] = None,
-        additional_kwargs: Optional[dict] = None,
-    ):
-        self.messages: List[BaseMessage] = messages
-        self.conversation_id: Optional[str] = conversation_id
-        self.additional_kwargs: dict = additional_kwargs or {}
-
-    @classmethod
-    def from_listdict_data(
-        cls, value: List[Dict], additional_kwargs: Optional[dict] = None
-    ) -> "MessageSet":
-        """initialize MessageSet from a List[Dict] data
-
-        Args:
-            value(List[Dict]): the example is as follows:
-                [
-                    {"type": "user", "content": "This is a message1."},
-                    {"type": "assistant", "content": "This is a message2."}
-                ]
-            additional_kwargs(Optional[dict]): additional kwargs
-
-        Returns:
-            initialized MessageSet
-        """
-        messages: List[BaseMessage] = [
-            MESSAGE_TYPE[item["role"]](content=item["content"]) for item in value
-        ]
-        return cls(messages=messages, additional_kwargs=additional_kwargs)
-
-    @property
-    def listdict_messages(self) -> List[Dict]:
-        converted_messages = []
-        for message in self.messages:
-            converted_messages.append(
-                {"role": message.type, "content": message.content}
-            )
-        return converted_messages
-
-    @property
-    def memory_messages(self) -> List[Dict]:
-        return self.listdict_messages
-
-    def to_llm_prompt(self, llm_type: LLMType) -> Any:
-        """Convert the MessageSet messages to specified llm prompt"""
-        if not llm_type:
-            ValueError(
-                "Missing llm_type, llm_type is needed if you want to use llm_prompt."
-            )
-        return _to_llm_prompt[llm_type](self)
-
-    @property
-    def string_messages(self) -> str:
-        """Convert the message to a string type, it can be used as a prompt for OpenAI
-        completion."""
-        string_result = ""
-        for message in self.messages:
-            string_result += f"{message.content}\n"
-        return string_result
-
-    def add_message(self, message: BaseMessage) -> None:
-        self.messages.append(message)
-
-    def add_completion_message(self, message: str) -> None:
-        self.messages.append(CompletionMessage(content=message))
-
-    def add_system_message(self, message: str) -> None:
-        self.messages.append(SystemMessage(content=message))
-
-    def add_user_message(self, message: str) -> None:
-        self.messages.append(UserMessage(content=message))
-
-    def add_ai_message(self, message: str) -> None:
-        self.messages.append(AssistantMessage(content=message))
-
-
-def init_chat_message_history(
-    system_content: str, user_content: str, llm: LLMType
-) -> MessageSet:
-    if llm == llm.ChatOpenAI or llm == llm.OpenAI:
-        messages = [
-            SystemMessage(content=system_content),
-            UserMessage(content=user_content),
-        ]
-    else:
-        messages = [
-            UserMessage(content=system_content),
-            AssistantMessage(content="好的"),
-            UserMessage(content=user_content),
-        ]
-    return MessageSet(messages=messages)
-
-
-def _to_openai_llm_prompt(message_set: MessageSet) -> str:
-    return message_set.string_messages
-
-
-def _to_chat_openai_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_ernie_bot_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_qian_fan_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_zhipu_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-_to_llm_prompt: Dict[LLMType, Callable] = {
-    LLMType.OpenAI: _to_openai_llm_prompt,
-    LLMType.ChatOpenAI: _to_chat_openai_llm_prompt,
-    LLMType.ErnieBot: _to_ernie_bot_llm_prompt,
-    LLMType.QianFan: _to_qian_fan_llm_prompt,
-    LLMType.ZhiPu: _to_zhipu_llm_prompt,
-}
+from abc import abstractmethod
+from enum import Enum
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Tuple,
+    Union,
+)
+
+from promptulate.pydantic_v1 import BaseModel, Field
+
+__all__ = [
+    "LLMType",
+    "BaseMessage",
+    "CompletionMessage",
+    "SystemMessage",
+    "UserMessage",
+    "AssistantMessage",
+    "MessageSet",
+    "init_chat_message_history",
+    "ToolTypes",
+    "StreamIterator",
+]
+
+if TYPE_CHECKING:
+    from langchain.tools.base import BaseTool as LangchainBaseToolType  # noqa
+    from promptulate.tools.base import BaseTool, Tool  # noqa
+
+ToolTypes = Union["BaseTool", "Tool", Callable, "LangchainBaseToolType"]
+
+
+class BaseMessage(BaseModel):
+    """Message basic object."""
+
+    content: str
+    additional_kwargs: dict = Field(default_factory=dict)
+
+    @property
+    @abstractmethod
+    def type(self) -> str:
+        """Type of the message, used for serialization."""
+
+
+class StreamIterator:
+    """
+    This class is an iterator for the response stream from the LLM model.
+
+    Attributes:
+        response_stream: The stream of responses from the LLM model.
+        parse_content: The callback function to parse the chunk.
+        return_raw_response: A boolean indicating whether to return the raw response
+        or not.
+    """
+
+    def __init__(
+        self,
+        response_stream,
+        parse_content: Callable[[Any], Tuple[str, dict]],
+        return_raw_response: bool = False,
+    ):
+        """
+        The constructor for the StreamIterator class.
+
+        Parameters:
+            response_stream: The stream of responses from the LLM model.
+            parse_content: The callback used to parse each chunk.
+            return_raw_response (bool): A flag indicating whether to return the raw
+            response or not.
+        """
+        self.response_stream = response_stream
+        self.return_raw_response = return_raw_response
+        self.parse_content = parse_content
+
+    def __iter__(self) -> Union[Iterator[BaseMessage], Iterator[str]]:
+        """
+        The iterator method for the StreamIterator class.
+
+        Returns:
+            self: An instance of the StreamIterator class.
+        """
+        return self
+
+    def parse_chunk(self, chunk) -> Optional[Union[str, BaseMessage]]:
+        """
+        This method is used to parse a chunk from the response stream. It returns
+        None if the chunk is empty, otherwise it returns the parsed chunk.
+
+        Parameters:
+            chunk: The chunk to be parsed.
+
+        Returns:
+            Optional: The parsed chunk or None if the chunk is empty.
+        """
+        content, ret_data = self.parse_content(chunk)
+        if content is None:
+            return None
+        if self.return_raw_response:
+            additional_kwargs: dict = ret_data
+            message = AssistantMessage(
+                content=content,
+                additional_kwargs=additional_kwargs,
+            )
+            return message
+
+        return content
+
+    def __next__(self) -> Union[str, BaseMessage]:
+        """
+        The next method for the StreamIterator class.
+
+        This method is used to get the next response from the LLM model. It iterates
+        over the response stream and parses each chunk using the parse_chunk method.
+        If the parsed chunk is not None, it returns the parsed chunk as the next
+        response. If there are no more messages in the response stream, it raises a
+        StopIteration exception.
+
+        Returns:
+            Union[str, BaseMessage]: The next response from the LLM model. If
+            return_raw_response is True, it returns an AssistantMessage instance,
+            otherwise it returns the content of the response as a string.
+        """
+        for chunk in self.response_stream:
+            message = self.parse_chunk(chunk)
+            if message is not None:
+                return message
+
+        # If there are no more messages, stop the iteration
+        raise StopIteration
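+
+# Consumption sketch for StreamIterator (illustrative; response_stream and
+# parse_fn stand in for a real litellm stream and its chunk parser):
+#
+# >>> stream = StreamIterator(response_stream, parse_fn, return_raw_response=False)
+# >>> for token in stream:  # yields str chunks, or AssistantMessage if raw
+# ...     print(token, end="")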
+
+
+class CompletionMessage(BaseMessage):
+    """Type of completion message. Used in OpenAI currently"""
+
+    @property
+    def type(self) -> str:
+        return "completion"
+
+
+class SystemMessage(BaseMessage):
+    """Type of message that is a system message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        """Type of the message, used for serialization."""
+        return "system"
+
+
+class UserMessage(BaseMessage):
+    """Type of message that is a user message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        return "user"
+
+
+class AssistantMessage(BaseMessage):
+    """Type of message that is an assistant message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        return "assistant"
+
+
+MESSAGE_TYPE = {
+    "completion": CompletionMessage,
+    "system": SystemMessage,
+    "user": UserMessage,
+    "assistant": AssistantMessage,
+}
+
+
+class LLMType(str, Enum):
+    """All LLM type here"""
+
+    OpenAI = "OpenAI"
+    ChatOpenAI = "ChatOpenAI"
+    ErnieBot = "ErnieBot"
+    QianFan = "QianFan"
+    ZhiPu = "ZhiPu"
+
+
+class MessageSet:
+    """MessageSet can be used in Memory, LLMs, Framework and some else.
+    It's a universal chat message format in promptulate.
+    """
+
+    def __init__(
+        self,
+        messages: List[BaseMessage],
+        conversation_id: Optional[str] = None,
+        additional_kwargs: Optional[dict] = None,
+    ):
+        self.messages: List[BaseMessage] = messages
+        self.conversation_id: Optional[str] = conversation_id
+        self.additional_kwargs: dict = additional_kwargs or {}
+
+    @classmethod
+    def from_listdict_data(
+        cls, value: List[Dict], additional_kwargs: Optional[dict] = None
+    ) -> "MessageSet":
+        """initialize MessageSet from a List[Dict] data
+
+        Args:
+            value(List[Dict]): the example is as follows:
+                [
+                    {"type": "user", "content": "This is a message1."},
+                    {"type": "assistant", "content": "This is a message2."}
+                ]
+            additional_kwargs(Optional[dict]): additional kwargs
+
+        Returns:
+            initialized MessageSet
+        """
+        messages: List[BaseMessage] = [
+            MESSAGE_TYPE[item["role"]](content=item["content"]) for item in value
+        ]
+        return cls(messages=messages, additional_kwargs=additional_kwargs)
+
+    @property
+    def listdict_messages(self) -> List[Dict]:
+        converted_messages = []
+        for message in self.messages:
+            converted_messages.append(
+                {"role": message.type, "content": message.content}
+            )
+        return converted_messages
+
+    @property
+    def memory_messages(self) -> List[Dict]:
+        return self.listdict_messages
+
+    def to_llm_prompt(self, llm_type: LLMType) -> Any:
+        """Convert the MessageSet messages to specified llm prompt"""
+        if not llm_type:
+            raise ValueError(
+                "Missing llm_type, llm_type is needed if you want to use llm_prompt."
+            )
+        return _to_llm_prompt[llm_type](self)
+
+    @property
+    def string_messages(self) -> str:
+        """Convert the message to a string type, it can be used as a prompt for OpenAI
+        completion."""
+        string_result = ""
+        for message in self.messages:
+            string_result += f"{message.content}\n"
+        return string_result
+
+    def add_message(self, message: BaseMessage) -> None:
+        self.messages.append(message)
+
+    def add_completion_message(self, message: str) -> None:
+        self.messages.append(CompletionMessage(content=message))
+
+    def add_system_message(self, message: str) -> None:
+        self.messages.append(SystemMessage(content=message))
+
+    def add_user_message(self, message: str) -> None:
+        self.messages.append(UserMessage(content=message))
+
+    def add_ai_message(self, message: str) -> None:
+        self.messages.append(AssistantMessage(content=message))
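+
+# A small sketch of building and serializing a MessageSet:
+#
+# >>> ms = MessageSet(messages=[])
+# >>> ms.add_system_message("You are a helpful assistant.")
+# >>> ms.add_user_message("hi")
+# >>> ms.listdict_messages
+# [{'role': 'system', 'content': 'You are a helpful assistant.'},
+#  {'role': 'user', 'content': 'hi'}]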
+
+
+def init_chat_message_history(
+    system_content: str, user_content: str, llm: LLMType
+) -> MessageSet:
+    if llm in (LLMType.ChatOpenAI, LLMType.OpenAI):
+        messages = [
+            SystemMessage(content=system_content),
+            UserMessage(content=user_content),
+        ]
+    else:
+        messages = [
+            UserMessage(content=system_content),
+            AssistantMessage(content="好的"),
+            UserMessage(content=user_content),
+        ]
+    return MessageSet(messages=messages)
+
+
+def _to_openai_llm_prompt(message_set: MessageSet) -> str:
+    return message_set.string_messages
+
+
+def _to_chat_openai_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_ernie_bot_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_qian_fan_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_zhipu_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+_to_llm_prompt: Dict[LLMType, Callable] = {
+    LLMType.OpenAI: _to_openai_llm_prompt,
+    LLMType.ChatOpenAI: _to_chat_openai_llm_prompt,
+    LLMType.ErnieBot: _to_ernie_bot_llm_prompt,
+    LLMType.QianFan: _to_qian_fan_llm_prompt,
+    LLMType.ZhiPu: _to_zhipu_llm_prompt,
+}
diff --git a/promptulate/tools/base.py b/promptulate/tools/base.py
index c68c6499..92b4ed70 100644
--- a/promptulate/tools/base.py
+++ b/promptulate/tools/base.py
@@ -1,385 +1,385 @@
-import inspect
-import warnings
-from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, List, Optional, Type, Union
-
-from promptulate.hook.base import Hook, HookTable
-from promptulate.pydantic_v1 import (
-    BaseModel,
-    Extra,
-    create_model,
-    validate_arguments,
-)
-from promptulate.utils.logger import logger
-
-
-class _SchemaConfig:
-    """Configuration for the pydantic model."""
-
-    extra: Any = Extra.forbid
-    arbitrary_types_allowed: bool = True
-
-
-def _create_subset_model(
-    name: str, model: BaseModel, field_names: list
-) -> Type[BaseModel]:
-    """Create a pydantic model with only a subset of model's fields."""
-    fields = {}
-    for field_name in field_names:
-        field = model.__fields__[field_name]
-        fields[field_name] = (field.outer_type_, field.field_info)
-    return create_model(name, **fields)
-
-
-def _pydantic_to_refined_schema(pydantic_obj: type(BaseModel)) -> Dict[str, Any]:
-    """Get refined schema(OpenAI function call type schema) from pydantic object."""
-    # Remove useless fields.
-    refined_schema = pydantic_obj.schema()
-
-    if "title" in refined_schema:
-        del refined_schema["title"]
-    for k, v in refined_schema["properties"].items():
-        if "title" in v:
-            del v["title"]
-
-    return refined_schema
-
-
-def _validate_refined_schema(schema: Dict) -> bool:
-    """Validate refined schema(OpenAI function call type schema).
-
-    Args:
-        schema: any dict
-
-    Returns:
-        bool: True if schema is openai function call type schema, False otherwise.
-    """
-    if "name" not in schema or "description" not in schema:
-        return False
-
-    if "properties" not in schema:
-        return False
-
-    return True
-
-
-def function_to_tool_schema(func: Callable) -> Dict[str, Any]:
-    """Create a tool schema from a function's signature.
-
-    Args:
-        func: Function to generate the schema from
-
-    Returns:
-        A OpenAI function call type json schema built by pydantic model.
-        ref: https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
-    """  # noqa
-    # https://docs.pydantic.dev/latest/usage/validation_decorator/
-    inferred_model = validate_arguments(func, config=_SchemaConfig).model  # type: ignore # noqa
-
-    # Extract function parameter names.
-    # Pydantic adds placeholder virtual fields we need to strip
-    signature = inspect.signature(func)
-    valid_properties: List[str] = [
-        param.name for param in signature.parameters.values()
-    ]
-
-    # Create a pydantic model with only the valid fields.
-    created_model = _create_subset_model(
-        f"{func.__name__}Schema", inferred_model, valid_properties
-    )
-    reduced_schema = created_model.schema()
-
-    # reduce schema
-    reduced_schema["description"] = func.__doc__ or ""
-    reduced_schema["name"] = func.__name__
-
-    if "title" in reduced_schema:
-        del reduced_schema["title"]
-    for k, v in reduced_schema["properties"].items():
-        if "title" in v:
-            del v["title"]
-
-    return reduced_schema
-
-
-class BaseTool(ABC, BaseModel):
-    """Interface tools must implement."""
-
-    name: str
-    """The unique name of the tool that clearly communicates its purpose."""
-    description: str
-    """Used to tell the model how/when/why to use the tool.
-    You can provide few-shot examples as a part of the description."""
-    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
-    """The parameters that the tool accepts. This can be a dictionary or a Pydantic
-    model."""
-    example: List[str] = None
-    """Show how to use this tool. This is few shot for agent. You few shot may like:
-
-    example1 = "Question: What is 37593 * 67?\n```\n37593 * 67\n```\nnumexpr.evaluate("37593 * 67")\nAnswer:2518731"
-    example2 = "Question: What is 37593^(1/5)?\n```\n37593**(1/5)\n```\nnumexpr.evaluate("37593**(1/5)")\nAnswer:8.222831614237718"
-    few_shot_example = [example1, example2]
-    """  # noqa
-
-    def __init__(self, **kwargs):
-        """Custom tool config.
-
-        Args:
-            **kwargs:
-                hooks(List[Callable]): for adding to hook_manager
-        """
-        warnings.warn(
-            "BaseTool is deprecated at v1.7.0. promptulate.tools.base.Tool is recommended.",  # noqa: E501
-            DeprecationWarning,
-        )
-        super().__init__(**kwargs)
-        if "hooks" in kwargs and kwargs["hooks"]:
-            for hook in kwargs["hooks"]:
-                Hook.mount_instance_hook(hook, self)
-        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
-
-    class Config:
-        arbitrary_types_allowed = True
-        extra = Extra.allow
-
-    def run(self, *args, **kwargs):
-        """run the tool including specified function and hooks"""
-        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
-        result: Any = self._run(*args, **kwargs)
-        logger.debug(f"[pne tool result] {result}")
-        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
-        return result
-
-    @abstractmethod
-    def _run(self, *args, **kwargs):
-        """Run detail business, implemented by subclass."""
-        raise NotImplementedError()
-
-
-class Tool(ABC):
-    """Abstract base class for tools. All tools must implement this interface."""
-
-    name: str
-    """Tool name"""
-    description: str
-    """Tool description"""
-    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
-    """Tool parameters"""
-
-    def __init__(self, *args, **kwargs):
-        self.check_params()
-        if "hooks" in kwargs and kwargs["hooks"]:
-            for hook in kwargs["hooks"]:
-                Hook.mount_instance_hook(hook, self)
-        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
-
-    def check_params(self):
-        """Check parameters when initialization."""
-        if not getattr(self, "name", None) or not getattr(self, "description", None):
-            raise TypeError(
-                f"{self.__class__.__name__} required parameters 'name' and 'description'."  # noqa: E501
-            )
-
-    def run(self, *args, **kwargs):
-        """run the tool including specified function and hooks"""
-        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
-        result: Any = self._run(*args, **kwargs)
-        logger.debug(f"[pne tool response] name: {self.name} result: {result}")
-        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
-        return result
-
-    @abstractmethod
-    def _run(self, *args, **kwargs):
-        """Run detail business, implemented by subclass."""
-        raise NotImplementedError()
-
-    def to_schema(self) -> Dict[str, Any]:
-        """
-        Converts the Tool instance to a OpenAI function call type JSON schema.
-
-        Returns:
-            dict: A dictionary representing the JSON schema of the Tool instance.
-        """
-        # If there are no parameters, return the basic schema.
-        if not self.parameters:
-            return {
-                "name": self.name,
-                "description": self.description,
-            }
-
-        # If parameters are defined by a Pydantic BaseModel, convert to schema.
-        if isinstance(self.parameters, type) and issubclass(self.parameters, BaseModel):
-            return {
-                "name": self.name,
-                "description": self.description,
-                "parameters": _pydantic_to_refined_schema(self.parameters),
-            }
-
-        # If parameters are defined by a schema dictionary, validate and return it.
-        if isinstance(self.parameters, dict):
-            if not _validate_refined_schema(self.parameters):
-                raise ValueError(
-                    f"The 'parameters' dictionary for {self.__class__.__name__} does not conform to the expected schema."  # noqa: E501
-                )
-            return self.parameters
-
-        # If parameters are neither a BaseModel nor a dictionary, raise an error.
-        raise TypeError(
-            f"The 'parameters' attribute of {self.__class__.__name__} must be either a subclass of BaseModel or a dictionary representing a schema."  # noqa: E501
-        )
-
-
-class ToolImpl(Tool):
-    def __init__(
-        self,
-        name: str,
-        description: str,
-        callback: Callable,
-        parameters: Union[dict, BaseModel] = None,
-        **kwargs,
-    ):
-        self.name: str = name
-        self.description: str = description
-        self.callback: Callable = callback
-        self.parameters: Union[dict, BaseModel] = parameters
-
-        super().__init__(**kwargs)
-
-    @classmethod
-    def from_function(cls, func: Callable) -> "ToolImpl":
-        """Create a ToolImpl instance from a function.
-
-        Args:
-            func: Function to create the ToolImpl instance from.
-
-        Returns:
-            A ToolImpl instance.
-        """
-        if not func.__doc__:
-            err_msg = """Please add docstring and variable type declarations for your function.Here is a best practice:
-def web_search(keyword: str, top_k: int = 10) -> str:
-    \"""search by keyword in web.
-    Args:
-        keyword: keyword to search
-        top_k: top k results to return
-
-    Returns:
-        str: search result
-    \"""
-    return "result"
-
-            """  # noqa
-            raise ValueError(err_msg)
-
-        schema = function_to_tool_schema(func)
-        return cls(
-            name=func.__name__,
-            description=func.__doc__,
-            callback=func,
-            parameters=schema,
-        )
-
-    @classmethod
-    def from_define_tool(
-        cls,
-        callback: Callable,
-        name: str = None,
-        description: str = None,
-        parameters: Optional[Union[Dict, Type[BaseModel]]] = None,
-    ) -> "ToolImpl":
-        """Create a ToolImpl instance from a function.
-
-        Args:
-            callback: Function to create the ToolImpl instance from.
-            name: tool name
-            description: tool description
-            parameters: tool parameters
-
-        Returns:
-            A ToolImpl instance.
-        """
-        if not parameters:
-            schema: dict = function_to_tool_schema(callback)
-        elif isinstance(parameters, dict) and _validate_refined_schema(parameters):
-            schema: dict = parameters
-        elif isinstance(parameters, type) and issubclass(parameters, BaseModel):
-            schema: dict = _pydantic_to_refined_schema(parameters)
-        else:
-            raise TypeError(
-                f"{[cls.__name__]} parameters must be BaseModel or JSON schema."
-            )
-
-        _description = description or ""
-        _doc = callback.__doc__ or ""
-
-        return cls(
-            name=name or callback.__name__,
-            description=f"{_description}\n{_doc}",
-            callback=callback,
-            parameters=schema,
-        )
-
-    @classmethod
-    def from_base_tool(cls, tool: BaseTool) -> "ToolImpl":
-        """Create a ToolImpl instance from a BaseTool instance.
-
-        Args:
-            tool: BaseTool instance to create the ToolImpl instance from.
-
-        Returns:
-            A ToolImpl instance.
-        """
-
-        return cls(
-            name=tool.name,
-            description=tool.description,
-            callback=tool.run,
-            parameters=tool.parameters,
-        )
-
-    def _run(self, *args, **kwargs):
-        return self.callback(*args, **kwargs)
-
-
-def define_tool(
-    *,
-    callback: Callable,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-    parameters: Union[dict, Type[BaseModel]] = None,
-) -> ToolImpl:
-    """
-    A tool with llm or API wrapper will automatically initialize the llm and API wrapper
-    classes, which can avoid this problem by initializing in this way.
-
-    Args:
-        callback: tool function when running
-        name: tool name
-        description: tool description
-        parameters: tool parameters
-
-    Returns:
-        A ToolImpl class (subclass of Tool).
-    """
-
-    return ToolImpl.from_define_tool(
-        callback=callback, name=name, description=description, parameters=parameters
-    )
-
-
-def function_to_tool(func: Callable) -> ToolImpl:
-    """Converts a function to a ToolImpl instance.
-
-    Args:
-        func: Function to convert to a ToolImpl instance.
-
-    Returns:
-        A ToolImpl instance.
-    """
-    return ToolImpl.from_function(func)
-
-
-class BaseToolKit:
-    @abstractmethod
-    def get_tools(self):
-        """get tools in the toolkit"""
+import inspect
+import warnings
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, List, Optional, Type, Union
+
+from promptulate.hook.base import Hook, HookTable
+from promptulate.pydantic_v1 import (
+    BaseModel,
+    Extra,
+    create_model,
+    validate_arguments,
+)
+from promptulate.utils.logger import logger
+
+
+class _SchemaConfig:
+    """Configuration for the pydantic model."""
+
+    extra: Any = Extra.forbid
+    arbitrary_types_allowed: bool = True
+
+
+def _create_subset_model(
+    name: str, model: BaseModel, field_names: list
+) -> Type[BaseModel]:
+    """Create a pydantic model with only a subset of model's fields."""
+    fields = {}
+    for field_name in field_names:
+        field = model.__fields__[field_name]
+        fields[field_name] = (field.outer_type_, field.field_info)
+    return create_model(name, **fields)
+
+
+def _pydantic_to_refined_schema(pydantic_obj: Type[BaseModel]) -> Dict[str, Any]:
+    """Get refined schema(OpenAI function call type schema) from pydantic object."""
+    # Remove useless fields.
+    refined_schema = pydantic_obj.schema()
+
+    if "title" in refined_schema:
+        del refined_schema["title"]
+    for k, v in refined_schema["properties"].items():
+        if "title" in v:
+            del v["title"]
+
+    return refined_schema
+
+
+def _validate_refined_schema(schema: Dict) -> bool:
+    """Validate refined schema(OpenAI function call type schema).
+
+    Args:
+        schema: any dict
+
+    Returns:
+        bool: True if schema is openai function call type schema, False otherwise.
+    """
+    if "name" not in schema or "description" not in schema:
+        return False
+
+    if "properties" not in schema:
+        return False
+
+    return True
+
+
+def function_to_tool_schema(func: Callable) -> Dict[str, Any]:
+    """Create a tool schema from a function's signature.
+
+    Args:
+        func: Function to generate the schema from
+
+    Returns:
+        An OpenAI function call type JSON schema built from a pydantic model.
+        ref: https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
+    """  # noqa
+    # https://docs.pydantic.dev/latest/usage/validation_decorator/
+    inferred_model = validate_arguments(func, config=_SchemaConfig).model  # type: ignore # noqa
+
+    # Extract function parameter names.
+    # Pydantic adds placeholder virtual fields we need to strip
+    signature = inspect.signature(func)
+    valid_properties: List[str] = [
+        param.name for param in signature.parameters.values()
+    ]
+
+    # Create a pydantic model with only the valid fields.
+    created_model = _create_subset_model(
+        f"{func.__name__}Schema", inferred_model, valid_properties
+    )
+    reduced_schema = created_model.schema()
+
+    # reduce schema
+    reduced_schema["description"] = func.__doc__ or ""
+    reduced_schema["name"] = func.__name__
+
+    if "title" in reduced_schema:
+        del reduced_schema["title"]
+    for k, v in reduced_schema["properties"].items():
+        if "title" in v:
+            del v["title"]
+
+    return reduced_schema
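+
+# Sketch of the schema produced for a small typed function (exact field
+# details may vary across pydantic versions):
+#
+# >>> def add(a: int, b: int) -> int:
+# ...     """Add two integers."""
+# ...     return a + b
+# >>> schema = function_to_tool_schema(add)
+# >>> schema["name"], schema["description"]
+# ('add', 'Add two integers.')
+# >>> sorted(schema["properties"])
+# ['a', 'b']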
+
+
+class BaseTool(ABC, BaseModel):
+    """Interface tools must implement."""
+
+    name: str
+    """The unique name of the tool that clearly communicates its purpose."""
+    description: str
+    """Used to tell the model how/when/why to use the tool.
+    You can provide few-shot examples as a part of the description."""
+    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
+    """The parameters that the tool accepts. This can be a dictionary or a Pydantic
+    model."""
+    example: Optional[List[str]] = None
+    """Show how to use this tool. These are few-shot examples for the agent. Your few-shot examples may look like:
+
+    example1 = 'Question: What is 37593 * 67?\n```\n37593 * 67\n```\nnumexpr.evaluate("37593 * 67")\nAnswer:2518731'
+    example2 = 'Question: What is 37593^(1/5)?\n```\n37593**(1/5)\n```\nnumexpr.evaluate("37593**(1/5)")\nAnswer:8.222831614237718'
+    few_shot_example = [example1, example2]
+    """  # noqa
+
+    def __init__(self, **kwargs):
+        """Custom tool config.
+
+        Args:
+            **kwargs:
+                hooks(List[Callable]): for adding to hook_manager
+        """
+        warnings.warn(
+            "BaseTool is deprecated at v1.7.0. promptulate.tools.base.Tool is recommended.",  # noqa: E501
+            DeprecationWarning,
+        )
+        super().__init__(**kwargs)
+        if "hooks" in kwargs and kwargs["hooks"]:
+            for hook in kwargs["hooks"]:
+                Hook.mount_instance_hook(hook, self)
+        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
+
+    class Config:
+        arbitrary_types_allowed = True
+        extra = Extra.allow
+
+    def run(self, *args, **kwargs):
+        """run the tool including specified function and hooks"""
+        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
+        result: Any = self._run(*args, **kwargs)
+        logger.debug(f"[pne tool result] {result}")
+        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
+        return result
+
+    @abstractmethod
+    def _run(self, *args, **kwargs):
+        """Run detail business, implemented by subclass."""
+        raise NotImplementedError()
+
+
+class Tool(ABC):
+    """Abstract base class for tools. All tools must implement this interface."""
+
+    name: str
+    """Tool name"""
+    description: str
+    """Tool description"""
+    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
+    """Tool parameters"""
+
+    def __init__(self, *args, **kwargs):
+        self.check_params()
+        if "hooks" in kwargs and kwargs["hooks"]:
+            for hook in kwargs["hooks"]:
+                Hook.mount_instance_hook(hook, self)
+        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
+
+    def check_params(self):
+        """Check parameters when initialization."""
+        if not getattr(self, "name", None) or not getattr(self, "description", None):
+            raise TypeError(
+                f"{self.__class__.__name__} required parameters 'name' and 'description'."  # noqa: E501
+            )
+
+    def run(self, *args, **kwargs):
+        """run the tool including specified function and hooks"""
+        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
+        result: Any = self._run(*args, **kwargs)
+        logger.debug(f"[pne tool response] name: {self.name} result: {result}")
+        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
+        return result
+
+    @abstractmethod
+    def _run(self, *args, **kwargs):
+        """Run detail business, implemented by subclass."""
+        raise NotImplementedError()
+
+    def to_schema(self) -> Dict[str, Any]:
+        """
+        Converts the Tool instance to an OpenAI function call type JSON schema.
+
+        Returns:
+            dict: A dictionary representing the JSON schema of the Tool instance.
+        """
+        # If there are no parameters, return the basic schema.
+        if not self.parameters:
+            return {
+                "name": self.name,
+                "description": self.description,
+            }
+
+        # If parameters are defined by a Pydantic BaseModel, convert to schema.
+        if isinstance(self.parameters, type) and issubclass(self.parameters, BaseModel):
+            return {
+                "name": self.name,
+                "description": self.description,
+                "parameters": _pydantic_to_refined_schema(self.parameters),
+            }
+
+        # If parameters are defined by a schema dictionary, validate and return it.
+        if isinstance(self.parameters, dict):
+            if not _validate_refined_schema(self.parameters):
+                raise ValueError(
+                    f"The 'parameters' dictionary for {self.__class__.__name__} does not conform to the expected schema."  # noqa: E501
+                )
+            return self.parameters
+
+        # If parameters are neither a BaseModel nor a dictionary, raise an error.
+        raise TypeError(
+            f"The 'parameters' attribute of {self.__class__.__name__} must be either a subclass of BaseModel or a dictionary representing a schema."  # noqa: E501
+        )
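+
+# A minimal sketch of a concrete Tool subclass (illustrative only):
+#
+# >>> class EchoTool(Tool):
+# ...     name: str = "echo"
+# ...     description: str = "Echo the input back to the caller."
+# ...
+# ...     def _run(self, text: str) -> str:
+# ...         return text
+# >>> EchoTool().run("hi")
+# 'hi'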
+
+
+class ToolImpl(Tool):
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        callback: Callable,
+        parameters: Optional[Union[dict, Type[BaseModel]]] = None,
+        **kwargs,
+    ):
+        self.name: str = name
+        self.description: str = description
+        self.callback: Callable = callback
+        self.parameters: Optional[Union[dict, Type[BaseModel]]] = parameters
+
+        super().__init__(**kwargs)
+
+    @classmethod
+    def from_function(cls, func: Callable) -> "ToolImpl":
+        """Create a ToolImpl instance from a function.
+
+        Args:
+            func: Function to create the ToolImpl instance from.
+
+        Returns:
+            A ToolImpl instance.
+        """
+        if not func.__doc__:
+            err_msg = """Please add docstring and variable type declarations for your function.Here is a best practice:
+def web_search(keyword: str, top_k: int = 10) -> str:
+    \"""search by keyword in web.
+    Args:
+        keyword: keyword to search
+        top_k: top k results to return
+
+    Returns:
+        str: search result
+    \"""
+    return "result"
+
+            """  # noqa
+            raise ValueError(err_msg)
+
+        schema = function_to_tool_schema(func)
+        return cls(
+            name=func.__name__,
+            description=func.__doc__,
+            callback=func,
+            parameters=schema,
+        )
+
+    @classmethod
+    def from_define_tool(
+        cls,
+        callback: Callable,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        parameters: Optional[Union[Dict, Type[BaseModel]]] = None,
+    ) -> "ToolImpl":
+        """Create a ToolImpl instance from a function.
+
+        Args:
+            callback: Function to create the ToolImpl instance from.
+            name: tool name
+            description: tool description
+            parameters: tool parameters
+
+        Returns:
+            A ToolImpl instance.
+        """
+        if not parameters:
+            schema: dict = function_to_tool_schema(callback)
+        elif isinstance(parameters, dict) and _validate_refined_schema(parameters):
+            schema: dict = parameters
+        elif isinstance(parameters, type) and issubclass(parameters, BaseModel):
+            schema: dict = _pydantic_to_refined_schema(parameters)
+        else:
+            raise TypeError(
+                f"{[cls.__name__]} parameters must be BaseModel or JSON schema."
+            )
+
+        _description = description or ""
+        _doc = callback.__doc__ or ""
+
+        return cls(
+            name=name or callback.__name__,
+            description=f"{_description}\n{_doc}",
+            callback=callback,
+            parameters=schema,
+        )
+
+    @classmethod
+    def from_base_tool(cls, tool: BaseTool) -> "ToolImpl":
+        """Create a ToolImpl instance from a BaseTool instance.
+
+        Args:
+            tool: BaseTool instance to create the ToolImpl instance from.
+
+        Returns:
+            A ToolImpl instance.
+        """
+
+        return cls(
+            name=tool.name,
+            description=tool.description,
+            callback=tool.run,
+            parameters=tool.parameters,
+        )
+
+    def _run(self, *args, **kwargs):
+        return self.callback(*args, **kwargs)
+
+
+def define_tool(
+    *,
+    callback: Callable,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    parameters: Optional[Union[dict, Type[BaseModel]]] = None,
+) -> ToolImpl:
+    """
+    A tool with llm or API wrapper will automatically initialize the llm and API wrapper
+    classes, which can avoid this problem by initializing in this way.
+
+    Args:
+        callback: tool function when running
+        name: tool name
+        description: tool description
+        parameters: tool parameters
+
+    Returns:
+        A ToolImpl class (subclass of Tool).
+    """
+
+    return ToolImpl.from_define_tool(
+        callback=callback, name=name, description=description, parameters=parameters
+    )
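+
+# Usage sketch for define_tool (illustrative):
+#
+# >>> def web_search(keyword: str, top_k: int = 10) -> str:
+# ...     """search by keyword in web."""
+# ...     return f"results for {keyword}"
+# >>> tool = define_tool(callback=web_search)
+# >>> tool.run("promptulate")
+# 'results for promptulate'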
+
+
+def function_to_tool(func: Callable) -> ToolImpl:
+    """Converts a function to a ToolImpl instance.
+
+    Args:
+        func: Function to convert to a ToolImpl instance.
+
+    Returns:
+        A ToolImpl instance.
+    """
+    return ToolImpl.from_function(func)
+
+
+class BaseToolKit:
+    @abstractmethod
+    def get_tools(self):
+        """get tools in the toolkit"""
diff --git a/promptulate/tools/file/toolkit.py b/promptulate/tools/file/toolkit.py
index f30c41e5..35a4a59d 100644
--- a/promptulate/tools/file/toolkit.py
+++ b/promptulate/tools/file/toolkit.py
@@ -1,51 +1,51 @@
-import os
-from typing import List, Optional
-
-from promptulate.tools.base import BaseToolKit, Tool
-from promptulate.tools.file.tools import (
-    AppendFileTool,
-    CopyFileTool,
-    DeleteFileTool,
-    ListDirectoryTool,
-    MoveFileTool,
-    ReadFileTool,
-    WriteFileTool,
-)
-
-TOOL_MAPPER = {
-    "write": WriteFileTool,
-    "append": AppendFileTool,
-    "read": ReadFileTool,
-    "delete": DeleteFileTool,
-    "list": ListDirectoryTool,
-    "copy": CopyFileTool,
-    "move": MoveFileTool,
-}
-
-
-class FileToolKit(BaseToolKit):
-    """File ToolKit
-
-    Args:
-        root_dir: The root directory of the file tool.
-        selected_tools: The selected tools of the file tool.
-
-    Returns:
-        The instance object of the corresponding tool
-    """
-
-    def __init__(self, root_dir: str = None, modes: Optional[List[str]] = None) -> None:
-        self.root_dir = root_dir or os.getcwd()
-        self.modes = modes or []
-
-        for mode in self.modes:
-            if mode not in TOOL_MAPPER.keys():
-                raise ValueError(
-                    f"{mode} does not exist.\n"
-                    f"Please select from {list(TOOL_MAPPER.keys())}"
-                )
-
-    def get_tools(self) -> List[Tool]:
-        if self.modes:
-            return [TOOL_MAPPER[mode](self.root_dir) for mode in self.modes]
-        return [tool(self.root_dir) for tool in TOOL_MAPPER.values()]
+import os
+from typing import List, Optional
+
+from promptulate.tools.base import BaseToolKit, Tool
+from promptulate.tools.file.tools import (
+    AppendFileTool,
+    CopyFileTool,
+    DeleteFileTool,
+    ListDirectoryTool,
+    MoveFileTool,
+    ReadFileTool,
+    WriteFileTool,
+)
+
+TOOL_MAPPER = {
+    "write": WriteFileTool,
+    "append": AppendFileTool,
+    "read": ReadFileTool,
+    "delete": DeleteFileTool,
+    "list": ListDirectoryTool,
+    "copy": CopyFileTool,
+    "move": MoveFileTool,
+}
+
+
+class FileToolKit(BaseToolKit):
+    """File ToolKit
+
+    Args:
+        root_dir: The root directory of the file tool.
+        selected_tools: The selected tools of the file tool.
+
+    Returns:
+        The instance object of the corresponding tool
+    """
+
+    def __init__(self, root_dir: str = None, modes: Optional[List[str]] = None) -> None:
+        self.root_dir = root_dir or os.getcwd()
+        self.modes = modes or []
+
+        for mode in self.modes:
+            if mode not in TOOL_MAPPER:
+                raise ValueError(
+                    f"{mode} does not exist.\n"
+                    f"Please select from {list(TOOL_MAPPER.keys())}"
+                )
+
+    def get_tools(self) -> List[Tool]:
+        if self.modes:
+            return [TOOL_MAPPER[mode](self.root_dir) for mode in self.modes]
+        return [tool(self.root_dir) for tool in TOOL_MAPPER.values()]
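
A short sketch of the toolkit above: `modes` restricts which file tools are exposed, and an unknown mode fails fast. The `./workspace` path is a placeholder:

```python
from promptulate.tools.file.toolkit import FileToolKit

# Expose only the read and write file tools, rooted at ./workspace.
toolkit = FileToolKit(root_dir="./workspace", modes=["read", "write"])
for tool in toolkit.get_tools():
    print(tool.name)

# An unsupported mode raises ValueError listing the valid options.
try:
    FileToolKit(modes=["rename"])
except ValueError as err:
    print(err)
```
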
diff --git a/promptulate/tools/manager.py b/promptulate/tools/manager.py
index 16a1fb3a..18d19c66 100644
--- a/promptulate/tools/manager.py
+++ b/promptulate/tools/manager.py
@@ -1,110 +1,110 @@
-import inspect
-import json
-from typing import Any, List, Optional, Union
-
-from promptulate.schema import ToolTypes
-from promptulate.tools.base import BaseTool, Tool, ToolImpl, function_to_tool
-from promptulate.tools.langchain.tools import LangchainTool
-
-
-def _judge_langchain_tool_and_wrap(tool: Any) -> Optional[Tool]:
-    """Judge if the tool is a langchain tool and wrap it.
-
-    Args:
-        tool(Any): The tool to be judged.
-
-    Returns:
-        Optional[Tool]: The wrapped tool or None if not a langchain tool.
-    """
-    try:
-        from langchain.tools.base import BaseTool as LangchainBaseTool
-
-        if isinstance(tool, LangchainBaseTool):
-            return LangchainTool(tool)
-
-    except ImportError:
-        raise ValueError(
-            (
-                f"Error tool type {tool}, please check the tool type.",
-                "If you are using langchain tool, please install -U langchain.",
-            )
-        )
-
-
-def _initialize_tool(tool: ToolTypes) -> Optional[Tool]:
-    """Initialize the tool.
-
-    Args:
-        tool(Union[BaseTool, Callable, Tool, "LangchainBaseToolType"]): The tool to be
-            initialized.
-
-    Returns:
-        Optional[Tool]: The initialized tool.
-    """
-    if isinstance(tool, BaseTool):
-        return ToolImpl.from_base_tool(tool)
-    elif isinstance(tool, Tool):
-        return tool
-    elif inspect.isfunction(tool):
-        return function_to_tool(tool)
-
-    return _judge_langchain_tool_and_wrap(tool)
-
-
-class ToolManager:
-    """ToolManager helps Agent to manage tools"""
-
-    def __init__(self, tools: List[ToolTypes]):
-        self.tools: List[Tool] = [
-            _initialize_tool(tool)
-            for tool in tools
-            if _initialize_tool(tool) is not None
-        ]
-
-    def get_tool(self, tool_name: str) -> Optional[Tool]:
-        """Find specified tool by tool name.
-        Args:
-            tool_name(str): The name of the tool.
-
-        Returns:
-            Optional[Tool]: The specified tool or None if not found.
-        """
-        return next((tool for tool in self.tools if tool.name == tool_name), None)
-
-    def run_tool(self, tool_name: str, parameters: Union[str, dict]) -> str:
-        """Run tool by input tool name and data inputs
-
-        Args:
-            tool_name(str): The name of the tool.
-            parameters(Union[str, dict]): The parameters for the tool.
-
-        Returns:
-            str: The result of the tool.
-        """
-        tool = self.get_tool(tool_name)
-
-        if tool is None:
-            return (
-                f"{tool_name} has not been provided yet, please use the provided tool."
-            )
-
-        if isinstance(parameters, dict):
-            return tool.run(**parameters)
-        else:
-            return tool.run(parameters)
-
-    @property
-    def tool_names(self) -> str:
-        """Get all tool names."""
-        tool_names = ""
-        for tool in self.tools:
-            tool_names += f"{tool.name}, "
-        return tool_names[:-2]
-
-    @property
-    def tool_descriptions(self) -> str:
-        """Get all tool descriptions, including the schema if available."""
-        tool_descriptions = ""
-        for tool in self.tools:
-            tool_descriptions += json.dumps(tool.to_schema()) + "\n"
-        return tool_descriptions
+import inspect
+import json
+from typing import Any, List, Optional, Union
+
+from promptulate.schema import ToolTypes
+from promptulate.tools.base import BaseTool, Tool, ToolImpl, function_to_tool
+from promptulate.tools.langchain.tools import LangchainTool
+
+
+def _judge_langchain_tool_and_wrap(tool: Any) -> Optional[Tool]:
+    """Judge if the tool is a langchain tool and wrap it.
+
+    Args:
+        tool(Any): The tool to be judged.
+
+    Returns:
+        Optional[Tool]: The wrapped tool or None if not a langchain tool.
+    """
+    try:
+        from langchain.tools.base import BaseTool as LangchainBaseTool
+
+        if isinstance(tool, LangchainBaseTool):
+            return LangchainTool(tool)
+
+    except ImportError:
+        raise ValueError(
+            f"Unknown tool type {tool}, please check the tool type. "
+            "If you are using a langchain tool, please install langchain first, "
+            "e.g. `pip install -U langchain`."
+        )
+
+
+def _initialize_tool(tool: ToolTypes) -> Optional[Tool]:
+    """Initialize the tool.
+
+    Args:
+        tool(Union[BaseTool, Callable, Tool, "LangchainBaseToolType"]): The tool to be
+            initialized.
+
+    Returns:
+        Optional[Tool]: The initialized tool.
+    """
+    if isinstance(tool, BaseTool):
+        return ToolImpl.from_base_tool(tool)
+    elif isinstance(tool, Tool):
+        return tool
+    elif inspect.isfunction(tool):
+        return function_to_tool(tool)
+
+    return _judge_langchain_tool_and_wrap(tool)
+
+
+class ToolManager:
+    """ToolManager helps Agent to manage tools"""
+
+    def __init__(self, tools: List[ToolTypes]):
+        # Initialize each tool once, keeping only those that resolve to a Tool.
+        initialized = (_initialize_tool(tool) for tool in tools)
+        self.tools: List[Tool] = [tool for tool in initialized if tool is not None]
+
+    def get_tool(self, tool_name: str) -> Optional[Tool]:
+        """Find specified tool by tool name.
+        Args:
+            tool_name(str): The name of the tool.
+
+        Returns:
+            Optional[Tool]: The specified tool or None if not found.
+        """
+        return next((tool for tool in self.tools if tool.name == tool_name), None)
+
+    def run_tool(self, tool_name: str, parameters: Union[str, dict]) -> str:
+        """Run tool by input tool name and data inputs
+
+        Args:
+            tool_name(str): The name of the tool.
+            parameters(Union[str, dict]): The parameters for the tool.
+
+        Returns:
+            str: The result of the tool.
+        """
+        tool = self.get_tool(tool_name)
+
+        if tool is None:
+            return (
+                f"{tool_name} has not been provided yet. "
+                "Please use one of the provided tools."
+            )
+
+        if isinstance(parameters, dict):
+            return tool.run(**parameters)
+        else:
+            return tool.run(parameters)
+
+    @property
+    def tool_names(self) -> str:
+        """Get all tool names."""
+        tool_names = ""
+        for tool in self.tools:
+            tool_names += f"{tool.name}, "
+        return tool_names[:-2]
+
+    @property
+    def tool_descriptions(self) -> str:
+        """Get all tool descriptions, including the schema if available."""
+        tool_descriptions = ""
+        for tool in self.tools:
+            tool_descriptions += json.dumps(tool.to_schema()) + "\n"
+        return tool_descriptions
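
A minimal sketch of the manager above with a plain function: `_initialize_tool` converts it via `function_to_tool`, and `run_tool` dispatches by name. The `add` function is illustrative:

```python
from promptulate.tools.manager import ToolManager


def add(a: int, b: int) -> int:
    """Add two integers together."""
    return a + b


manager = ToolManager(tools=[add])
print(manager.tool_names)                         # -> add
print(manager.run_tool("add", {"a": 1, "b": 2}))  # -> 3
print(manager.run_tool("sub", {}))                # -> fallback message
```
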
diff --git a/pyproject.toml b/pyproject.toml
index 9c5b4bb9..95827d7d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,121 +1,121 @@
-[build-system]
-build-backend = "poetry.core.masonry.api"
-requires = ["poetry-core"]
-
-[tool.poetry]
-authors = ["Zeeland <zeeland4work@gmail.com>"]
-description = "A powerful LLM Application development framework."
-name = "promptulate"
-readme = "README.md"
-repository = "https://github.com/Undertone0809/promptulate"
-version = "1.15.0"
-keywords = [
-    "promptulate",
-    "pne",
-    "prompt",
-    "agent",
-    "openai",
-    "chatgpt",
-    "gpt",
-    "llm",
-    "openai",
-    "langchain",
-    "litellm"
-]
-
-[tool.poetry.dependencies]
-broadcast-service = "1.3.2"
-click = "^8.1.7"
-cushy-storage = "^1.3.7"
-litellm = "^1.16.19"
-numexpr = "^2.8.4"
-pydantic = ">=1,<3"
-python = ">=3.8.1,<4.0"
-python-dotenv = "^1.0.0"
-questionary = "^2.0.1"
-requests = "^2.31.0"
-jinja2 = "^3.1.3"
-typing-extensions = "^4.10.0"
-
-
-[tool.poetry.group.dev.dependencies]
-coverage = "^7.3.4"
-coverage-badge = "^1.1.0"
-pre-commit = "^3.5.0"
-pytest = "^7.4.4"
-pytest-cov = "^4.1.0"
-pytest-html = ">=3.1.1,<5.0.0"
-ruff = "^0.1.13"
-pytest-mock = "^3.12.0"
-
-[tool.poetry.group.test_integration.dependencies]
-langchain = "^0.1.1"
-arxiv = "^1.4.7"
-duckduckgo_search = "^3.9.11"
-pyjwt = "^2.8.0"
-
-[tool.poetry.scripts]
-pne = "promptulate.client.pne:main"
-pne-chat = "promptulate.client.chat:main"
-
-#[[tool.poetry.source]]
-#name = "tsinghua"
-#priority = "default"
-#url = "https://pypi.tuna.tsinghua.edu.cn/simple"
-
-[tool.ruff]
-# https://beta.ruff.rs/docs/settings/
-# https://docs.astral.sh/ruff/configuration/
-line-length = 88
-
-# https://beta.ruff.rs/docs/rules/
-extend-select = ["I"]
-ignore = ["F401"]
-select = ["E", "W", "F", "I"]
-
-# Exclude a variety of commonly ignored directories.
-exclude = [
-  ".bzr",
-  ".direnv",
-  ".eggs",
-  ".git",
-  ".git-rewrite",
-  ".hg",
-  ".mypy_cache",
-  ".nox",
-  ".pants.d",
-  ".pytype",
-  ".ruff_cache",
-  ".svn",
-  ".tox",
-  ".venv",
-  "__pypackages__",
-  "_build",
-  "buck-out",
-  "build",
-  "dist",
-  "node_modules",
-  "venv",
-]
-ignore-init-module-imports = true
-respect-gitignore = true
-
-[tool.ruff.format]
-# Like Black, use double quotes for strings.
-quote-style = "double"
-
-# Like Black, indent with spaces, rather than tabs.
-indent-style = "space"
-
-# Like Black, respect magic trailing commas.
-skip-magic-trailing-comma = false
-
-# Like Black, automatically detect the appropriate line ending.
-line-ending = "auto"
-
-[tool.coverage.run]
-source = ["tests"]
-
-[coverage.report]
-fail_under = 50
-show_missing = true
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.poetry]
+authors = ["Zeeland <zeeland4work@gmail.com>"]
+description = "A powerful LLM Application development framework."
+name = "promptulate"
+readme = "README.md"
+repository = "https://github.com/Undertone0809/promptulate"
+version = "1.15.0"
+keywords = [
+    "promptulate",
+    "pne",
+    "prompt",
+    "agent",
+    "openai",
+    "chatgpt",
+    "gpt",
+    "llm",
+    "openai",
+    "langchain",
+    "litellm"
+]
+
+[tool.poetry.dependencies]
+broadcast-service = "1.3.2"
+click = "^8.1.7"
+cushy-storage = "^1.3.7"
+litellm = "^1.16.19"
+numexpr = "^2.8.4"
+pydantic = ">=1,<3"
+python = ">=3.8.1,<4.0"
+python-dotenv = "^1.0.0"
+questionary = "^2.0.1"
+requests = "^2.31.0"
+jinja2 = "^3.1.3"
+typing-extensions = "^4.10.0"
+
+
+[tool.poetry.group.dev.dependencies]
+coverage = "^7.3.4"
+coverage-badge = "^1.1.0"
+pre-commit = "^3.5.0"
+pytest = "^7.4.4"
+pytest-cov = "^4.1.0"
+pytest-html = ">=3.1.1,<5.0.0"
+ruff = "^0.1.13"
+pytest-mock = "^3.12.0"
+
+[tool.poetry.group.test_integration.dependencies]
+langchain = "^0.1.1"
+arxiv = "^1.4.7"
+duckduckgo_search = "^3.9.11"
+pyjwt = "^2.8.0"
+
+[tool.poetry.scripts]
+pne = "promptulate.client.pne:main"
+pne-chat = "promptulate.client.chat:main"
+
+#[[tool.poetry.source]]
+#name = "tsinghua"
+#priority = "default"
+#url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+
+[tool.ruff]
+# https://beta.ruff.rs/docs/settings/
+# https://docs.astral.sh/ruff/configuration/
+line-length = 88
+
+# https://beta.ruff.rs/docs/rules/
+extend-select = ["I"]
+ignore = ["F401"]
+select = ["E", "W", "F", "I"]
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+  ".bzr",
+  ".direnv",
+  ".eggs",
+  ".git",
+  ".git-rewrite",
+  ".hg",
+  ".mypy_cache",
+  ".nox",
+  ".pants.d",
+  ".pytype",
+  ".ruff_cache",
+  ".svn",
+  ".tox",
+  ".venv",
+  "__pypackages__",
+  "_build",
+  "buck-out",
+  "build",
+  "dist",
+  "node_modules",
+  "venv",
+]
+ignore-init-module-imports = true
+respect-gitignore = true
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+[tool.coverage.run]
+source = ["tests"]
+
+[tool.coverage.report]
+fail_under = 50
+show_missing = true

From a611b23da4785b73fbf4ef36338127dbbaf66cb7 Mon Sep 17 00:00:00 2001
From: zeeland <zeeland4work@gmail.com>
Date: Thu, 18 Apr 2024 17:47:25 +0800
Subject: [PATCH 2/4] perf: optimize makefile

---
 Makefile | 216 ++++++++++++++++++++++++++++---------------------------
 1 file changed, 110 insertions(+), 106 deletions(-)

diff --git a/Makefile b/Makefile
index 09c6b3f1..318910e8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,106 +1,110 @@
-SHELL := /usr/bin/env bash
-OS := $(shell python -c "import sys; print(sys.platform)")
-
-# all test files define here
-DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py
-DEV_TEST_HOOK_FILES := ./tests/hook/test_llm.py ./tests/hook/test_tool_hook.py
-
-DEV_TEST_LLM_FILES := ./tests/llms/test_openai.py ./tests/llms/test_factory.py
-DEV_TEST_AGENT_FILES := ./tests/agents/test_tool_agent.py ./tests/agents/test_assistant_agent.py
-DEV_TEST_FILES := $(DEV_TEST_TOOL_FILES) $(DEV_TEST_HOOK_FILES) $(DEV_TEST_LLM_FILES) $(DEV_TEST_AGENT_FILES) ./tests/test_chat.py ./tests/output_formatter ./tests/test_import.py ./tests/utils/test_string_template.py
-
-
-ifeq ($(OS),win32)
-	PYTHONPATH := $(shell python -c "import os; print(os.getcwd())")
-    TEST_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
-	TEST_PROD_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
-else
-	PYTHONPATH := `pwd`
-    TEST_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
-	TEST_PROD_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
-endif
-
-.PHONY: lock
-lock:
-	poetry lock -n && poetry export --without-hashes > requirements.txt
-
-.PHONY: install
-install:
-	poetry install --with dev
-
-.PHONY: install-integration
-install-integration:
-	poetry install --with dev,test_integration
-
-.PHONY: install-docs
-install-docs:
-	npm i docsify-cli -g
-
-.PHONY: pre-commit-install
-pre-commit-install:
-	poetry run pre-commit install
-
-.PHONY: polish-codestyle
-polish-codestyle:
-	poetry run ruff format --config pyproject.toml promptulate tests example
-	poetry run ruff check --fix --config pyproject.toml promptulate tests example
-
-.PHONY: formatting
-formatting: polish-codestyle
-
-.PHONY: test
-test:
-	$(TEST_COMMAND)
-
-.PHONY: test-prod
-test-prod:
-	$(TEST_PROD_COMMAND)
-	poetry run coverage-badge -o docs/images/coverage.svg -f
-
-.PHONY: check-codestyle
-check-codestyle:
-	poetry run ruff format --check --config pyproject.toml promptulate tests example
-	poetry run ruff check --config pyproject.toml promptulate tests example
-
-.PHONY: lint
-lint: check-codestyle test
-
-# https://github.com/Maxlinn/linn-jupyter-site-template/blob/main/.github/workflows/linn-jupyter-site-template-deploy.yml
-# Any notebook will be converted here.
-# If there are any notebook will be changed, then the notebook will be converted to markdown and pushed to the repo.
-.PHONY: build-docs
-build-docs:
-	jupyter nbconvert ./example/chat_usage.ipynb --to markdown --output-dir ./docs/use_cases/
-	jupyter nbconvert ./example/tools/custom_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
-	jupyter nbconvert ./example/llm/custom_llm.ipynb --to markdown --output-dir ./docs/modules/llm
-	jupyter nbconvert ./example/tools/langchain_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
-		jupyter nbconvert ./example/agent/assistant_agent_usage.ipynb --to markdown --output-dir ./docs/modules/agents
-
-
-.PHONY: start-docs
-start-docs:
-	docsify serve docs
-
-#* Cleaning
-.PHONY: pycache-remove
-pycache-remove:
-	find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
-
-.PHONY: dsstore-remove
-dsstore-remove:
-	find . | grep -E ".DS_Store" | xargs rm -rf
-
-.PHONY: ipynbcheckpoints-remove
-ipynbcheckpoints-remove:
-	find . | grep -E ".ipynb_checkpoints" | xargs rm -rf
-
-.PHONY: pytestcache-remove
-pytestcache-remove:
-	find . | grep -E ".pytest_cache" | xargs rm -rf
-
-.PHONY: build-remove
-build-remove:
-	rm -rf build/
-
-.PHONY: cleanup
-cleanup: pycache-remove dsstore-remove ipynbcheckpoints-remove pytestcache-remove
+SHELL := /usr/bin/env bash
+OS := $(shell python -c "import sys; print(sys.platform)")
+
+# all test files are defined here
+DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py
+DEV_TEST_HOOK_FILES := ./tests/hook/test_llm.py ./tests/hook/test_tool_hook.py
+
+DEV_TEST_LLM_FILES := ./tests/llms/test_openai.py ./tests/llms/test_factory.py
+DEV_TEST_AGENT_FILES := ./tests/agents/test_tool_agent.py ./tests/agents/test_assistant_agent.py
+DEV_TEST_FILES := $(DEV_TEST_TOOL_FILES) $(DEV_TEST_HOOK_FILES) $(DEV_TEST_LLM_FILES) $(DEV_TEST_AGENT_FILES) ./tests/test_chat.py ./tests/output_formatter ./tests/test_import.py ./tests/utils/test_string_template.py
+
+
+ifeq ($(OS),win32)
+	PYTHONPATH := $(shell python -c "import os; print(os.getcwd())")
+    TEST_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
+	TEST_PROD_COMMAND := set PYTHONPATH=$(PYTHONPATH) && poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
+else
+	PYTHONPATH := `pwd`
+    TEST_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate $(DEV_TEST_FILES)
+	TEST_PROD_COMMAND := PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=promptulate tests
+endif
+
+lock:
+	poetry lock -n && poetry export --without-hashes > requirements.txt
+
+install:
+	poetry install --with dev
+
+install-integration:
+	poetry install --with dev,test_integration
+
+install-docs:
+	npm i docsify-cli -g
+
+pre-commit-install:
+	poetry run pre-commit install
+
+polish-codestyle:
+	poetry run ruff format --config pyproject.toml promptulate tests example
+	poetry run ruff check --fix --config pyproject.toml promptulate tests example
+
+formatting: polish-codestyle
+format: polish-codestyle
+
+test:
+	$(TEST_COMMAND)
+
+test-prod:
+	$(TEST_PROD_COMMAND)
+	poetry run coverage-badge -o docs/images/coverage.svg -f
+
+check-codestyle:
+	poetry run ruff format --check --config pyproject.toml promptulate tests example
+	poetry run ruff check --config pyproject.toml promptulate tests example
+
+lint: check-codestyle test
+
+# https://github.com/Maxlinn/linn-jupyter-site-template/blob/main/.github/workflows/linn-jupyter-site-template-deploy.yml
+# Any notebook will be converted here.
+# If any notebook is changed, it is converted to markdown and pushed to the repo.
+build-docs:
+	jupyter nbconvert ./example/chat_usage.ipynb --to markdown --output-dir ./docs/use_cases/
+	jupyter nbconvert ./example/tools/custom_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
+	jupyter nbconvert ./example/llm/custom_llm.ipynb --to markdown --output-dir ./docs/modules/llm
+	jupyter nbconvert ./example/tools/langchain_tool_usage.ipynb --to markdown --output-dir ./docs/modules/tools
+	jupyter nbconvert ./example/agent/assistant_agent_usage.ipynb --to markdown --output-dir ./docs/modules/agents
+
+start-docs:
+	docsify serve docs
+
+#* Cleaning
+pycache-remove:
+	find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
+
+dsstore-remove:
+	find . | grep -E ".DS_Store" | xargs rm -rf
+
+ipynbcheckpoints-remove:
+	find . | grep -E ".ipynb_checkpoints" | xargs rm -rf
+
+pytestcache-remove:
+	find . | grep -E ".pytest_cache" | xargs rm -rf
+
+build-remove:
+	rm -rf build/
+
+cleanup: pycache-remove dsstore-remove ipynbcheckpoints-remove pytestcache-remove
+
+help:
+	@echo "lock: Lock the dependencies and export to requirements.txt"
+	@echo "install: Install the dependencies"
+	@echo "install-integration: Install the dependencies for integration testing"
+	@echo "install-docs: Install the dependencies for building docs"
+	@echo "pre-commit-install: Install the pre-commit hooks"
+	@echo "polish-codestyle: Format the code"
+	@echo "formatting: Format the code"
+	@echo "test: Run the tests"
+	@echo "test-prod: Run the tests for production"
+	@echo "check-codestyle: Check the code style"
+	@echo "lint: Run the tests and check the code style"
+	@echo "build-docs: Build the docs"
+	@echo "start-docs: Start the docs server"
+	@echo "pycache-remove: Remove the pycache"
+	@echo "dsstore-remove: Remove the .DS_Store files"
+	@echo "ipynbcheckpoints-remove: Remove the ipynb checkpoints"
+	@echo "pytestcache-remove: Remove the pytest cache"
+	@echo "build-remove: Remove the build directory"
+	@echo "cleanup: Remove all the cache files"
+
+.PHONY: lock install install-integration install-docs pre-commit-install polish-codestyle formatting format test test-prod check-codestyle lint build-docs start-docs pycache-remove dsstore-remove ipynbcheckpoints-remove pytestcache-remove build-remove cleanup help
\ No newline at end of file

From 7ffb32af7a4726b6ba15d487ad284a9e37824dc5 Mon Sep 17 00:00:00 2001
From: zeeland <zeeland4work@gmail.com>
Date: Thu, 18 Apr 2024 17:48:48 +0800
Subject: [PATCH 3/4] fix: error when init toolkit in tool manager

---
 docs/index.html                               | 220 ++---
 promptulate/agents/tool_agent/agent.py        | 434 +++++-----
 promptulate/agents/tool_agent/prompt.py       | 199 ++---
 promptulate/beta/__init__.py                  |   3 +
 .../beta/agents/assistant_agent/agent.py      |   2 +-
 promptulate/chat.py                           | 547 ++++++------
 promptulate/schema.py                         | 597 +++++++-------
 promptulate/tools/base.py                     | 776 +++++++++---------
 promptulate/tools/file/toolkit.py             | 108 +--
 promptulate/tools/manager.py                  | 237 +++---
 pyproject.toml                                | 242 +++---
 tests/agents/test_tool_agent.py               |  18 +
 12 files changed, 1713 insertions(+), 1670 deletions(-)

diff --git a/docs/index.html b/docs/index.html
index 1bcd2bcf..0aaee97a 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -1,110 +1,110 @@
-<!DOCTYPE html>
-<html lang="zh">
-<head>
-    <meta charset="UTF-8">
-    <title>Document</title>
-    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
-    <meta name="description" content="Description">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
-    <link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify@4/lib/themes/vue.css">
-    <script src="//cdn.jsdelivr.net/npm/docsify-edit-on-github"></script>
-    <style>
-    </style>
-</head>
-<body>
-<nav>
-    <a href="#/">文档 v1.11.2</a>
-    <a href="#/">示例</a>
-</nav>
-<div id="app"></div>
-<script>
-    window.$docsify = {
-        name: 'Promptulate',
-        repo: 'https://github.com/Undertone0809/promptulate',
-        // logo: '/images/logo.svg',
-        loadNavbar: true,
-        loadSidebar: true,
-        subMaxLevel: 3,
-        coverpage: true,
-        search: {
-            paths: 'auto',
-            placeholder: 'Search',
-            noData: 'Not Found',
-            depth: 3,
-        },
-        count: {
-            countable: true,
-            fontsize: '0.9em',
-            color: 'rgb(90,90,90)',
-            language: 'english'
-        },
-        plugins: [
-            EditOnGithubPlugin.create(
-                'https://github.com/Undertone0809/promptulate/tree/main/docs/',
-                null,
-                function (file) {
-                    if (file.indexOf('en') === -1) {
-                        return '编辑文档'
-                    } else {
-                        return 'edit on github'
-                    }
-                }
-            ),
-            function (hook) {
-                var footer = [
-                    '<hr/>',
-                    '<footer>',
-                    '<span><a href="https://github.com/Undertone0809/">Promptulate/Zeeland</a> &copy;2023.</span>',
-                    '<span>Published with <a href="https://github.com/QingWei-Li/docsify" target="_blank">docsify</a>.</span>',
-                    '</footer>'
-                ].join('')
-
-                hook.afterEach(function (html) {
-                    return html + footer
-                })
-            },
-        ]
-
-    }
-</script>
-<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.css">
-
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/gitalk.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.min.js"></script>
-<script>
-    var gitalk = new Gitalk({
-        clientID: '0b1cb7e36ac5fa3233ba',
-        clientSecret: '4148c415d7ed173e3dcffc861b9ac3eea79fbdbb',
-        repo: 'promptulate',
-        owner: 'Undertone0809',
-        admin: ['Undertone0809'],
-        title: `Document comment ${location.hash.match(/#(.*?)([?]|$)/)[1]}`,
-        id: location.hash.match(/#(.*?)([?]|$)/)[1],
-    })
-    // Listen for changes in hash in the URL. If an MD file is found to have changed,
-    // refresh the page to solve the problem of using a single digital comment issue for the entire website.
-    window.onhashchange = function (event) {
-        if (event.newURL.split('?')[0] !== event.oldURL.split('?')[0]) {
-            location.reload()
-        }
-    }
-</script>
-<!-- Docsify v4 -->
-<script src="//cdn.jsdelivr.net/npm/docsify@4"></script>
-<!-- search plugin -->
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/search.min.js"></script>
-<!-- image zoom plugin -->1
-<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/zoom-image.min.js"></script>
-<!-- copy code plugin -->
-<script src="//cdn.jsdelivr.net/npm/docsify-copy-code"></script>
-<script src="//unpkg.com/docsify-count/dist/countable.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-bash.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-python.min.js"></script>
-<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-json.min.js"></script>
-
-<script id="embedai" src="https://embedai.thesamur.ai/embedai.js" data-id="pne-docs"></script>
-
-<!-- sidebar plugin -->
-<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify-sidebar-collapse/dist/sidebar.min.css" />
-</body>
-</html>
+<!DOCTYPE html>
+<html lang="zh">
+<head>
+    <meta charset="UTF-8">
+    <title>Document</title>
+    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
+    <meta name="description" content="Description">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0">
+    <link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify@4/lib/themes/vue.css">
+    <script src="//cdn.jsdelivr.net/npm/docsify-edit-on-github"></script>
+    <style>
+    </style>
+</head>
+<body>
+<nav>
+    <a href="#/">文档</a>
+    <a href="#/">示例</a>
+</nav>
+<div id="app"></div>
+<script>
+    window.$docsify = {
+        name: 'Promptulate',
+        repo: 'https://github.com/Undertone0809/promptulate',
+        // logo: '/images/logo.svg',
+        loadNavbar: true,
+        loadSidebar: true,
+        subMaxLevel: 3,
+        coverpage: true,
+        search: {
+            paths: 'auto',
+            placeholder: 'Search',
+            noData: 'Not Found',
+            depth: 3,
+        },
+        count: {
+            countable: true,
+            fontsize: '0.9em',
+            color: 'rgb(90,90,90)',
+            language: 'english'
+        },
+        plugins: [
+            EditOnGithubPlugin.create(
+                'https://github.com/Undertone0809/promptulate/tree/main/docs/',
+                null,
+                function (file) {
+                    if (file.indexOf('en') === -1) {
+                        return '编辑文档'
+                    } else {
+                        return 'edit on github'
+                    }
+                }
+            ),
+            function (hook) {
+                var footer = [
+                    '<hr/>',
+                    '<footer>',
+                    '<span><a href="https://github.com/Undertone0809/">Promptulate/Zeeland</a> &copy;2023.</span>',
+                    '<span>Published with <a href="https://github.com/QingWei-Li/docsify" target="_blank">docsify</a>.</span>',
+                    '</footer>'
+                ].join('')
+
+                hook.afterEach(function (html) {
+                    return html + footer
+                })
+            },
+        ]
+
+    }
+</script>
+<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.css">
+
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/gitalk.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/gitalk/dist/gitalk.min.js"></script>
+<script>
+    var gitalk = new Gitalk({
+        clientID: '0b1cb7e36ac5fa3233ba',
+        clientSecret: '4148c415d7ed173e3dcffc861b9ac3eea79fbdbb',
+        repo: 'promptulate',
+        owner: 'Undertone0809',
+        admin: ['Undertone0809'],
+        title: `Document comment ${location.hash.match(/#(.*?)([?]|$)/)[1]}`,
+        id: location.hash.match(/#(.*?)([?]|$)/)[1],
+    })
+    // Listen for hash changes in the URL. If the MD file changes, reload the
+    // page so the whole site does not share a single comment issue.
+    window.onhashchange = function (event) {
+        if (event.newURL.split('?')[0] !== event.oldURL.split('?')[0]) {
+            location.reload()
+        }
+    }
+</script>
+<!-- Docsify v4 -->
+<script src="//cdn.jsdelivr.net/npm/docsify@4"></script>
+<!-- search plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/search.min.js"></script>
+<!-- image zoom plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify/lib/plugins/zoom-image.min.js"></script>
+<!-- copy code plugin -->
+<script src="//cdn.jsdelivr.net/npm/docsify-copy-code"></script>
+<script src="//unpkg.com/docsify-count/dist/countable.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-bash.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-python.min.js"></script>
+<script src="//cdn.jsdelivr.net/npm/prismjs@1/components/prism-json.min.js"></script>
+
+<script id="embedai" src="https://embedai.thesamur.ai/embedai.js" data-id="pne-docs"></script>
+
+<!-- sidebar plugin -->
+<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/docsify-sidebar-collapse/dist/sidebar.min.css" />
+</body>
+</html>
diff --git a/promptulate/agents/tool_agent/agent.py b/promptulate/agents/tool_agent/agent.py
index f21ae2e2..b15b5cba 100644
--- a/promptulate/agents/tool_agent/agent.py
+++ b/promptulate/agents/tool_agent/agent.py
@@ -1,217 +1,217 @@
-import json
-import time
-from typing import Callable, List, Optional, TypedDict, Union
-
-from promptulate.agents import BaseAgent
-from promptulate.agents.tool_agent.prompt import (
-    PREFIX_TEMPLATE,
-    REACT_SYSTEM_PROMPT_TEMPLATE,
-)
-from promptulate.hook import Hook, HookTable
-from promptulate.llms.base import BaseLLM
-from promptulate.llms.openai.openai import ChatOpenAI
-from promptulate.schema import ToolTypes
-from promptulate.tools.manager import ToolManager
-from promptulate.utils.logger import logger
-from promptulate.utils.string_template import StringTemplate
-
-
-class ActionResponse(TypedDict):
-    thought: str
-    action_name: str
-    action_parameters: Union[dict, str]
-
-
-class ToolAgent(BaseAgent):
-    """
-    An agent who is good at using tool. ref ReAct.
-
-    Attributes:
-        llm (BaseLLM): The language model driver. Default is ChatOpenAI with model
-            "gpt-3.5-turbo-16k".
-        stop_sequences (List[str]): The sequences that, when met, will stop the output
-            of the llm.
-        system_prompt_template (StringTemplate): The preset system prompt template.
-        prefix_prompt_template (StringTemplate): The prefix system prompt template.
-        tool_manager (ToolManager): Used to manage all tools.
-        conversation_prompt (str): Stores all conversation messages during a
-            conversation.
-        max_iterations (Optional[int]): The maximum number of executions. Default is 15.
-        max_execution_time (Optional[float]): The longest running time. No default
-            value.
-        enable_role (bool): Flag to enable role. Default is False.
-        agent_name (str): The name of the agent. Default is "pne-bot".
-        agent_identity (str): The identity of the agent. Default is "bot".
-        agent_goal (str): The goal of the agent. Default is "provides better assistance
-            and services for humans.".
-        agent_constraints (str): The constraints of the agent. Default is "none".
-    """
-
-    def __init__(
-        self,
-        *,
-        llm: BaseLLM = None,
-        tools: Optional[List[ToolTypes]] = None,
-        prefix_prompt_template: StringTemplate = StringTemplate(PREFIX_TEMPLATE),
-        hooks: Optional[List[Callable]] = None,
-        enable_role: bool = False,
-        agent_name: str = "tool-agent",
-        agent_identity: str = "tool-agent",
-        agent_goal: str = "provides better assistance and services for humans.",
-        agent_constraints: str = "none",
-        tool_manager: Optional[ToolManager] = None,
-        _from: Optional[str] = None,
-    ):
-        if tools is not None and tool_manager is not None:
-            raise ValueError(
-                "Please provide either 'tools' or 'tool_manager', but not both simultaneously."  # noqa
-            )
-
-        super().__init__(hooks=hooks, agent_type="Tool Agent", _from=_from)
-        self.llm: BaseLLM = llm or ChatOpenAI(model="gpt-4-1106-preview")
-        """llm provider"""
-        self.tool_manager: ToolManager = (
-            tool_manager if tool_manager is not None else ToolManager(tools or [])
-        )
-        """Used to manage all tools, Only create a new ToolManager if 'tool_manager' is
-        not provided."""
-        self.system_prompt_template: StringTemplate = REACT_SYSTEM_PROMPT_TEMPLATE
-        """Preset system prompt template."""
-        self.prefix_prompt_template: StringTemplate = prefix_prompt_template
-        """Prefix system prompt template."""
-        self.conversation_prompt: str = ""
-        """Store all conversation message when conversation. ToolAgent use dynamic
-        system prompt."""
-        self.max_iterations: Optional[int] = 15
-        """The maximum number of executions."""
-        self.max_execution_time: Optional[float] = None
-        """The longest running time. """
-        self.enable_role: bool = enable_role
-        self.agent_name: str = agent_name
-        self.agent_identity: str = agent_identity
-        self.agent_goal: str = agent_goal
-        self.agent_constraints: str = agent_constraints
-
-    def get_llm(self) -> BaseLLM:
-        return self.llm
-
-    def _build_system_prompt(self, instruction: str) -> str:
-        """Build the system prompt."""
-        prefix_prompt = (
-            self.prefix_prompt_template.format(
-                agent_identity=self.agent_identity,
-                agent_name=self.agent_name,
-                agent_goal=self.agent_goal,
-                agent_constraints=self.agent_constraints,
-            )
-            if self.enable_role
-            else ""
-        )
-
-        return prefix_prompt + self.system_prompt_template.format(
-            question=instruction,
-            tool_descriptions=self.tool_manager.tool_descriptions,
-        )
-
-    @property
-    def current_date(self) -> str:
-        """Get the current date."""
-        return f"Current date: {time.strftime('%Y-%m-%d %H:%M:%S')}"
-
-    def _run(
-        self, instruction: str, return_raw_data: bool = False, **kwargs
-    ) -> Union[str, ActionResponse]:
-        """Run the tool agent. The tool agent will interact with the LLM and the tool.
-
-        Args:
-            instruction(str): The instruction to the tool agent.
-            return_raw_data(bool): Whether to return raw data. Default is False.
-
-        Returns:
-            The output of the tool agent.
-        """
-        self.conversation_prompt = self._build_system_prompt(instruction)
-        logger.info(f"[pne] ToolAgent system prompt: {self.conversation_prompt}")
-
-        iterations = 0
-        used_time = 0.0
-        start_time = time.time()
-
-        while self._should_continue(iterations, used_time):
-            llm_resp: str = self.llm(
-                instruction=self.conversation_prompt + self.current_date
-            )
-            while llm_resp == "":
-                llm_resp = self.llm(
-                    instruction=self.conversation_prompt + self.current_date
-                )
-
-            action_resp: ActionResponse = self._parse_llm_response(llm_resp)
-            self.conversation_prompt += f"{llm_resp}\n"
-            logger.info(
-                f"[pne] tool agent <{iterations}> current prompt: {self.conversation_prompt}"  # noqa
-            )
-
-            if "finish" in action_resp["action_name"]:
-                if return_raw_data:
-                    return action_resp
-
-                return action_resp["action_parameters"]["content"]
-
-            Hook.call_hook(
-                HookTable.ON_AGENT_ACTION,
-                self,
-                thought=action_resp["thought"],
-                action=action_resp["action_name"],
-                action_input=action_resp["action_parameters"],
-            )
-
-            tool_result = self.tool_manager.run_tool(
-                action_resp["action_name"], action_resp["action_parameters"]
-            )
-            Hook.call_hook(
-                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
-            )
-            self.conversation_prompt += f"Observation: {tool_result}\n"
-
-            iterations += 1
-            used_time += time.time() - start_time
-
-    def _should_continue(self, current_iteration: int, current_time_elapsed) -> bool:
-        """Determine whether to stop, both timeout and exceeding the maximum number of
-        iterations will stop.
-
-        Args:
-            current_iteration: current iteration times.
-            current_time_elapsed: current running time.
-
-        Returns:
-            Whether to stop.
-        """
-        if self.max_iterations and current_iteration >= self.max_iterations:
-            return False
-        if self.max_execution_time and current_time_elapsed >= self.max_execution_time:
-            return False
-        return True
-
-    def _parse_llm_response(self, llm_resp: str) -> ActionResponse:
-        """Parse next instruction of LLM output.
-
-        Args:
-            llm_resp(str): output of LLM
-
-        Returns:
-            Return a tuple, (thought,action,action input)
-            action(str): tool name
-            action_input(dict | str): tool parameters
-        """
-        llm_resp: str = (
-            llm_resp.replace("```json", "").replace("```JSON", "").replace("```", "")
-        )
-        data: dict = json.loads(llm_resp)
-
-        return ActionResponse(
-            thought=data["thought"],
-            action_name=data["action"]["name"],
-            action_parameters=data["action"]["args"],
-        )
+import json
+import time
+from typing import Callable, List, Optional, TypedDict, Union
+
+from promptulate.agents import BaseAgent
+from promptulate.agents.tool_agent.prompt import (
+    PREFIX_TEMPLATE,
+    REACT_SYSTEM_PROMPT_TEMPLATE,
+)
+from promptulate.hook import Hook, HookTable
+from promptulate.llms.base import BaseLLM
+from promptulate.llms.openai.openai import ChatOpenAI
+from promptulate.tools.base import ToolTypes
+from promptulate.tools.manager import ToolManager
+from promptulate.utils.logger import logger
+from promptulate.utils.string_template import StringTemplate
+
+
+class ActionResponse(TypedDict):
+    analysis: str
+    action_name: str
+    action_parameters: Union[dict, str]
+
+
+class ToolAgent(BaseAgent):
+    """
+    An agent that is good at using tools, based on the ReAct paradigm.
+
+    Attributes:
+        llm (BaseLLM): The language model driver. Default is ChatOpenAI with model
+            "gpt-4-1106-preview".
+        system_prompt_template (StringTemplate): The preset system prompt template.
+        prefix_prompt_template (StringTemplate): The prefix system prompt template.
+        tool_manager (ToolManager): Used to manage all tools.
+        conversation_prompt (str): Stores all conversation messages during a
+            conversation.
+        max_iterations (Optional[int]): The maximum number of executions. Default is 15.
+        max_execution_time (Optional[float]): The longest running time. No default
+            value.
+        enable_role (bool): Flag to enable role. Default is False.
+        agent_name (str): The name of the agent. Default is "pne-bot".
+        agent_identity (str): The identity of the agent. Default is "bot".
+        agent_goal (str): The goal of the agent. Default is "provides better assistance
+            and services for humans.".
+        agent_constraints (str): The constraints of the agent. Default is "none".
+        _from (Optional[str]): The initialization source. Default is None.
+    """
+
+    def __init__(
+        self,
+        *,
+        llm: BaseLLM = None,
+        tools: Optional[List[ToolTypes]] = None,
+        prefix_prompt_template: StringTemplate = StringTemplate(PREFIX_TEMPLATE),
+        hooks: Optional[List[Callable]] = None,
+        enable_role: bool = False,
+        agent_name: str = "tool-agent",
+        agent_identity: str = "tool-agent",
+        agent_goal: str = "provides better assistance and services for humans.",
+        agent_constraints: str = "none",
+        tool_manager: Optional[ToolManager] = None,
+        _from: Optional[str] = None,
+    ):
+        if tools is not None and tool_manager is not None:
+            raise ValueError(
+                "Please provide either 'tools' or 'tool_manager', but not both simultaneously."  # noqa
+            )
+
+        super().__init__(hooks=hooks, agent_type="Tool Agent", _from=_from)
+        self.llm: BaseLLM = llm or ChatOpenAI(model="gpt-4-1106-preview")
+        """llm provider"""
+        self.tool_manager: ToolManager = (
+            tool_manager if tool_manager is not None else ToolManager(tools or [])
+        )
+        """Used to manage all tools, Only create a new ToolManager if 'tool_manager' is
+        not provided."""
+        self.system_prompt_template: StringTemplate = REACT_SYSTEM_PROMPT_TEMPLATE
+        """Preset system prompt template."""
+        self.prefix_prompt_template: StringTemplate = prefix_prompt_template
+        """Prefix system prompt template."""
+        self.conversation_prompt: str = ""
+        """Store all conversation message when conversation. ToolAgent use dynamic
+        system prompt."""
+        self.max_iterations: Optional[int] = 15
+        """The maximum number of executions."""
+        self.max_execution_time: Optional[float] = None
+        """The longest running time. """
+        self.enable_role: bool = enable_role
+        self.agent_name: str = agent_name
+        self.agent_identity: str = agent_identity
+        self.agent_goal: str = agent_goal
+        self.agent_constraints: str = agent_constraints
+
+    def get_llm(self) -> BaseLLM:
+        return self.llm
+
+    def _build_system_prompt(self, instruction: str) -> str:
+        """Build the system prompt."""
+        prefix_prompt = (
+            self.prefix_prompt_template.format(
+                agent_identity=self.agent_identity,
+                agent_name=self.agent_name,
+                agent_goal=self.agent_goal,
+                agent_constraints=self.agent_constraints,
+            )
+            if self.enable_role
+            else ""
+        )
+
+        return prefix_prompt + self.system_prompt_template.format(
+            question=instruction,
+            tool_descriptions=self.tool_manager.tool_descriptions,
+        )
+
+    @property
+    def current_date(self) -> str:
+        """Get the current date."""
+        return f"Current date: {time.strftime('%Y-%m-%d %H:%M:%S')}"
+
+    def _run(
+        self, instruction: str, return_raw_data: bool = False, **kwargs
+    ) -> Union[str, ActionResponse]:
+        """Run the tool agent. The tool agent will interact with the LLM and the tool.
+
+        Args:
+            instruction(str): The instruction to the tool agent.
+            return_raw_data(bool): Whether to return raw data. Default is False.
+
+        Returns:
+            The output of the tool agent.
+        """
+        self.conversation_prompt = self._build_system_prompt(instruction)
+        logger.info(f"[pne] ToolAgent system prompt: {self.conversation_prompt}")
+
+        iterations = 0
+        used_time = 0.0
+        start_time = time.time()
+
+        while self._should_continue(iterations, used_time):
+            llm_resp: str = self.llm(
+                instruction=self.conversation_prompt + self.current_date
+            )
+            while llm_resp == "":
+                llm_resp = self.llm(
+                    instruction=self.conversation_prompt + self.current_date
+                )
+
+            action_resp: ActionResponse = self._parse_llm_response(llm_resp)
+            self.conversation_prompt += f"{llm_resp}\n"
+            logger.info(
+                f"[pne] tool agent <{iterations}> current prompt: {self.conversation_prompt}"  # noqa
+            )
+
+            if "finish" in action_resp["action_name"]:
+                if return_raw_data:
+                    return action_resp
+
+                return action_resp["action_parameters"]["content"]
+
+            Hook.call_hook(
+                HookTable.ON_AGENT_ACTION,
+                self,
+                thought=action_resp["analysis"],
+                action=action_resp["action_name"],
+                action_input=action_resp["action_parameters"],
+            )
+
+            tool_result = self.tool_manager.run_tool(
+                action_resp["action_name"], action_resp["action_parameters"]
+            )
+            Hook.call_hook(
+                HookTable.ON_AGENT_OBSERVATION, self, observation=tool_result
+            )
+            self.conversation_prompt += f"Observation: {tool_result}\n"
+
+            iterations += 1
+            used_time += time.time() - start_time
+
+    def _should_continue(self, current_iteration: int, current_time_elapsed) -> bool:
+        """Determine whether to stop, both timeout and exceeding the maximum number of
+        iterations will stop.
+
+        Args:
+            current_iteration: current iteration times.
+            current_time_elapsed: current running time.
+
+        Returns:
+            Whether to continue running.
+        """
+        if self.max_iterations and current_iteration >= self.max_iterations:
+            return False
+        if self.max_execution_time and current_time_elapsed >= self.max_execution_time:
+            return False
+        return True
+
+    def _parse_llm_response(self, llm_resp: str) -> ActionResponse:
+        """Parse next instruction of LLM output.
+
+        Args:
+            llm_resp(str): output of LLM
+
+        Returns:
+            Return a tuple, (thought,action,action input)
+            action(str): tool name
+            action_input(dict | str): tool parameters
+        """
+        llm_resp: str = (
+            llm_resp.replace("```json", "").replace("```JSON", "").replace("```", "")
+        )
+        data: dict = json.loads(llm_resp)
+
+        return ActionResponse(
+            analysis=data["analysis"],
+            action_name=data["action"]["name"],
+            action_parameters=data["action"]["args"],
+        )
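
A sketch of driving the agent above end to end. It assumes an OpenAI API key is configured in the environment and that `BaseAgent` exposes a public `run()` delegating to `_run()`; the weather callback is a placeholder:

```python
from promptulate.agents.tool_agent.agent import ToolAgent


def query_weather(city: str) -> str:
    """Hypothetical tool: return a canned weather report for this sketch."""
    return f"The weather in {city} is sunny."


agent = ToolAgent(tools=[query_weather])
answer = agent.run("What is the weather in Beijing tomorrow?")
print(answer)
```
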
diff --git a/promptulate/agents/tool_agent/prompt.py b/promptulate/agents/tool_agent/prompt.py
index 6acb130d..c0f3fd92 100644
--- a/promptulate/agents/tool_agent/prompt.py
+++ b/promptulate/agents/tool_agent/prompt.py
@@ -1,99 +1,100 @@
-from promptulate.utils.string_template import StringTemplate
-
-SYSTEM_PROMPT_TEMPLATE = StringTemplate(
-    template_format="jinja2",
-    template="""As a diligent Task Agent, you goal is to effectively accomplish the provided task or question as best as you can.
-
-## Tools
-You have access to the following tools, the tools information is provided by the following schema:
-{{tool_descriptions}}
-
-## Task
-Currently, you are working on the following task:
-{{task}}
-
-To achieve your goals, you need to choose the appropriate tools for reasoning.
-For example: If the user wants to check the weather in Beijing tomorrow. The first step is to use websearch to query the weather in Beijing. After obtaining the results, in the second step, you can use the finish command to return the results.
-
-## Constraints
-- Choose only ONE tool in one step.
-- Choose tool carefully as it is critical to accomplish the task.
-- Your final answer output language should be consistent with the language used by the user. Middle step output is English.
-
-{{current_process}}
-
-{{output_format}}
-""",  # noqa: E501
-)
-
-REACT_SYSTEM_PROMPT_TEMPLATE = StringTemplate(
-    template_format="jinja2",
-    template="""
-As a diligent Task Agent, you goal is to effectively accomplish the provided task or question as best as you can.
-
-## Tools
-You have access to the following tools, the tools information is provided by the following schema:
-{{tool_descriptions}}
-
-## Output Format
-To answer the question, Use the following JSON format. JSON only, no explanation. Otherwise, you will be punished.
-The output should be formatted as a JSON instance that conforms to the format below. JSON only, no explanation.
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": # the action to take, must be one of provided tools
-    {
-    "name": "tool name",
-    "args": "tool input parameters, json type data"
-    }
-}
-```
-
-If this format is used, the user will respond in the following format:
-
-```
-Observation: tool response
-```
-
-You should keep repeating the above format until you have enough information
-to answer the question without using any more tools. At that point, you MUST respond
-in the one of the following two formats:
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": {
-    "name": "finish",
-    "args": {"content": "You answer here."}
-    }
-}
-```
-
-```json
-{
-"thought": "The thought of what to do and why.",
-"self_criticism":"Constructive self-criticism of the thought",
-"action": {
-    "name": "finish",
-    "args": {"content": "Sorry, I cannot answer your query, because (Summary all the upper steps, and explain)"}
-    }
-}
-```
-
-## Attention
-- Your output is JSON only and no explanation.
-- Choose only ONE tool and you can't do without using any tools in one step.
-- Your final answer output language should be consistent with the language used by the user. Middle step output is English.
-- Whether the action input is JSON or str depends on the definition of the tool.
-
-## User question
-{{question}}
-
-## Current Conversation
-Below is the current conversation consisting of interleaving human and assistant history.
-""",  # noqa: E501
-)
-PREFIX_TEMPLATE = """You are a {agent_identity}, named {agent_name}, your goal is {agent_goal}, and the constraint is {agent_constraints}. """  # noqa
+from promptulate.utils.string_template import StringTemplate
+
+SYSTEM_PROMPT_TEMPLATE = StringTemplate(
+    template_format="jinja2",
+    template="""As a diligent Task Agent, you goal is to effectively accomplish the provided task or question as best as you can.
+
+## Tools
+You have access to the following tools; the tool information is provided in the following schema:
+{{tool_descriptions}}
+
+## Task
+Currently, you are working on the following task:
+{{task}}
+
+To achieve your goals, you need to choose the appropriate tools for reasoning.
+For example, if the user wants to check the weather in Beijing tomorrow, the first step is to use websearch to query the weather in Beijing. After obtaining the results, the second step can use the finish command to return the results.
+
+## Constraints
+- Choose only ONE tool in one step.
+- Choose tools carefully, as the choice is critical to accomplishing the task.
+- The language of your final answer should be consistent with the language used by the user. Intermediate step output is in English.
+
+{{current_process}}
+
+{{output_format}}
+""",  # noqa: E501
+)
+
+REACT_SYSTEM_PROMPT_TEMPLATE = StringTemplate(
+    template_format="jinja2",
+    template="""
+As a diligent Task Agent, your goal is to accomplish the provided task or question as effectively as you can.
+
+## Tools
+You have access to the following tools; the tool information is provided in the following schema:
+{{tool_descriptions}}
+
+## Output Format
+To answer the question, use the following JSON format. The output should be formatted as a JSON instance that conforms to the format below. JSON only, no explanation; otherwise, you will be punished.
+
+```json
+{
+"analysis": "The thought of what to do and why.",
+"action": # the action to take, must be one of provided tools
+    {
+    "name": "tool name",
+    "args": "tool input parameters, json type data"
+    }
+}
+```
+
+If this format is used, the user will respond in the following format:
+
+```
+Observation: tool response
+```
+
+You should keep repeating the above format until you have enough information
+to answer the question without using any more tools. At that point, you MUST respond
+in one of the following two formats:
+
+- If you can answer the question:
+
+```json
+{
+"analysis": "The thought of what to do and why.",
+"action": {
+    "name": "finish",
+    "args": {"content": "You answer here."}
+    }
+}
+```
+
+- If you cannot answer the question in the current context:
+
+```json
+{
+"thought": "The thought of what to do and why.",
+"action": {
+    "name": "finish",
+    "args": {"content": "Sorry, I cannot answer your query, because (Summary all the upper steps, and explain)"}
+    }
+}
+```
+
+## Attention
+- Your output must be JSON only, with no explanation.
+- Choose only ONE tool per step; you cannot complete a step without using a tool.
+- The language of your final answer should be consistent with the language used by the user. Intermediate step output is in English.
+- Whether the action input is JSON or a string depends on the definition of the tool.
+
+## User question
+{{question}}
+
+## Current Conversation
+Below is the current conversation, consisting of interleaved human and assistant messages.
+""",  # noqa: E501
+)
+PREFIX_TEMPLATE = """You are a {agent_identity}, named {agent_name}, your goal is {agent_goal}, and the constraint is {agent_constraints}. """  # noqa
diff --git a/promptulate/beta/__init__.py b/promptulate/beta/__init__.py
index e69de29b..2786e4c7 100644
--- a/promptulate/beta/__init__.py
+++ b/promptulate/beta/__init__.py
@@ -0,0 +1,3 @@
+from promptulate.beta import agents, rag
+
+__all__ = ["agents", "rag"]
diff --git a/promptulate/beta/agents/assistant_agent/agent.py b/promptulate/beta/agents/assistant_agent/agent.py
index 75c26933..3c750c6c 100644
--- a/promptulate/beta/agents/assistant_agent/agent.py
+++ b/promptulate/beta/agents/assistant_agent/agent.py
@@ -11,7 +11,7 @@
 from promptulate.beta.agents.assistant_agent.schema import Plan
 from promptulate.hook import Hook, HookTable
 from promptulate.llms.base import BaseLLM
-from promptulate.schema import ToolTypes
+from promptulate.tools.base import ToolTypes
 from promptulate.tools.manager import ToolManager
 from promptulate.utils.logger import logger
 
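This hunk moves `ToolTypes` out of `promptulate.schema`; downstream imports move with it. A one-line sketch of an updated call site:

```python
# ToolTypes now lives in promptulate.tools.base (previously promptulate.schema).
from promptulate.tools.base import ToolTypes
```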
diff --git a/promptulate/chat.py b/promptulate/chat.py
index e763bd93..b96cc974 100644
--- a/promptulate/chat.py
+++ b/promptulate/chat.py
@@ -1,274 +1,273 @@
-import json
-from typing import Dict, List, Optional, TypeVar, Union
-
-import litellm
-
-from promptulate.agents.base import BaseAgent
-from promptulate.agents.tool_agent.agent import ToolAgent
-from promptulate.beta.agents.assistant_agent import AssistantAgent
-from promptulate.llms import BaseLLM
-from promptulate.output_formatter import formatting_result, get_formatted_instructions
-from promptulate.pydantic_v1 import BaseModel
-from promptulate.schema import (
-    AssistantMessage,
-    BaseMessage,
-    MessageSet,
-    StreamIterator,
-    ToolTypes,
-)
-from promptulate.tools.base import BaseTool
-from promptulate.utils.logger import logger
-
-T = TypeVar("T", bound=BaseModel)
-
-
-def parse_content(chunk) -> (str, str):
-    """Parse the litellm chunk.
-    Args:
-        chunk: litellm chunk.
-
-    Returns:
-        content: The content of the chunk.
-        ret_data: The additional data of the chunk.
-    """
-    content = chunk.choices[0].delta.content
-    ret_data = json.loads(chunk.json())
-    return content, ret_data
-
-
-class _LiteLLM(BaseLLM):
-    def __init__(
-        self, model: str, model_config: Optional[dict] = None, *args, **kwargs
-    ):
-        logger.info(f"[pne chat] init LiteLLM, model: {model} config: {model_config}")
-        super().__init__(*args, **kwargs)
-        self._model: str = model
-        self._model_config: dict = model_config or {}
-
-    def _predict(
-        self, messages: MessageSet, stream: bool = False, *args, **kwargs
-    ) -> Union[AssistantMessage, StreamIterator]:
-        logger.info(f"[pne chat] prompts: {messages.string_messages}")
-        temp_response = litellm.completion(
-            model=self._model, messages=messages.listdict_messages, **self._model_config
-        )
-
-        if stream:
-            return StreamIterator(
-                response_stream=temp_response,
-                parse_content=parse_content,
-                return_raw_response=False,
-            )
-
-        response = AssistantMessage(
-            content=temp_response.choices[0].message.content,
-            additional_kwargs=temp_response.json()
-            if isinstance(temp_response.json(), dict)
-            else json.loads(temp_response.json()),
-        )
-        logger.debug(
-            f"[pne chat] response: {json.dumps(response.additional_kwargs, indent=2)}"
-        )
-        return response
-
-    def __call__(self, instruction: str, *args, **kwargs) -> str:
-        return self._predict(
-            MessageSet.from_listdict_data(
-                [
-                    {"content": "You are a helpful assistant.", "role": "system"},
-                    {"content": instruction, "role": "user"},
-                ]
-            )
-        ).content
-
-
-def _convert_message(messages: Union[List, MessageSet, str]) -> MessageSet:
-    """Convert str or List[Dict] to MessageSet.
-
-    Args:
-        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
-            API type data(List[Dict]) or MessageSet type.
-
-    Returns:
-        Return MessageSet type data.
-    """
-    if isinstance(messages, str):
-        messages: List[Dict] = [
-            {"content": "You are a helpful assistant", "role": "system"},
-            {"content": messages, "role": "user"},
-        ]
-    if isinstance(messages, list):
-        messages: MessageSet = MessageSet.from_listdict_data(messages)
-
-    return messages
-
-
-def _get_llm(
-    model: str = "gpt-3.5-turbo",
-    model_config: Optional[dict] = None,
-    custom_llm: Optional[BaseLLM] = None,
-) -> BaseLLM:
-    """Get LLM instance.
-
-    Args:
-        model(str): LLM model.
-        model_config(dict): LLM model config.
-        custom_llm(BaseLLM): custom LLM instance.
-
-    Returns:
-        Return LLM instance.
-    """
-    if custom_llm:
-        return custom_llm
-
-    return _LiteLLM(model=model, model_config=model_config)
-
-
-class AIChat:
-    def __init__(
-        self,
-        model: str = "gpt-3.5-turbo",
-        model_config: Optional[dict] = None,
-        tools: Optional[List[ToolTypes]] = None,
-        custom_llm: Optional[BaseLLM] = None,
-        enable_plan: bool = False,
-    ):
-        """Initialize the AIChat.
-
-        Args:
-            model(str): LLM model name, eg: "gpt-3.5-turbo".
-            model_config(Optional[dict]): LLM model config.
-            tools(Optional[List[ToolTypes]]): specified tools for llm, if exists, AIChat
-                will use Agent to run.
-            custom_llm(Optional[BaseLLM]): custom LLM instance.
-            enable_plan(bool): use Agent with plan ability if True.
-        """
-        self.llm: BaseLLM = _get_llm(model, model_config, custom_llm)
-        self.tools: Optional[List[ToolTypes]] = tools
-        self.agent: Optional[BaseAgent] = None
-
-        if tools:
-            if enable_plan:
-                self.agent = AssistantAgent(tools=self.tools, llm=self.llm)
-                logger.info("[pne chat] invoke AssistantAgent with plan ability.")
-            else:
-                self.agent = ToolAgent(tools=self.tools, llm=self.llm)
-                logger.info("[pne chat] invoke ToolAgent.")
-
-    def run(
-        self,
-        messages: Union[List, MessageSet, str],
-        output_schema: Optional[type(BaseModel)] = None,
-        examples: Optional[List[BaseModel]] = None,
-        return_raw_response: bool = False,
-        stream: bool = False,
-        **kwargs,
-    ) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
-        """Run the AIChat.
-
-        Args:
-            messages(Union[List, MessageSet, str]): chat messages. It can be str or
-                OpenAI.
-            API type data(List[Dict]) or MessageSet type.
-            output_schema(BaseModel): specified return type. See detail on in
-                OutputFormatter module.
-            examples(List[BaseModel]): examples for output_schema. See detail
-                on: OutputFormatter.
-            return_raw_response(bool): return OpenAI completion result if true,
-                otherwise return string type data.
-            stream(bool): return stream iterator if True.
-
-        Returns:
-            Return string normally, it means enable_original_return is default False. if
-                tools is provided, agent return string type data.
-            Return BaseMessage if enable_original_return is True and not in agent mode.
-            Return List[BaseMessage] if stream is True.
-            Return T if output_schema is provided.
-        """
-        if stream and (output_schema or self.tools):
-            raise ValueError(
-                "stream, tools and output_schema can't be True at the same time, "
-                "because stream is used to return Iterator[BaseMessage]."
-            )
-
-        if self.agent:
-            return self.agent.run(messages, output_schema=output_schema)
-
-        messages: MessageSet = _convert_message(messages)
-
-        # add output format into the last prompt if provide
-        if output_schema:
-            instruction: str = get_formatted_instructions(
-                json_schema=output_schema, examples=examples
-            )
-            messages.messages[-1].content += f"\n{instruction}"
-
-        logger.info(f"[pne chat] messages: {messages}")
-
-        response: AssistantMessage = self.llm.predict(messages, stream=stream, **kwargs)
-
-        logger.info(f"[pne chat] response: {response.additional_kwargs}")
-
-        # return output format if provide
-        if output_schema:
-            logger.info("[pne chat] return formatted response.")
-            return formatting_result(
-                pydantic_obj=output_schema, llm_output=response.content
-            )
-
-        return response if return_raw_response else response.content
-
-
-def chat(
-    messages: Union[List, MessageSet, str],
-    *,
-    model: str = "gpt-3.5-turbo",
-    model_config: Optional[dict] = None,
-    tools: Optional[List[ToolTypes]] = None,
-    output_schema: Optional[type(BaseModel)] = None,
-    examples: Optional[List[BaseModel]] = None,
-    return_raw_response: bool = False,
-    custom_llm: Optional[BaseLLM] = None,
-    enable_plan: bool = False,
-    stream: bool = False,
-    **kwargs,
-) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
-    """A universal chat method, you can chat any model like OpenAI completion.
-    It should be noted that chat() is only support chat model currently.
-
-    Args:
-        messages(Union[List, MessageSet, str]): chat messages. It can be str or OpenAI
-            API type data(List[Dict]) or MessageSet type.
-        model(str): LLM model. Currently only support chat model.
-        model_config(Optional[dict]): LLM model config.
-        tools(List[BaseTool] | None): specified tools for llm.
-        output_schema(BaseModel): specified return type. See detail on: OutputFormatter.
-        examples(List[BaseModel]): examples for output_schema. See detail
-            on: OutputFormatter.
-        return_raw_response(bool): return OpenAI completion result if true, otherwise
-            return string type data.
-        custom_llm(BaseLLM): You can use custom LLM if you have.
-        enable_plan(bool): use Agent with plan ability if True.
-        stream(bool): return stream iterator if True.
-        **kwargs: litellm kwargs
-
-    Returns:
-        Return string normally, it means enable_original_return is default False.
-        Return BaseMessage if enable_original_return is True.
-        Return List[BaseMessage] if stream is True.
-        Return T if output_schema is provided.
-    """
-    return AIChat(
-        model=model,
-        model_config=model_config,
-        tools=tools,
-        custom_llm=custom_llm,
-        enable_plan=enable_plan,
-    ).run(
-        messages=messages,
-        output_schema=output_schema,
-        examples=examples,
-        return_raw_response=return_raw_response,
-        stream=stream,
-        **kwargs,
-    )
+import json
+from typing import Dict, List, Optional, Tuple, TypeVar, Union
+
+import litellm
+
+from promptulate.agents.base import BaseAgent
+from promptulate.agents.tool_agent.agent import ToolAgent
+from promptulate.beta.agents.assistant_agent import AssistantAgent
+from promptulate.llms import BaseLLM
+from promptulate.output_formatter import formatting_result, get_formatted_instructions
+from promptulate.pydantic_v1 import BaseModel
+from promptulate.schema import (
+    AssistantMessage,
+    BaseMessage,
+    MessageSet,
+    StreamIterator,
+)
+from promptulate.tools.base import BaseTool, ToolTypes
+from promptulate.utils.logger import logger
+
+T = TypeVar("T", bound=BaseModel)
+
+
+def parse_content(chunk) -> Tuple[str, dict]:
+    """Parse the litellm chunk.
+
+    Args:
+        chunk: litellm chunk.
+
+    Returns:
+        content: The content of the chunk.
+        ret_data: The additional data of the chunk.
+    """
+    content = chunk.choices[0].delta.content
+    ret_data = json.loads(chunk.json())
+    return content, ret_data
+
+
+class _LiteLLM(BaseLLM):
+    def __init__(
+        self, model: str, model_config: Optional[dict] = None, *args, **kwargs
+    ):
+        logger.info(f"[pne chat] init LiteLLM, model: {model} config: {model_config}")
+        super().__init__(*args, **kwargs)
+        self._model: str = model
+        self._model_config: dict = model_config or {}
+
+    def _predict(
+        self, messages: MessageSet, stream: bool = False, *args, **kwargs
+    ) -> Union[AssistantMessage, StreamIterator]:
+        logger.info(f"[pne chat] prompts: {messages.string_messages}")
+        temp_response = litellm.completion(
+            model=self._model, messages=messages.listdict_messages, **self._model_config
+        )
+
+        if stream:
+            return StreamIterator(
+                response_stream=temp_response,
+                parse_content=parse_content,
+                return_raw_response=False,
+            )
+
+        response = AssistantMessage(
+            content=temp_response.choices[0].message.content,
+            additional_kwargs=temp_response.json()
+            if isinstance(temp_response.json(), dict)
+            else json.loads(temp_response.json()),
+        )
+        logger.debug(
+            f"[pne chat] response: {json.dumps(response.additional_kwargs, indent=2)}"
+        )
+        return response
+
+    def __call__(self, instruction: str, *args, **kwargs) -> str:
+        return self._predict(
+            MessageSet.from_listdict_data(
+                [
+                    {"content": "You are a helpful assistant.", "role": "system"},
+                    {"content": instruction, "role": "user"},
+                ]
+            )
+        ).content
+
+
+def _convert_message(messages: Union[List, MessageSet, str]) -> MessageSet:
+    """Convert str or List[Dict] to MessageSet.
+
+    Args:
+        messages(Union[List, MessageSet, str]): chat messages. It can be a str,
+            OpenAI API type data (List[Dict]), or MessageSet type.
+
+    Returns:
+        Return MessageSet type data.
+    """
+    if isinstance(messages, str):
+        messages: List[Dict] = [
+            {"content": "You are a helpful assistant", "role": "system"},
+            {"content": messages, "role": "user"},
+        ]
+    if isinstance(messages, list):
+        messages: MessageSet = MessageSet.from_listdict_data(messages)
+
+    return messages
+
+
+def _get_llm(
+    model: str = "gpt-3.5-turbo",
+    model_config: Optional[dict] = None,
+    custom_llm: Optional[BaseLLM] = None,
+) -> BaseLLM:
+    """Get LLM instance.
+
+    Args:
+        model(str): LLM model.
+        model_config(dict): LLM model config.
+        custom_llm(BaseLLM): custom LLM instance.
+
+    Returns:
+        Return LLM instance.
+    """
+    if custom_llm:
+        return custom_llm
+
+    return _LiteLLM(model=model, model_config=model_config)
+
+
+class AIChat:
+    def __init__(
+        self,
+        model: str = "gpt-3.5-turbo",
+        model_config: Optional[dict] = None,
+        tools: Optional[List[ToolTypes]] = None,
+        custom_llm: Optional[BaseLLM] = None,
+        enable_plan: bool = False,
+    ):
+        """Initialize the AIChat.
+
+        Args:
+            model(str): LLM model name, e.g. "gpt-3.5-turbo".
+            model_config(Optional[dict]): LLM model config.
+            tools(Optional[List[ToolTypes]]): specified tools for the llm; if present,
+                AIChat will use an Agent to run.
+            custom_llm(Optional[BaseLLM]): custom LLM instance.
+            enable_plan(bool): use an Agent with plan ability if True.
+        """
+        self.llm: BaseLLM = _get_llm(model, model_config, custom_llm)
+        self.tools: Optional[List[ToolTypes]] = tools
+        self.agent: Optional[BaseAgent] = None
+
+        if tools:
+            if enable_plan:
+                self.agent = AssistantAgent(tools=self.tools, llm=self.llm)
+                logger.info("[pne chat] invoke AssistantAgent with plan ability.")
+            else:
+                self.agent = ToolAgent(tools=self.tools, llm=self.llm)
+                logger.info("[pne chat] invoke ToolAgent.")
+
+    def run(
+        self,
+        messages: Union[List, MessageSet, str],
+        output_schema: Optional[type(BaseModel)] = None,
+        examples: Optional[List[BaseModel]] = None,
+        return_raw_response: bool = False,
+        stream: bool = False,
+        **kwargs,
+    ) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
+        """Run the AIChat.
+
+        Args:
+            messages(Union[List, MessageSet, str]): chat messages. It can be a str,
+                OpenAI API type data (List[Dict]), or MessageSet type.
+            output_schema(BaseModel): specified return type. See details in the
+                OutputFormatter module.
+            examples(List[BaseModel]): examples for output_schema. See details in
+                the OutputFormatter module.
+            return_raw_response(bool): return OpenAI completion result if true,
+                otherwise return string type data.
+            stream(bool): return stream iterator if True.
+
+        Returns:
+            Return a string normally, i.e. when return_raw_response is the default
+                False. If tools are provided, the agent returns string type data.
+            Return BaseMessage if return_raw_response is True and not in agent mode.
+            Return a StreamIterator if stream is True.
+            Return T if output_schema is provided.
+        """
+        if stream and (output_schema or self.tools):
+            raise ValueError(
+                "stream, tools and output_schema can't be True at the same time, "
+                "because stream is used to return Iterator[BaseMessage]."
+            )
+
+        if self.agent:
+            return self.agent.run(messages, output_schema=output_schema)
+
+        messages: MessageSet = _convert_message(messages)
+
+        # add output format into the last prompt if provided
+        if output_schema:
+            instruction: str = get_formatted_instructions(
+                json_schema=output_schema, examples=examples
+            )
+            messages.messages[-1].content += f"\n{instruction}"
+
+        logger.info(f"[pne chat] messages: {messages}")
+
+        response: AssistantMessage = self.llm.predict(messages, stream=stream, **kwargs)
+
+        logger.info(f"[pne chat] response: {response.additional_kwargs}")
+
+        # return formatted output if provided
+        if output_schema:
+            logger.info("[pne chat] return formatted response.")
+            return formatting_result(
+                pydantic_obj=output_schema, llm_output=response.content
+            )
+
+        return response if return_raw_response else response.content
+
+
+def chat(
+    messages: Union[List, MessageSet, str],
+    *,
+    model: str = "gpt-3.5-turbo",
+    model_config: Optional[dict] = None,
+    tools: Optional[List[ToolTypes]] = None,
+    output_schema: Optional[type(BaseModel)] = None,
+    examples: Optional[List[BaseModel]] = None,
+    return_raw_response: bool = False,
+    custom_llm: Optional[BaseLLM] = None,
+    enable_plan: bool = False,
+    stream: bool = False,
+    **kwargs,
+) -> Union[str, BaseMessage, T, List[BaseMessage], StreamIterator]:
+    """A universal chat method, you can chat any model like OpenAI completion.
+    It should be noted that chat() is only support chat model currently.
+
+    Args:
+        messages(Union[List, MessageSet, str]): chat messages. It can be a str,
+            OpenAI API type data (List[Dict]), or MessageSet type.
+        model(str): LLM model. Currently only chat models are supported.
+        model_config(Optional[dict]): LLM model config.
+        tools(Optional[List[ToolTypes]]): specified tools for the llm.
+        output_schema(BaseModel): specified return type. See details in OutputFormatter.
+        examples(List[BaseModel]): examples for output_schema. See details in
+            OutputFormatter.
+        return_raw_response(bool): return the raw OpenAI completion result if True,
+            otherwise return string type data.
+        custom_llm(BaseLLM): you can use a custom LLM if you have one.
+        enable_plan(bool): use an Agent with plan ability if True.
+        stream(bool): return a stream iterator if True.
+        **kwargs: litellm kwargs
+
+    Returns:
+        Return a string normally, i.e. when return_raw_response is the default False.
+        Return BaseMessage if return_raw_response is True.
+        Return a StreamIterator if stream is True.
+        Return T if output_schema is provided.
+    """
+    return AIChat(
+        model=model,
+        model_config=model_config,
+        tools=tools,
+        custom_llm=custom_llm,
+        enable_plan=enable_plan,
+    ).run(
+        messages=messages,
+        output_schema=output_schema,
+        examples=examples,
+        return_raw_response=return_raw_response,
+        stream=stream,
+        **kwargs,
+    )
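A minimal usage sketch of the `chat()` entry point defined above, assuming `chat` is re-exported at the package level (the usual `pne.chat` style) and that a LiteLLM-compatible API key is configured in the environment; the model name, prompts, and pydantic fields are illustrative:

```python
import promptulate as pne
from promptulate.pydantic_v1 import BaseModel


class Answer(BaseModel):
    city: str
    reason: str


# Plain string prompt: returns a str by default.
text: str = pne.chat("What is the capital of France?", model="gpt-3.5-turbo")

# Structured output: formatted instructions are appended to the last prompt and
# the completion is parsed back into the Answer model.
answer: Answer = pne.chat("Which city should I visit and why?", output_schema=Answer)

# Streaming: returns a StreamIterator that yields str chunks.
for chunk in pne.chat("Tell me a short story.", stream=True):
    print(chunk, end="")
```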
diff --git a/promptulate/schema.py b/promptulate/schema.py
index 38a7f817..cad3ee40 100644
--- a/promptulate/schema.py
+++ b/promptulate/schema.py
@@ -1,302 +1,295 @@
-from abc import abstractmethod
-from enum import Enum
-from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Union
-
-from promptulate.pydantic_v1 import BaseModel, Field
-
-__all__ = [
-    "LLMType",
-    "BaseMessage",
-    "CompletionMessage",
-    "SystemMessage",
-    "UserMessage",
-    "AssistantMessage",
-    "MessageSet",
-    "init_chat_message_history",
-    "ToolTypes",
-    "StreamIterator",
-]
-
-if TYPE_CHECKING:
-    from langchain.tools.base import BaseTool as LangchainBaseToolType  # noqa
-    from promptulate.tools.base import BaseTool, Tool  # noqa
-
-ToolTypes = Union["BaseTool", "Tool", Callable, "LangchainBaseToolType"]
-
-
-class BaseMessage(BaseModel):
-    """Message basic object."""
-
-    content: str
-    additional_kwargs: dict = Field(default_factory=dict)
-
-    @property
-    @abstractmethod
-    def type(self) -> str:
-        """Type of the message, used for serialization."""
-
-
-class StreamIterator:
-    """
-    This class is an iterator for the response stream from the LLM model.
-
-    Attributes:
-        response_stream: The stream of responses from the LLM model.
-        parse_content: The callback function to parse the chunk.
-        return_raw_response: A boolean indicating whether to return the raw response
-        or not.
-    """
-
-    def __init__(
-        self,
-        response_stream,
-        parse_content: callable([[Any], [str, str]]),
-        return_raw_response: bool = False,
-    ):
-        """
-        The constructor for BaseStreamIterator class.
-
-        Parameters:
-            response_stream: The stream of responses from the LLM model.
-            return_raw_response (bool): A flag indicating whether to return the raw
-            response or not.
-        """
-        self.response_stream = response_stream
-        self.return_raw_response = return_raw_response
-        self.parse_content = parse_content
-
-    def __iter__(self) -> Union[Iterator[BaseMessage], Iterator[str]]:
-        """
-        The iterator method for the BaseStreamIterator class.
-
-        Returns:
-            self: An instance of the BaseStreamIterator class.
-        """
-        return self
-
-    def parse_chunk(self, chunk) -> Optional[Union[str, BaseMessage]]:
-        """
-        This method is used to parse a chunk from the response stream. It returns
-        None if the chunk is empty, otherwise it returns the parsed chunk.
-
-        Parameters:
-            chunk: The chunk to be parsed.
-
-        Returns:
-            Optional: The parsed chunk or None if the chunk is empty.
-        """
-        content, ret_data = self.parse_content(chunk)
-        if content is None:
-            return None
-        if self.return_raw_response:
-            additional_kwargs: dict = ret_data
-            message = AssistantMessage(
-                content=content,
-                additional_kwargs=additional_kwargs,
-            )
-            return message
-
-        return content
-
-    def __next__(self) -> Union[str, BaseMessage]:
-        """
-        The next method for the BaseStreamIterator class.
-
-        This method is used to get the next response from the LLM model. It iterates
-        over the response stream and parses each chunk using the parse_chunk method.
-        If the parsed chunk is not None, it returns the parsed chunk as the next
-        response. If there are no more messages in the response stream, it raises a
-        StopIteration exception.
-
-        Returns:
-            Union[str, BaseMessage]: The next response from the LLM model. If
-            return_raw_response is True, it returns an AssistantMessage instance,
-            otherwise it returns the content of the response as a string.
-        """
-        for chunk in self.response_stream:
-            message = self.parse_chunk(chunk)
-            if message is not None:
-                return message
-
-        # If there are no more messages, stop the iteration
-        raise StopIteration
-
-
-class CompletionMessage(BaseMessage):
-    """Type of completion message. Used in OpenAI currently"""
-
-    @property
-    def type(self) -> str:
-        return "completion"
-
-
-class SystemMessage(BaseMessage):
-    """Type of message that is a system message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        """Type of the message, used for serialization."""
-        return "system"
-
-
-class UserMessage(BaseMessage):
-    """Type of message that is a user message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        return "user"
-
-
-class AssistantMessage(BaseMessage):
-    """Type of message that is an assistant message. Currently used in OpenAI."""
-
-    @property
-    def type(self) -> str:
-        return "assistant"
-
-
-MESSAGE_TYPE = {
-    "completion": CompletionMessage,
-    "system": SystemMessage,
-    "user": UserMessage,
-    "assistant": AssistantMessage,
-}
-
-
-class LLMType(str, Enum):
-    """All LLM type here"""
-
-    OpenAI = "OpenAI"
-    ChatOpenAI = "ChatOpenAI"
-    ErnieBot = "ErnieBot"
-    QianFan = "QianFan"
-    ZhiPu = "ZhiPu"
-
-
-class MessageSet:
-    """MessageSet can be used in Memory, LLMs, Framework and some else.
-    It's a universal chat message format in promptulate.
-    """
-
-    def __init__(
-        self,
-        messages: List[BaseMessage],
-        conversation_id: Optional[str] = None,
-        additional_kwargs: Optional[dict] = None,
-    ):
-        self.messages: List[BaseMessage] = messages
-        self.conversation_id: Optional[str] = conversation_id
-        self.additional_kwargs: dict = additional_kwargs or {}
-
-    @classmethod
-    def from_listdict_data(
-        cls, value: List[Dict], additional_kwargs: Optional[dict] = None
-    ) -> "MessageSet":
-        """initialize MessageSet from a List[Dict] data
-
-        Args:
-            value(List[Dict]): the example is as follows:
-                [
-                    {"type": "user", "content": "This is a message1."},
-                    {"type": "assistant", "content": "This is a message2."}
-                ]
-            additional_kwargs(Optional[dict]): additional kwargs
-
-        Returns:
-            initialized MessageSet
-        """
-        messages: List[BaseMessage] = [
-            MESSAGE_TYPE[item["role"]](content=item["content"]) for item in value
-        ]
-        return cls(messages=messages, additional_kwargs=additional_kwargs)
-
-    @property
-    def listdict_messages(self) -> List[Dict]:
-        converted_messages = []
-        for message in self.messages:
-            converted_messages.append(
-                {"role": message.type, "content": message.content}
-            )
-        return converted_messages
-
-    @property
-    def memory_messages(self) -> List[Dict]:
-        return self.listdict_messages
-
-    def to_llm_prompt(self, llm_type: LLMType) -> Any:
-        """Convert the MessageSet messages to specified llm prompt"""
-        if not llm_type:
-            ValueError(
-                "Missing llm_type, llm_type is needed if you want to use llm_prompt."
-            )
-        return _to_llm_prompt[llm_type](self)
-
-    @property
-    def string_messages(self) -> str:
-        """Convert the message to a string type, it can be used as a prompt for OpenAI
-        completion."""
-        string_result = ""
-        for message in self.messages:
-            string_result += f"{message.content}\n"
-        return string_result
-
-    def add_message(self, message: BaseMessage) -> None:
-        self.messages.append(message)
-
-    def add_completion_message(self, message: str) -> None:
-        self.messages.append(CompletionMessage(content=message))
-
-    def add_system_message(self, message: str) -> None:
-        self.messages.append(SystemMessage(content=message))
-
-    def add_user_message(self, message: str) -> None:
-        self.messages.append(UserMessage(content=message))
-
-    def add_ai_message(self, message: str) -> None:
-        self.messages.append(AssistantMessage(content=message))
-
-
-def init_chat_message_history(
-    system_content: str, user_content: str, llm: LLMType
-) -> MessageSet:
-    if llm == llm.ChatOpenAI or llm == llm.OpenAI:
-        messages = [
-            SystemMessage(content=system_content),
-            UserMessage(content=user_content),
-        ]
-    else:
-        messages = [
-            UserMessage(content=system_content),
-            AssistantMessage(content="好的"),
-            UserMessage(content=user_content),
-        ]
-    return MessageSet(messages=messages)
-
-
-def _to_openai_llm_prompt(message_set: MessageSet) -> str:
-    return message_set.string_messages
-
-
-def _to_chat_openai_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_ernie_bot_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_qian_fan_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-def _to_zhipu_llm_prompt(message_set: MessageSet) -> List[Dict]:
-    return message_set.listdict_messages
-
-
-_to_llm_prompt: Dict[LLMType, Callable] = {
-    LLMType.OpenAI: _to_openai_llm_prompt,
-    LLMType.ChatOpenAI: _to_chat_openai_llm_prompt,
-    LLMType.ErnieBot: _to_ernie_bot_llm_prompt,
-    LLMType.QianFan: _to_qian_fan_llm_prompt,
-    LLMType.ZhiPu: _to_zhipu_llm_prompt,
-}
+from abc import abstractmethod
+from enum import Enum
+from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
+
+from promptulate.pydantic_v1 import BaseModel, Field
+
+__all__ = [
+    "LLMType",
+    "BaseMessage",
+    "CompletionMessage",
+    "SystemMessage",
+    "UserMessage",
+    "AssistantMessage",
+    "MessageSet",
+    "init_chat_message_history",
+    "StreamIterator",
+]
+
+
+class BaseMessage(BaseModel):
+    """Message basic object."""
+
+    content: str
+    additional_kwargs: dict = Field(default_factory=dict)
+
+    @property
+    @abstractmethod
+    def type(self) -> str:
+        """Type of the message, used for serialization."""
+
+
+class StreamIterator:
+    """
+    This class is an iterator for the response stream from the LLM model.
+
+    Attributes:
+        response_stream: The stream of responses from the LLM model.
+        parse_content: The callback function to parse the chunk.
+        return_raw_response: A boolean indicating whether to return the raw response
+        or not.
+    """
+
+    def __init__(
+        self,
+        response_stream,
+        parse_content: Callable[[Any], Tuple[str, dict]],
+        return_raw_response: bool = False,
+    ):
+        """
+        The constructor for BaseStreamIterator class.
+
+        Parameters:
+            response_stream: The stream of responses from the LLM model.
+            return_raw_response (bool): A flag indicating whether to return the raw
+            response or not.
+        """
+        self.response_stream = response_stream
+        self.return_raw_response = return_raw_response
+        self.parse_content = parse_content
+
+    def __iter__(self) -> Union[Iterator[BaseMessage], Iterator[str]]:
+        """
+        The iterator method for the StreamIterator class.
+
+        Returns:
+            self: An instance of the StreamIterator class.
+        """
+        return self
+
+    def parse_chunk(self, chunk) -> Optional[Union[str, BaseMessage]]:
+        """
+        This method is used to parse a chunk from the response stream. It returns
+        None if the chunk is empty, otherwise it returns the parsed chunk.
+
+        Parameters:
+            chunk: The chunk to be parsed.
+
+        Returns:
+            Optional: The parsed chunk or None if the chunk is empty.
+        """
+        content, ret_data = self.parse_content(chunk)
+        if content is None:
+            return None
+        if self.return_raw_response:
+            additional_kwargs: dict = ret_data
+            message = AssistantMessage(
+                content=content,
+                additional_kwargs=additional_kwargs,
+            )
+            return message
+
+        return content
+
+    def __next__(self) -> Union[str, BaseMessage]:
+        """
+        The next method for the StreamIterator class.
+
+        This method is used to get the next response from the LLM model. It iterates
+        over the response stream and parses each chunk using the parse_chunk method.
+        If the parsed chunk is not None, it returns the parsed chunk as the next
+        response. If there are no more messages in the response stream, it raises a
+        StopIteration exception.
+
+        Returns:
+            Union[str, BaseMessage]: The next response from the LLM model. If
+            return_raw_response is True, it returns an AssistantMessage instance,
+            otherwise it returns the content of the response as a string.
+        """
+        for chunk in self.response_stream:
+            message = self.parse_chunk(chunk)
+            if message is not None:
+                return message
+
+        # If there are no more messages, stop the iteration
+        raise StopIteration
+
+
+class CompletionMessage(BaseMessage):
+    """Type of completion message. Used in OpenAI currently"""
+
+    @property
+    def type(self) -> str:
+        return "completion"
+
+
+class SystemMessage(BaseMessage):
+    """Type of message that is a system message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        """Type of the message, used for serialization."""
+        return "system"
+
+
+class UserMessage(BaseMessage):
+    """Type of message that is a user message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        return "user"
+
+
+class AssistantMessage(BaseMessage):
+    """Type of message that is an assistant message. Currently used in OpenAI."""
+
+    @property
+    def type(self) -> str:
+        return "assistant"
+
+
+MESSAGE_TYPE = {
+    "completion": CompletionMessage,
+    "system": SystemMessage,
+    "user": UserMessage,
+    "assistant": AssistantMessage,
+}
+
+
+class LLMType(str, Enum):
+    """All LLM type here"""
+
+    OpenAI = "OpenAI"
+    ChatOpenAI = "ChatOpenAI"
+    ErnieBot = "ErnieBot"
+    QianFan = "QianFan"
+    ZhiPu = "ZhiPu"
+
+
+class MessageSet:
+    """MessageSet can be used in Memory, LLMs, Framework and some else.
+    It's a universal chat message format in promptulate.
+    """
+
+    def __init__(
+        self,
+        messages: List[BaseMessage],
+        conversation_id: Optional[str] = None,
+        additional_kwargs: Optional[dict] = None,
+    ):
+        self.messages: List[BaseMessage] = messages
+        self.conversation_id: Optional[str] = conversation_id
+        self.additional_kwargs: dict = additional_kwargs or {}
+
+    @classmethod
+    def from_listdict_data(
+        cls, value: List[Dict], additional_kwargs: Optional[dict] = None
+    ) -> "MessageSet":
+        """initialize MessageSet from a List[Dict] data
+
+        Args:
+            value(List[Dict]): the example is as follows:
+                [
+                    {"type": "user", "content": "This is a message1."},
+                    {"type": "assistant", "content": "This is a message2."}
+                ]
+            additional_kwargs(Optional[dict]): additional kwargs
+
+        Returns:
+            initialized MessageSet
+        """
+        messages: List[BaseMessage] = [
+            MESSAGE_TYPE[item["role"]](content=item["content"]) for item in value
+        ]
+        return cls(messages=messages, additional_kwargs=additional_kwargs)
+
+    @property
+    def listdict_messages(self) -> List[Dict]:
+        converted_messages = []
+        for message in self.messages:
+            converted_messages.append(
+                {"role": message.type, "content": message.content}
+            )
+        return converted_messages
+
+    @property
+    def memory_messages(self) -> List[Dict]:
+        return self.listdict_messages
+
+    def to_llm_prompt(self, llm_type: LLMType) -> Any:
+        """Convert the MessageSet messages to specified llm prompt"""
+        if not llm_type:
+            ValueError(
+                "Missing llm_type, llm_type is needed if you want to use llm_prompt."
+            )
+        return _to_llm_prompt[llm_type](self)
+
+    @property
+    def string_messages(self) -> str:
+        """Convert the message to a string type, it can be used as a prompt for OpenAI
+        completion."""
+        string_result = ""
+        for message in self.messages:
+            string_result += f"{message.content}\n"
+        return string_result
+
+    def add_message(self, message: BaseMessage) -> None:
+        self.messages.append(message)
+
+    def add_completion_message(self, message: str) -> None:
+        self.messages.append(CompletionMessage(content=message))
+
+    def add_system_message(self, message: str) -> None:
+        self.messages.append(SystemMessage(content=message))
+
+    def add_user_message(self, message: str) -> None:
+        self.messages.append(UserMessage(content=message))
+
+    def add_ai_message(self, message: str) -> None:
+        self.messages.append(AssistantMessage(content=message))
+
+
+def init_chat_message_history(
+    system_content: str, user_content: str, llm: LLMType
+) -> MessageSet:
+    if llm == llm.ChatOpenAI or llm == llm.OpenAI:
+        messages = [
+            SystemMessage(content=system_content),
+            UserMessage(content=user_content),
+        ]
+    else:
+        messages = [
+            UserMessage(content=system_content),
+            AssistantMessage(content="好的"),
+            UserMessage(content=user_content),
+        ]
+    return MessageSet(messages=messages)
+
+
+def _to_openai_llm_prompt(message_set: MessageSet) -> str:
+    return message_set.string_messages
+
+
+def _to_chat_openai_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_ernie_bot_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_qian_fan_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+def _to_zhipu_llm_prompt(message_set: MessageSet) -> List[Dict]:
+    return message_set.listdict_messages
+
+
+_to_llm_prompt: Dict[LLMType, Callable] = {
+    LLMType.OpenAI: _to_openai_llm_prompt,
+    LLMType.ChatOpenAI: _to_chat_openai_llm_prompt,
+    LLMType.ErnieBot: _to_ernie_bot_llm_prompt,
+    LLMType.QianFan: _to_qian_fan_llm_prompt,
+    LLMType.ZhiPu: _to_zhipu_llm_prompt,
+}
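A minimal sketch of building and consuming a `MessageSet` as defined above; note that `from_listdict_data` keys off `"role"` (the message contents are illustrative):

```python
from promptulate.schema import MessageSet

# Build from OpenAI-style dicts; MESSAGE_TYPE maps each "role" to a message class.
messages = MessageSet.from_listdict_data(
    [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
)

# Append further turns with the helper methods.
messages.add_ai_message("Hi! How can I help you today?")

print(messages.listdict_messages)  # [{'role': 'system', ...}, ...]
print(messages.string_messages)    # newline-joined message contents
```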
diff --git a/promptulate/tools/base.py b/promptulate/tools/base.py
index 92b4ed70..cfa7243a 100644
--- a/promptulate/tools/base.py
+++ b/promptulate/tools/base.py
@@ -1,385 +1,391 @@
-import inspect
-import warnings
-from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, List, Optional, Type, Union
-
-from promptulate.hook.base import Hook, HookTable
-from promptulate.pydantic_v1 import (
-    BaseModel,
-    Extra,
-    create_model,
-    validate_arguments,
-)
-from promptulate.utils.logger import logger
-
-
-class _SchemaConfig:
-    """Configuration for the pydantic model."""
-
-    extra: Any = Extra.forbid
-    arbitrary_types_allowed: bool = True
-
-
-def _create_subset_model(
-    name: str, model: BaseModel, field_names: list
-) -> Type[BaseModel]:
-    """Create a pydantic model with only a subset of model's fields."""
-    fields = {}
-    for field_name in field_names:
-        field = model.__fields__[field_name]
-        fields[field_name] = (field.outer_type_, field.field_info)
-    return create_model(name, **fields)
-
-
-def _pydantic_to_refined_schema(pydantic_obj: type(BaseModel)) -> Dict[str, Any]:
-    """Get refined schema(OpenAI function call type schema) from pydantic object."""
-    # Remove useless fields.
-    refined_schema = pydantic_obj.schema()
-
-    if "title" in refined_schema:
-        del refined_schema["title"]
-    for k, v in refined_schema["properties"].items():
-        if "title" in v:
-            del v["title"]
-
-    return refined_schema
-
-
-def _validate_refined_schema(schema: Dict) -> bool:
-    """Validate refined schema(OpenAI function call type schema).
-
-    Args:
-        schema: any dict
-
-    Returns:
-        bool: True if schema is openai function call type schema, False otherwise.
-    """
-    if "name" not in schema or "description" not in schema:
-        return False
-
-    if "properties" not in schema:
-        return False
-
-    return True
-
-
-def function_to_tool_schema(func: Callable) -> Dict[str, Any]:
-    """Create a tool schema from a function's signature.
-
-    Args:
-        func: Function to generate the schema from
-
-    Returns:
-        A OpenAI function call type json schema built by pydantic model.
-        ref: https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
-    """  # noqa
-    # https://docs.pydantic.dev/latest/usage/validation_decorator/
-    inferred_model = validate_arguments(func, config=_SchemaConfig).model  # type: ignore # noqa
-
-    # Extract function parameter names.
-    # Pydantic adds placeholder virtual fields we need to strip
-    signature = inspect.signature(func)
-    valid_properties: List[str] = [
-        param.name for param in signature.parameters.values()
-    ]
-
-    # Create a pydantic model with only the valid fields.
-    created_model = _create_subset_model(
-        f"{func.__name__}Schema", inferred_model, valid_properties
-    )
-    reduced_schema = created_model.schema()
-
-    # reduce schema
-    reduced_schema["description"] = func.__doc__ or ""
-    reduced_schema["name"] = func.__name__
-
-    if "title" in reduced_schema:
-        del reduced_schema["title"]
-    for k, v in reduced_schema["properties"].items():
-        if "title" in v:
-            del v["title"]
-
-    return reduced_schema
-
-
-class BaseTool(ABC, BaseModel):
-    """Interface tools must implement."""
-
-    name: str
-    """The unique name of the tool that clearly communicates its purpose."""
-    description: str
-    """Used to tell the model how/when/why to use the tool.
-    You can provide few-shot examples as a part of the description."""
-    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
-    """The parameters that the tool accepts. This can be a dictionary or a Pydantic
-    model."""
-    example: List[str] = None
-    """Show how to use this tool. This is few shot for agent. You few shot may like:
-
-    example1 = "Question: What is 37593 * 67?\n```\n37593 * 67\n```\nnumexpr.evaluate("37593 * 67")\nAnswer:2518731"
-    example2 = "Question: What is 37593^(1/5)?\n```\n37593**(1/5)\n```\nnumexpr.evaluate("37593**(1/5)")\nAnswer:8.222831614237718"
-    few_shot_example = [example1, example2]
-    """  # noqa
-
-    def __init__(self, **kwargs):
-        """Custom tool config.
-
-        Args:
-            **kwargs:
-                hooks(List[Callable]): for adding to hook_manager
-        """
-        warnings.warn(
-            "BaseTool is deprecated at v1.7.0. promptulate.tools.base.Tool is recommended.",  # noqa: E501
-            DeprecationWarning,
-        )
-        super().__init__(**kwargs)
-        if "hooks" in kwargs and kwargs["hooks"]:
-            for hook in kwargs["hooks"]:
-                Hook.mount_instance_hook(hook, self)
-        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
-
-    class Config:
-        arbitrary_types_allowed = True
-        extra = Extra.allow
-
-    def run(self, *args, **kwargs):
-        """run the tool including specified function and hooks"""
-        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
-        result: Any = self._run(*args, **kwargs)
-        logger.debug(f"[pne tool result] {result}")
-        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
-        return result
-
-    @abstractmethod
-    def _run(self, *args, **kwargs):
-        """Run detail business, implemented by subclass."""
-        raise NotImplementedError()
-
-
-class Tool(ABC):
-    """Abstract base class for tools. All tools must implement this interface."""
-
-    name: str
-    """Tool name"""
-    description: str
-    """Tool description"""
-    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
-    """Tool parameters"""
-
-    def __init__(self, *args, **kwargs):
-        self.check_params()
-        if "hooks" in kwargs and kwargs["hooks"]:
-            for hook in kwargs["hooks"]:
-                Hook.mount_instance_hook(hook, self)
-        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
-
-    def check_params(self):
-        """Check parameters when initialization."""
-        if not getattr(self, "name", None) or not getattr(self, "description", None):
-            raise TypeError(
-                f"{self.__class__.__name__} required parameters 'name' and 'description'."  # noqa: E501
-            )
-
-    def run(self, *args, **kwargs):
-        """run the tool including specified function and hooks"""
-        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
-        result: Any = self._run(*args, **kwargs)
-        logger.debug(f"[pne tool response] name: {self.name} result: {result}")
-        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
-        return result
-
-    @abstractmethod
-    def _run(self, *args, **kwargs):
-        """Run detail business, implemented by subclass."""
-        raise NotImplementedError()
-
-    def to_schema(self) -> Dict[str, Any]:
-        """
-        Converts the Tool instance to a OpenAI function call type JSON schema.
-
-        Returns:
-            dict: A dictionary representing the JSON schema of the Tool instance.
-        """
-        # If there are no parameters, return the basic schema.
-        if not self.parameters:
-            return {
-                "name": self.name,
-                "description": self.description,
-            }
-
-        # If parameters are defined by a Pydantic BaseModel, convert to schema.
-        if isinstance(self.parameters, type) and issubclass(self.parameters, BaseModel):
-            return {
-                "name": self.name,
-                "description": self.description,
-                "parameters": _pydantic_to_refined_schema(self.parameters),
-            }
-
-        # If parameters are defined by a schema dictionary, validate and return it.
-        if isinstance(self.parameters, dict):
-            if not _validate_refined_schema(self.parameters):
-                raise ValueError(
-                    f"The 'parameters' dictionary for {self.__class__.__name__} does not conform to the expected schema."  # noqa: E501
-                )
-            return self.parameters
-
-        # If parameters are neither a BaseModel nor a dictionary, raise an error.
-        raise TypeError(
-            f"The 'parameters' attribute of {self.__class__.__name__} must be either a subclass of BaseModel or a dictionary representing a schema."  # noqa: E501
-        )
-
-
-class ToolImpl(Tool):
-    def __init__(
-        self,
-        name: str,
-        description: str,
-        callback: Callable,
-        parameters: Union[dict, BaseModel] = None,
-        **kwargs,
-    ):
-        self.name: str = name
-        self.description: str = description
-        self.callback: Callable = callback
-        self.parameters: Union[dict, BaseModel] = parameters
-
-        super().__init__(**kwargs)
-
-    @classmethod
-    def from_function(cls, func: Callable) -> "ToolImpl":
-        """Create a ToolImpl instance from a function.
-
-        Args:
-            func: Function to create the ToolImpl instance from.
-
-        Returns:
-            A ToolImpl instance.
-        """
-        if not func.__doc__:
-            err_msg = """Please add docstring and variable type declarations for your function.Here is a best practice:
-def web_search(keyword: str, top_k: int = 10) -> str:
-    \"""search by keyword in web.
-    Args:
-        keyword: keyword to search
-        top_k: top k results to return
-
-    Returns:
-        str: search result
-    \"""
-    return "result"
-
-            """  # noqa
-            raise ValueError(err_msg)
-
-        schema = function_to_tool_schema(func)
-        return cls(
-            name=func.__name__,
-            description=func.__doc__,
-            callback=func,
-            parameters=schema,
-        )
-
-    @classmethod
-    def from_define_tool(
-        cls,
-        callback: Callable,
-        name: str = None,
-        description: str = None,
-        parameters: Optional[Union[Dict, Type[BaseModel]]] = None,
-    ) -> "ToolImpl":
-        """Create a ToolImpl instance from a function.
-
-        Args:
-            callback: Function to create the ToolImpl instance from.
-            name: tool name
-            description: tool description
-            parameters: tool parameters
-
-        Returns:
-            A ToolImpl instance.
-        """
-        if not parameters:
-            schema: dict = function_to_tool_schema(callback)
-        elif isinstance(parameters, dict) and _validate_refined_schema(parameters):
-            schema: dict = parameters
-        elif isinstance(parameters, type) and issubclass(parameters, BaseModel):
-            schema: dict = _pydantic_to_refined_schema(parameters)
-        else:
-            raise TypeError(
-                f"{[cls.__name__]} parameters must be BaseModel or JSON schema."
-            )
-
-        _description = description or ""
-        _doc = callback.__doc__ or ""
-
-        return cls(
-            name=name or callback.__name__,
-            description=f"{_description}\n{_doc}",
-            callback=callback,
-            parameters=schema,
-        )
-
-    @classmethod
-    def from_base_tool(cls, tool: BaseTool) -> "ToolImpl":
-        """Create a ToolImpl instance from a BaseTool instance.
-
-        Args:
-            tool: BaseTool instance to create the ToolImpl instance from.
-
-        Returns:
-            A ToolImpl instance.
-        """
-
-        return cls(
-            name=tool.name,
-            description=tool.description,
-            callback=tool.run,
-            parameters=tool.parameters,
-        )
-
-    def _run(self, *args, **kwargs):
-        return self.callback(*args, **kwargs)
-
-
-def define_tool(
-    *,
-    callback: Callable,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-    parameters: Union[dict, Type[BaseModel]] = None,
-) -> ToolImpl:
-    """
-    A tool with llm or API wrapper will automatically initialize the llm and API wrapper
-    classes, which can avoid this problem by initializing in this way.
-
-    Args:
-        callback: tool function when running
-        name: tool name
-        description: tool description
-        parameters: tool parameters
-
-    Returns:
-        A ToolImpl class (subclass of Tool).
-    """
-
-    return ToolImpl.from_define_tool(
-        callback=callback, name=name, description=description, parameters=parameters
-    )
-
-
-def function_to_tool(func: Callable) -> ToolImpl:
-    """Converts a function to a ToolImpl instance.
-
-    Args:
-        func: Function to convert to a ToolImpl instance.
-
-    Returns:
-        A ToolImpl instance.
-    """
-    return ToolImpl.from_function(func)
-
-
-class BaseToolKit:
-    @abstractmethod
-    def get_tools(self):
-        """get tools in the toolkit"""
+import inspect
+import warnings
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union
+
+from promptulate.hook.base import Hook, HookTable
+from promptulate.pydantic_v1 import (
+    BaseModel,
+    Extra,
+    create_model,
+    validate_arguments,
+)
+from promptulate.utils.logger import logger
+
+if TYPE_CHECKING:
+    from langchain.tools.base import BaseTool as LangchainBaseToolType  # noqa
+
+ToolTypes = Union["BaseTool", "Tool", Callable, "LangchainBaseToolType", "BaseToolKit"]
+
+
+class _SchemaConfig:
+    """Configuration for the pydantic model."""
+
+    extra: Any = Extra.forbid
+    arbitrary_types_allowed: bool = True
+
+
+def _create_subset_model(
+    name: str, model: BaseModel, field_names: list
+) -> Type[BaseModel]:
+    """Create a pydantic model with only a subset of model's fields."""
+    fields = {}
+    for field_name in field_names:
+        field = model.__fields__[field_name]
+        fields[field_name] = (field.outer_type_, field.field_info)
+    return create_model(name, **fields)
+
+
+def _pydantic_to_refined_schema(pydantic_obj: type(BaseModel)) -> Dict[str, Any]:
+    """Get refined schema(OpenAI function call type schema) from pydantic object."""
+    # Remove useless fields.
+    refined_schema = pydantic_obj.schema()
+
+    if "title" in refined_schema:
+        del refined_schema["title"]
+    for k, v in refined_schema["properties"].items():
+        if "title" in v:
+            del v["title"]
+
+    return refined_schema
+
+
+def _validate_refined_schema(schema: Dict) -> bool:
+    """Validate refined schema(OpenAI function call type schema).
+
+    Args:
+        schema: any dict
+
+    Returns:
+        bool: True if schema is openai function call type schema, False otherwise.
+    """
+    if "name" not in schema or "description" not in schema:
+        return False
+
+    if "properties" not in schema:
+        return False
+
+    return True
+
+
+def function_to_tool_schema(func: Callable) -> Dict[str, Any]:
+    """Create a tool schema from a function's signature.
+
+    Args:
+        func: Function to generate the schema from.
+
+    Returns:
+        An OpenAI function-call JSON schema built from a pydantic model.
+        ref: https://platform.openai.com/docs/api-reference/chat/create#chat-create-function_call
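+
+    Example (illustrative; ``add`` is a hypothetical function)::
+
+        def add(a: int, b: int) -> int:
+            "Add two integers."
+            return a + b
+
+        schema = function_to_tool_schema(add)
+        # schema includes "name", "description", and "properties" keys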
+    """  # noqa
+    # https://docs.pydantic.dev/latest/usage/validation_decorator/
+    inferred_model = validate_arguments(func, config=_SchemaConfig).model  # type: ignore # noqa
+
+    # Extract function parameter names.
+    # Pydantic adds placeholder virtual fields we need to strip
+    signature = inspect.signature(func)
+    valid_properties: List[str] = [
+        param.name for param in signature.parameters.values()
+    ]
+
+    # Create a pydantic model with only the valid fields.
+    created_model = _create_subset_model(
+        f"{func.__name__}Schema", inferred_model, valid_properties
+    )
+    reduced_schema = created_model.schema()
+
+    # Attach the function's metadata to the schema.
+    reduced_schema["description"] = func.__doc__ or ""
+    reduced_schema["name"] = func.__name__
+
+    if "title" in reduced_schema:
+        del reduced_schema["title"]
+    for v in reduced_schema["properties"].values():
+        if "title" in v:
+            del v["title"]
+
+    return reduced_schema
+
+
+class BaseTool(ABC, BaseModel):
+    """Interface tools must implement."""
+
+    name: str
+    """The unique name of the tool that clearly communicates its purpose."""
+    description: str
+    """Used to tell the model how/when/why to use the tool.
+    You can provide few-shot examples as a part of the description."""
+    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
+    """The parameters that the tool accepts. This can be a dictionary or a Pydantic
+    model."""
+    example: Optional[List[str]] = None
+    """Show how to use this tool. These are few-shot examples for the agent.
+    Your few-shot examples may look like:
+
+    example1 = 'Question: What is 37593 * 67?\n```\n37593 * 67\n```\nnumexpr.evaluate("37593 * 67")\nAnswer: 2518731'
+    example2 = 'Question: What is 37593^(1/5)?\n```\n37593**(1/5)\n```\nnumexpr.evaluate("37593**(1/5)")\nAnswer: 8.222831614237718'
+    few_shot_example = [example1, example2]
+    """  # noqa
+
+    def __init__(self, **kwargs):
+        """Custom tool config.
+
+        Args:
+            **kwargs:
+                hooks(List[Callable]): hook functions to add to the hook_manager.
+        """
+        warnings.warn(
+            "BaseTool is deprecated at v1.7.0. promptulate.tools.base.Tool is recommended.",  # noqa: E501
+            DeprecationWarning,
+        )
+        super().__init__(**kwargs)
+        if "hooks" in kwargs and kwargs["hooks"]:
+            for hook in kwargs["hooks"]:
+                Hook.mount_instance_hook(hook, self)
+        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
+
+    class Config:
+        arbitrary_types_allowed = True
+        extra = Extra.allow
+
+    def run(self, *args, **kwargs):
+        """run the tool including specified function and hooks"""
+        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
+        result: Any = self._run(*args, **kwargs)
+        logger.debug(f"[pne tool result] {result}")
+        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
+        return result
+
+    @abstractmethod
+    def _run(self, *args, **kwargs):
+        """Run detail business, implemented by subclass."""
+        raise NotImplementedError()
+
+
+class Tool(ABC):
+    """Abstract base class for tools. All tools must implement this interface."""
+
+    name: str
+    """Tool name"""
+    description: str
+    """Tool description"""
+    parameters: Optional[Union[Dict, Type[BaseModel]]] = None
+    """Tool parameters"""
+
+    def __init__(self, *args, **kwargs):
+        self.check_params()
+        if "hooks" in kwargs and kwargs["hooks"]:
+            for hook in kwargs["hooks"]:
+                Hook.mount_instance_hook(hook, self)
+        Hook.call_hook(HookTable.ON_TOOL_CREATE, self, **kwargs)
+
+    def check_params(self):
+        """Check parameters when initialization."""
+        if not getattr(self, "name", None) or not getattr(self, "description", None):
+            raise TypeError(
+                f"{self.__class__.__name__} required parameters 'name' and 'description'."  # noqa: E501
+            )
+
+    def run(self, *args, **kwargs):
+        """run the tool including specified function and hooks"""
+        Hook.call_hook(HookTable.ON_TOOL_START, self, *args, **kwargs)
+        result: Any = self._run(*args, **kwargs)
+        logger.debug(f"[pne tool response] name: {self.name} result: {result}")
+        Hook.call_hook(HookTable.ON_TOOL_RESULT, self, result=result)
+        return result
+
+    @abstractmethod
+    def _run(self, *args, **kwargs):
+        """Run detail business, implemented by subclass."""
+        raise NotImplementedError()
+
+    def to_schema(self) -> Dict[str, Any]:
+        """
+        Converts the Tool instance to an OpenAI function-call JSON schema.
+
+        Returns:
+            dict: A dictionary representing the JSON schema of the Tool instance.
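+
+        Example (illustrative; ``my_tool`` stands for any Tool instance)::
+
+            schema = my_tool.to_schema()
+            # e.g. {"name": "...", "description": "...", "parameters": {...}}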
+        """
+        # If there are no parameters, return the basic schema.
+        if not self.parameters:
+            return {
+                "name": self.name,
+                "description": self.description,
+            }
+
+        # If parameters are defined by a Pydantic BaseModel, convert to schema.
+        if isinstance(self.parameters, type) and issubclass(self.parameters, BaseModel):
+            return {
+                "name": self.name,
+                "description": self.description,
+                "parameters": _pydantic_to_refined_schema(self.parameters),
+            }
+
+        # If parameters are defined by a schema dictionary, validate and return it.
+        if isinstance(self.parameters, dict):
+            if not _validate_refined_schema(self.parameters):
+                raise ValueError(
+                    f"The 'parameters' dictionary for {self.__class__.__name__} does not conform to the expected schema."  # noqa: E501
+                )
+            return self.parameters
+
+        # If parameters are neither a BaseModel nor a dictionary, raise an error.
+        raise TypeError(
+            f"The 'parameters' attribute of {self.__class__.__name__} must be either a subclass of BaseModel or a dictionary representing a schema."  # noqa: E501
+        )
+
+
+class ToolImpl(Tool):
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        callback: Callable,
+        parameters: Optional[Union[dict, Type[BaseModel]]] = None,
+        **kwargs,
+    ):
+        self.name: str = name
+        self.description: str = description
+        self.callback: Callable = callback
+        self.parameters: Optional[Union[dict, Type[BaseModel]]] = parameters
+
+        super().__init__(**kwargs)
+
+    @classmethod
+    def from_function(cls, func: Callable) -> "ToolImpl":
+        """Create a ToolImpl instance from a function.
+
+        Args:
+            func: Function to create the ToolImpl instance from.
+
+        Returns:
+            A ToolImpl instance.
+        """
+        if not func.__doc__:
+            err_msg = """Please add docstring and variable type declarations for your function.Here is a best practice:
+def web_search(keyword: str, top_k: int = 10) -> str:
+    \"""search by keyword in web.
+    Args:
+        keyword: keyword to search
+        top_k: top k results to return
+
+    Returns:
+        str: search result
+    \"""
+    return "result"
+
+            """  # noqa
+            raise ValueError(err_msg)
+
+        schema = function_to_tool_schema(func)
+        return cls(
+            name=func.__name__,
+            description=func.__doc__,
+            callback=func,
+            parameters=schema,
+        )
+
+    @classmethod
+    def from_define_tool(
+        cls,
+        callback: Callable,
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        parameters: Optional[Union[Dict, Type[BaseModel]]] = None,
+    ) -> "ToolImpl":
+        """Create a ToolImpl instance from a function.
+
+        Args:
+            callback: Function to create the ToolImpl instance from.
+            name: tool name
+            description: tool description
+            parameters: tool parameters
+
+        Returns:
+            A ToolImpl instance.
+        """
+        if not parameters:
+            schema: dict = function_to_tool_schema(callback)
+        elif isinstance(parameters, dict) and _validate_refined_schema(parameters):
+            schema: dict = parameters
+        elif isinstance(parameters, type) and issubclass(parameters, BaseModel):
+            schema: dict = _pydantic_to_refined_schema(parameters)
+        else:
+            raise TypeError(
+                f"{[cls.__name__]} parameters must be BaseModel or JSON schema."
+            )
+
+        _description = description or ""
+        _doc = callback.__doc__ or ""
+
+        return cls(
+            name=name or callback.__name__,
+            description=f"{_description}\n{_doc}",
+            callback=callback,
+            parameters=schema,
+        )
+
+    @classmethod
+    def from_base_tool(cls, tool: BaseTool) -> "ToolImpl":
+        """Create a ToolImpl instance from a BaseTool instance.
+
+        Args:
+            tool: BaseTool instance to create the ToolImpl instance from.
+
+        Returns:
+            A ToolImpl instance.
+        """
+
+        return cls(
+            name=tool.name,
+            description=tool.description,
+            callback=tool.run,
+            parameters=tool.parameters,
+        )
+
+    def _run(self, *args, **kwargs):
+        return self.callback(*args, **kwargs)
+
+
+def define_tool(
+    *,
+    callback: Callable,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    parameters: Optional[Union[dict, Type[BaseModel]]] = None,
+) -> ToolImpl:
+    """
+    A tool with llm or API wrapper will automatically initialize the llm and API wrapper
+    classes, which can avoid this problem by initializing in this way.
+
+    Args:
+        callback: tool function when running
+        name: tool name
+        description: tool description
+        parameters: tool parameters
+
+    Returns:
+        A ToolImpl class (subclass of Tool).
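+
+    Example (a minimal sketch; ``web_search`` is a hypothetical callback)::
+
+        def web_search(keyword: str, top_k: int = 10) -> str:
+            "Search the web by keyword."
+            return "results"
+
+        tool = define_tool(callback=web_search, name="web-search")
+        result = tool.run("LLM frameworks")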
+    """
+
+    return ToolImpl.from_define_tool(
+        callback=callback, name=name, description=description, parameters=parameters
+    )
+
+
+def function_to_tool(func: Callable) -> ToolImpl:
+    """Converts a function to a ToolImpl instance.
+
+    Args:
+        func: Function to convert to a ToolImpl instance.
+
+    Returns:
+        A ToolImpl instance.
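+
+    Example (illustrative; assumes an annotated, documented function such as the
+    ``web_search`` best practice shown in ``ToolImpl.from_function``)::
+
+        tool = function_to_tool(web_search)
+        tool.run(keyword="python", top_k=5)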
+    """
+    return ToolImpl.from_function(func)
+
+
+class BaseToolKit:
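+    """A group of tools that can be passed to an agent as a single unit.
+
+    Example (a minimal sketch; ``tool_a`` and ``tool_b`` are placeholder tools)::
+
+        class MyToolKit(BaseToolKit):
+            def get_tools(self) -> List[ToolTypes]:
+                return [tool_a, tool_b]
+    """
+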
+    @abstractmethod
+    def get_tools(self) -> List[ToolTypes]:
+        """get tools in the toolkit"""
+        raise NotImplementedError
diff --git a/promptulate/tools/file/toolkit.py b/promptulate/tools/file/toolkit.py
index 35a4a59d..61bdc6b7 100644
--- a/promptulate/tools/file/toolkit.py
+++ b/promptulate/tools/file/toolkit.py
@@ -1,51 +1,57 @@
-import os
-from typing import List, Optional
-
-from promptulate.tools.base import BaseToolKit, Tool
-from promptulate.tools.file.tools import (
-    AppendFileTool,
-    CopyFileTool,
-    DeleteFileTool,
-    ListDirectoryTool,
-    MoveFileTool,
-    ReadFileTool,
-    WriteFileTool,
-)
-
-TOOL_MAPPER = {
-    "write": WriteFileTool,
-    "append": AppendFileTool,
-    "read": ReadFileTool,
-    "delete": DeleteFileTool,
-    "list": ListDirectoryTool,
-    "copy": CopyFileTool,
-    "move": MoveFileTool,
-}
-
-
-class FileToolKit(BaseToolKit):
-    """File ToolKit
-
-    Args:
-        root_dir: The root directory of the file tool.
-        selected_tools: The selected tools of the file tool.
-
-    Returns:
-        The instance object of the corresponding tool
-    """
-
-    def __init__(self, root_dir: str = None, modes: Optional[List[str]] = None) -> None:
-        self.root_dir = root_dir or os.getcwd()
-        self.modes = modes or []
-
-        for mode in self.modes:
-            if mode not in TOOL_MAPPER.keys():
-                raise ValueError(
-                    f"{mode} does not exist.\n"
-                    f"Please select from {list(TOOL_MAPPER.keys())}"
-                )
-
-    def get_tools(self) -> List[Tool]:
-        if self.modes:
-            return [TOOL_MAPPER[mode](self.root_dir) for mode in self.modes]
-        return [tool(self.root_dir) for tool in TOOL_MAPPER.values()]
+import os
+from typing import List, Literal, Optional
+
+from promptulate.tools.base import BaseToolKit, ToolTypes
+from promptulate.tools.file.tools import (
+    AppendFileTool,
+    CopyFileTool,
+    DeleteFileTool,
+    ListDirectoryTool,
+    MoveFileTool,
+    ReadFileTool,
+    WriteFileTool,
+)
+
+FileToolType = Literal["write", "append", "read", "delete", "list", "copy", "move"]
+TOOL_MAPPER = {
+    "write": WriteFileTool,
+    "append": AppendFileTool,
+    "read": ReadFileTool,
+    "delete": DeleteFileTool,
+    "list": ListDirectoryTool,
+    "copy": CopyFileTool,
+    "move": MoveFileTool,
+}
+
+
+class FileToolKit(BaseToolKit):
+    """File ToolKit
+
+    Args:
+        root_dir: The root directory the file tools operate in. Defaults to the
+            current working directory.
+        modes (Optional): The file-operation modes to enable. Defaults to all
+            modes in TOOL_MAPPER.
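+
+    Example (a minimal sketch; operates on the current working directory)::
+
+        toolkit = FileToolKit(modes=["read", "write"])
+        tools = toolkit.get_tools()  # [ReadFileTool, WriteFileTool] instances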
+    """
+
+    def __init__(
+        self,
+        *,
+        root_dir: Optional[str] = None,
+        modes: Optional[List[FileToolType]] = None,
+    ) -> None:
+        self.root_dir = root_dir or os.getcwd()
+        self.modes = modes or []
+
+        for mode in self.modes:
+            if mode not in TOOL_MAPPER:
+                raise ValueError(
+                    f"{mode} does not exist.\n"
+                    f"Please select from {list(TOOL_MAPPER.keys())}"
+                )
+
+    def get_tools(self) -> List[ToolTypes]:
+        if self.modes:
+            return [TOOL_MAPPER[mode](self.root_dir) for mode in self.modes]
+        return [tool(self.root_dir) for tool in TOOL_MAPPER.values()]
diff --git a/promptulate/tools/manager.py b/promptulate/tools/manager.py
index 18d19c66..d8604e6d 100644
--- a/promptulate/tools/manager.py
+++ b/promptulate/tools/manager.py
@@ -1,110 +1,127 @@
-import inspect
-import json
-from typing import Any, List, Optional, Union
-
-from promptulate.schema import ToolTypes
-from promptulate.tools.base import BaseTool, Tool, ToolImpl, function_to_tool
-from promptulate.tools.langchain.tools import LangchainTool
-
-
-def _judge_langchain_tool_and_wrap(tool: Any) -> Optional[Tool]:
-    """Judge if the tool is a langchain tool and wrap it.
-
-    Args:
-        tool(Any): The tool to be judged.
-
-    Returns:
-        Optional[Tool]: The wrapped tool or None if not a langchain tool.
-    """
-    try:
-        from langchain.tools.base import BaseTool as LangchainBaseTool
-
-        if isinstance(tool, LangchainBaseTool):
-            return LangchainTool(tool)
-
-    except ImportError:
-        raise ValueError(
-            (
-                f"Error tool type {tool}, please check the tool type.",
-                "If you are using langchain tool, please install -U langchain.",
-            )
-        )
-
-
-def _initialize_tool(tool: ToolTypes) -> Optional[Tool]:
-    """Initialize the tool.
-
-    Args:
-        tool(Union[BaseTool, Callable, Tool, "LangchainBaseToolType"]): The tool to be
-            initialized.
-
-    Returns:
-        Optional[Tool]: The initialized tool.
-    """
-    if isinstance(tool, BaseTool):
-        return ToolImpl.from_base_tool(tool)
-    elif isinstance(tool, Tool):
-        return tool
-    elif inspect.isfunction(tool):
-        return function_to_tool(tool)
-
-    return _judge_langchain_tool_and_wrap(tool)
-
-
-class ToolManager:
-    """ToolManager helps Agent to manage tools"""
-
-    def __init__(self, tools: List[ToolTypes]):
-        self.tools: List[Tool] = [
-            _initialize_tool(tool)
-            for tool in tools
-            if _initialize_tool(tool) is not None
-        ]
-
-    def get_tool(self, tool_name: str) -> Optional[Tool]:
-        """Find specified tool by tool name.
-        Args:
-            tool_name(str): The name of the tool.
-
-        Returns:
-            Optional[Tool]: The specified tool or None if not found.
-        """
-        return next((tool for tool in self.tools if tool.name == tool_name), None)
-
-    def run_tool(self, tool_name: str, parameters: Union[str, dict]) -> str:
-        """Run tool by input tool name and data inputs
-
-        Args:
-            tool_name(str): The name of the tool.
-            parameters(Union[str, dict]): The parameters for the tool.
-
-        Returns:
-            str: The result of the tool.
-        """
-        tool = self.get_tool(tool_name)
-
-        if tool is None:
-            return (
-                f"{tool_name} has not been provided yet, please use the provided tool."
-            )
-
-        if isinstance(parameters, dict):
-            return tool.run(**parameters)
-        else:
-            return tool.run(parameters)
-
-    @property
-    def tool_names(self) -> str:
-        """Get all tool names."""
-        tool_names = ""
-        for tool in self.tools:
-            tool_names += f"{tool.name}, "
-        return tool_names[:-2]
-
-    @property
-    def tool_descriptions(self) -> str:
-        """Get all tool descriptions, including the schema if available."""
-        tool_descriptions = ""
-        for tool in self.tools:
-            tool_descriptions += json.dumps(tool.to_schema()) + "\n"
-        return tool_descriptions
+import inspect
+import json
+from typing import Any, List, Optional, Union
+
+from promptulate.tools.base import (
+    BaseTool,
+    BaseToolKit,
+    Tool,
+    ToolImpl,
+    ToolTypes,
+    function_to_tool,
+)
+from promptulate.tools.langchain.tools import LangchainTool
+
+
+def _judge_langchain_tool_and_wrap(tool: Any) -> Tool:
+    """Judge if the tool is a langchain tool and wrap it.
+
+    Args:
+        tool(Any): The tool to be judged.
+
+    Returns:
+        Optional[Tool]: The wrapped tool or None if not a langchain tool.
+    """
+    try:
+        from langchain.tools.base import BaseTool as LangchainBaseTool
+
+        if isinstance(tool, LangchainBaseTool):
+            return LangchainTool(tool)
+
+        raise ValueError(f"Unknown tool type {tool}.")
+    except ImportError:
+        raise ValueError(
+            f"Error tool type {tool}, please check the tool type. "
+            "If you are using a langchain tool, run: pip install -U langchain."
+        )
+
+
+def _initialize_tool(tool: ToolTypes) -> Union[Tool, List[Tool]]:
+    """Initialize the tool.
+
+    Args:
+        tool(ToolTypes): The tool to be initialized. A BaseToolKit is expanded
+            into its member tools.
+
+    Returns:
+        Union[Tool, List[Tool]]: The initialized tool, or a list of tools when a
+            toolkit is given.
+    """
+    if isinstance(tool, BaseToolKit):
+        return [_initialize_tool(kit_tool) for kit_tool in tool.get_tools()]
+
+    if isinstance(tool, BaseTool):
+        return ToolImpl.from_base_tool(tool)
+    elif isinstance(tool, Tool):
+        return tool
+    elif inspect.isfunction(tool):
+        return function_to_tool(tool)
+
+    return _judge_langchain_tool_and_wrap(tool)
+
+
+class ToolManager:
+    """ToolManager helps Agent to manage tools"""
+
+    def __init__(self, tools: List[ToolTypes]):
+        self.tools: List[Tool] = []
+
+        for tool in tools:
+            initialized_tool: Union[list, Tool] = _initialize_tool(tool)
+
+            if isinstance(initialized_tool, list):
+                self.tools.extend(initialized_tool)
+            else:
+                self.tools.append(initialized_tool)
+
+    def get_tool(self, tool_name: str) -> Optional[Tool]:
+        """Find specified tool by tool name.
+        Args:
+            tool_name(str): The name of the tool.
+
+        Returns:
+            Optional[Tool]: The specified tool or None if not found.
+        """
+        return next((tool for tool in self.tools if tool.name == tool_name), None)
+
+    def run_tool(self, tool_name: str, parameters: Union[str, dict]) -> str:
+        """Run tool by input tool name and data inputs
+
+        Args:
+            tool_name(str): The name of the tool.
+            parameters(Union[str, dict]): The parameters for the tool.
+
+        Returns:
+            str: The result of the tool.
+        """
+        tool = self.get_tool(tool_name)
+
+        if tool is None:
+            return (
+                f"{tool_name} has not been provided yet, please use the provided tool."
+            )
+
+        if isinstance(parameters, dict):
+            return tool.run(**parameters)
+        else:
+            return tool.run(parameters)
+
+    @property
+    def tool_names(self) -> str:
+        """Get all tool names."""
+        tool_names = ""
+        for tool in self.tools:
+            tool_names += f"{tool.name}, "
+        return tool_names[:-2]
+
+    @property
+    def tool_descriptions(self) -> str:
+        """Get all tool descriptions, including the schema if available."""
+        tool_descriptions = ""
+        for tool in self.tools:
+            tool_descriptions += json.dumps(tool.to_schema()) + "\n"
+        return tool_descriptions
diff --git a/pyproject.toml b/pyproject.toml
index 95827d7d..c6c1d74f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,121 +1,121 @@
-[build-system]
-build-backend = "poetry.core.masonry.api"
-requires = ["poetry-core"]
-
-[tool.poetry]
-authors = ["Zeeland <zeeland4work@gmail.com>"]
-description = "A powerful LLM Application development framework."
-name = "promptulate"
-readme = "README.md"
-repository = "https://github.com/Undertone0809/promptulate"
-version = "1.15.0"
-keywords = [
-    "promptulate",
-    "pne",
-    "prompt",
-    "agent",
-    "openai",
-    "chatgpt",
-    "gpt",
-    "llm",
-    "openai",
-    "langchain",
-    "litellm"
-]
-
-[tool.poetry.dependencies]
-broadcast-service = "1.3.2"
-click = "^8.1.7"
-cushy-storage = "^1.3.7"
-litellm = "^1.16.19"
-numexpr = "^2.8.4"
-pydantic = ">=1,<3"
-python = ">=3.8.1,<4.0"
-python-dotenv = "^1.0.0"
-questionary = "^2.0.1"
-requests = "^2.31.0"
-jinja2 = "^3.1.3"
-typing-extensions = "^4.10.0"
-
-
-[tool.poetry.group.dev.dependencies]
-coverage = "^7.3.4"
-coverage-badge = "^1.1.0"
-pre-commit = "^3.5.0"
-pytest = "^7.4.4"
-pytest-cov = "^4.1.0"
-pytest-html = ">=3.1.1,<5.0.0"
-ruff = "^0.1.13"
-pytest-mock = "^3.12.0"
-
-[tool.poetry.group.test_integration.dependencies]
-langchain = "^0.1.1"
-arxiv = "^1.4.7"
-duckduckgo_search = "^3.9.11"
-pyjwt = "^2.8.0"
-
-[tool.poetry.scripts]
-pne = "promptulate.client.pne:main"
-pne-chat = "promptulate.client.chat:main"
-
-#[[tool.poetry.source]]
-#name = "tsinghua"
-#priority = "default"
-#url = "https://pypi.tuna.tsinghua.edu.cn/simple"
-
-[tool.ruff]
-# https://beta.ruff.rs/docs/settings/
-# https://docs.astral.sh/ruff/configuration/
-line-length = 88
-
-# https://beta.ruff.rs/docs/rules/
-extend-select = ["I"]
-ignore = ["F401"]
-select = ["E", "W", "F", "I"]
-
-# Exclude a variety of commonly ignored directories.
-exclude = [
-  ".bzr",
-  ".direnv",
-  ".eggs",
-  ".git",
-  ".git-rewrite",
-  ".hg",
-  ".mypy_cache",
-  ".nox",
-  ".pants.d",
-  ".pytype",
-  ".ruff_cache",
-  ".svn",
-  ".tox",
-  ".venv",
-  "__pypackages__",
-  "_build",
-  "buck-out",
-  "build",
-  "dist",
-  "node_modules",
-  "venv",
-]
-ignore-init-module-imports = true
-respect-gitignore = true
-
-[tool.ruff.format]
-# Like Black, use double quotes for strings.
-quote-style = "double"
-
-# Like Black, indent with spaces, rather than tabs.
-indent-style = "space"
-
-# Like Black, respect magic trailing commas.
-skip-magic-trailing-comma = false
-
-# Like Black, automatically detect the appropriate line ending.
-line-ending = "auto"
-
-[tool.coverage.run]
-source = ["tests"]
-
-[coverage.report]
-fail_under = 50
-show_missing = true
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.poetry]
+authors = ["Zeeland <zeeland4work@gmail.com>"]
+description = "A powerful LLM Application development framework."
+name = "promptulate"
+readme = "README.md"
+repository = "https://github.com/Undertone0809/promptulate"
+version = "1.15.1"
+keywords = [
+    "promptulate",
+    "pne",
+    "prompt",
+    "agent",
+    "openai",
+    "chatgpt",
+    "gpt",
+    "llm",
+    "openai",
+    "langchain",
+    "litellm"
+]
+
+[tool.poetry.dependencies]
+broadcast-service = "1.3.2"
+click = "^8.1.7"
+cushy-storage = "^1.3.7"
+litellm = "^1.16.19"
+numexpr = "^2.8.4"
+pydantic = ">=1,<3"
+python = ">=3.8.1,<4.0"
+python-dotenv = "^1.0.0"
+questionary = "^2.0.1"
+requests = "^2.31.0"
+jinja2 = "^3.1.3"
+typing-extensions = "^4.10.0"
+
+
+[tool.poetry.group.dev.dependencies]
+coverage = "^7.3.4"
+coverage-badge = "^1.1.0"
+pre-commit = "^3.5.0"
+pytest = "^7.4.4"
+pytest-cov = "^4.1.0"
+pytest-html = ">=3.1.1,<5.0.0"
+ruff = "^0.1.13"
+pytest-mock = "^3.12.0"
+
+[tool.poetry.group.test_integration.dependencies]
+langchain = "^0.1.1"
+arxiv = "^1.4.7"
+duckduckgo_search = "^3.9.11"
+pyjwt = "^2.8.0"
+
+[tool.poetry.scripts]
+pne = "promptulate.client.pne:main"
+pne-chat = "promptulate.client.chat:main"
+
+#[[tool.poetry.source]]
+#name = "tsinghua"
+#priority = "default"
+#url = "https://pypi.tuna.tsinghua.edu.cn/simple"
+
+[tool.ruff]
+# https://beta.ruff.rs/docs/settings/
+# https://docs.astral.sh/ruff/configuration/
+line-length = 88
+
+# https://beta.ruff.rs/docs/rules/
+extend-select = ["I"]
+ignore = ["F401"]
+select = ["E", "W", "F", "I"]
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+  ".bzr",
+  ".direnv",
+  ".eggs",
+  ".git",
+  ".git-rewrite",
+  ".hg",
+  ".mypy_cache",
+  ".nox",
+  ".pants.d",
+  ".pytype",
+  ".ruff_cache",
+  ".svn",
+  ".tox",
+  ".venv",
+  "__pypackages__",
+  "_build",
+  "buck-out",
+  "build",
+  "dist",
+  "node_modules",
+  "venv",
+]
+ignore-init-module-imports = true
+respect-gitignore = true
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+[tool.coverage.run]
+source = ["tests"]
+
+[tool.coverage.report]
+fail_under = 50
+show_missing = true
diff --git a/tests/agents/test_tool_agent.py b/tests/agents/test_tool_agent.py
index 1b0e787f..7b35230b 100644
--- a/tests/agents/test_tool_agent.py
+++ b/tests/agents/test_tool_agent.py
@@ -1,5 +1,6 @@
 from promptulate.agents.tool_agent.agent import ToolAgent
 from promptulate.llms.base import BaseLLM
+from promptulate.tools.base import BaseToolKit
 
 
 class FakeLLM(BaseLLM):
@@ -35,3 +36,20 @@ def test_init():
     assert len(agent.tool_manager.tools) == 2
     assert agent.tool_manager.tools[0].name == "fake_tool_1"
     assert agent.tool_manager.tools[1].name == "fake_tool_2"
+
+
+class MockToolKit(BaseToolKit):
+    def get_tools(self) -> list:
+        return [fake_tool_1, fake_tool_2]
+
+
+def test_init_by_toolkits():
+    llm = FakeLLM()
+    agent = ToolAgent(llm=llm, tools=[MockToolKit()])
+    assert len(agent.tool_manager.tools) == 2
+
+
+def test_init_by_tool_and_kit():
+    llm = FakeLLM()
+    agent = ToolAgent(llm=llm, tools=[MockToolKit(), fake_tool_1, fake_tool_2])
+    assert len(agent.tool_manager.tools) == 4

From 83fc13d888f8de43b61479f2144ce6150cd2a258 Mon Sep 17 00:00:00 2001
From: zeeland <zeeland4work@gmail.com>
Date: Thu, 18 Apr 2024 22:39:21 +0800
Subject: [PATCH 4/4] test: add file tools test

---
 Makefile | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 318910e8..3a083012 100644
--- a/Makefile
+++ b/Makefile
@@ -2,9 +2,8 @@ SHELL := /usr/bin/env bash
 OS := $(shell python -c "import sys; print(sys.platform)")
 
 # all test files define here
-DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py
+DEV_TEST_TOOL_FILES := ./tests/tools/test_human_feedback_tool.py ./tests/tools/test_calculator.py ./tests/tools/test_python_repl_tools.py ./tests/tools/test_sleep_tool.py ./tests/tools/test_arxiv_tools.py ./tests/tools/test_tool_manager.py ./tests/tools/test_file_tools.py
 DEV_TEST_HOOK_FILES := ./tests/hook/test_llm.py ./tests/hook/test_tool_hook.py
-
 DEV_TEST_LLM_FILES := ./tests/llms/test_openai.py ./tests/llms/test_factory.py
 DEV_TEST_AGENT_FILES := ./tests/agents/test_tool_agent.py ./tests/agents/test_assistant_agent.py
 DEV_TEST_FILES := $(DEV_TEST_TOOL_FILES) $(DEV_TEST_HOOK_FILES) $(DEV_TEST_LLM_FILES) $(DEV_TEST_AGENT_FILES) ./tests/test_chat.py ./tests/output_formatter ./tests/test_import.py ./tests/utils/test_string_template.py