From 6e05db972a824fa551ba544aa2dd8b12bb6cb86b Mon Sep 17 00:00:00 2001
From: batyu
Date: Sat, 15 Apr 2023 06:41:53 +0200
Subject: [PATCH 01/36] Allow local development without a full install, via
 "pip install -e ."
---
pyproject.toml | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 64ed71658541..f420fcac1e4e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,4 +8,7 @@ readme = "README.md"
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
-extend-exclude = ""
\ No newline at end of file
+extend-exclude = ""
+
+[tool.setuptools]
+packages = ["autogpt"]
From 0b936a2bb82b108b6e995e8efac9f40bf2642b4d Mon Sep 17 00:00:00 2001
From: cs0lar
Date: Sun, 16 Apr 2023 10:48:43 +0100
Subject: [PATCH 02/36] fixes index name to classname conversion
---
autogpt/memory/weaviate.py | 11 ++++++++++-
tests/integration/weaviate_memory_tests.py | 19 +++++++------------
2 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py
index 6fcce0a0216d..35e7844a2a24 100644
--- a/autogpt/memory/weaviate.py
+++ b/autogpt/memory/weaviate.py
@@ -37,9 +37,18 @@ def __init__(self, cfg):
else:
self.client = Client(url, auth_client_secret=auth_credentials)
- self.index = cfg.memory_index
+ self.index = WeaviateMemory.format_classname(cfg.memory_index)
self._create_schema()
+ @staticmethod
+ def format_classname(index):
+ # weaviate uses capitalised index names
+ # The python client uses the following code to format
+ # index names before the corresponding class is created
+ if len(index) == 1:
+ return index.capitalize()
+ return index[0].capitalize() + index[1:]
+
def _create_schema(self):
schema = default_schema(self.index)
if not self.client.schema.contains(schema):
diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py
index 503fe9d22ed3..4acea0ffda1e 100644
--- a/tests/integration/weaviate_memory_tests.py
+++ b/tests/integration/weaviate_memory_tests.py
@@ -12,17 +12,10 @@
from autogpt.memory.base import get_ada_embedding
-@mock.patch.dict(os.environ, {
- "WEAVIATE_HOST": "127.0.0.1",
- "WEAVIATE_PROTOCOL": "http",
- "WEAVIATE_PORT": "8080",
- "WEAVIATE_USERNAME": "",
- "WEAVIATE_PASSWORD": "",
- "MEMORY_INDEX": "AutogptTests"
-})
class TestWeaviateMemory(unittest.TestCase):
cfg = None
client = None
+ index = None
@classmethod
def setUpClass(cls):
@@ -40,6 +33,8 @@ def setUpClass(cls):
else:
cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}")
+ cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
+
"""
In order to run these tests you will need a local instance of
Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
@@ -51,7 +46,7 @@ def setUpClass(cls):
"""
def setUp(self):
try:
- self.client.schema.delete_class(self.cfg.memory_index)
+ self.client.schema.delete_class(self.index)
except:
pass
@@ -60,8 +55,8 @@ def setUp(self):
def test_add(self):
doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones'
self.memory.add(doc)
- result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do()
- actual = result['data']['Get'][self.cfg.memory_index]
+ result = self.client.query.get(self.index, ['raw_text']).do()
+ actual = result['data']['Get'][self.index]
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]['raw_text'], doc)
@@ -73,7 +68,7 @@ def test_get(self):
batch.add_data_object(
uuid=get_valid_uuid(uuid4()),
data_object={'raw_text': doc},
- class_name=self.cfg.memory_index,
+ class_name=self.index,
vector=get_ada_embedding(doc)
)
From 9b6bce4592800f6436bd877daba135cfee6b8f7d Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Sun, 16 Apr 2023 22:10:48 +0300
Subject: [PATCH 03/36] Improve the error logging for OAI Issues
---
autogpt/llm_utils.py | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 2075f93446eb..25dbabd41d8b 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -5,9 +5,10 @@
import openai
from openai.error import APIError, RateLimitError
-from colorama import Fore
+from colorama import Fore, Style
from autogpt.config import Config
+from autogpt.logs import logger
CFG = Config()
@@ -70,6 +71,7 @@ def create_chat_completion(
"""
response = None
num_retries = 10
+ warned_user = False
if CFG.debug_mode:
print(
Fore.GREEN
@@ -101,6 +103,11 @@ def create_chat_completion(
Fore.RED + "Error: ",
f"Reached rate limit, passing..." + Fore.RESET,
)
+ if not warned_user:
+ logger.double_check(
+ f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " +
+ f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}")
+ warned_user = True
except APIError as e:
if e.http_status == 502:
pass
@@ -115,7 +122,17 @@ def create_chat_completion(
)
time.sleep(backoff)
if response is None:
- raise RuntimeError(f"Failed to get response after {num_retries} retries")
+ logger.typewriter_log(
+ "FAILED TO GET RESPONSE FROM OPENAI",
+ Fore.RED,
+ "Auto-GPT has failed to get a response from OpenAI's services. " +
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
+ )
+ logger.double_check()
+ if CFG.debug_mode:
+ raise RuntimeError(f"Failed to get response after {num_retries} retries")
+ else:
+ quit(1)
return response.choices[0].message["content"]
From 2d24876530f61a20e0c68fc449312fc84e142914 Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Sun, 16 Apr 2023 22:16:43 +0300
Subject: [PATCH 04/36] Fix linter issues
---
autogpt/llm_utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 25dbabd41d8b..3630108ecdf8 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -123,7 +123,7 @@ def create_chat_completion(
time.sleep(backoff)
if response is None:
logger.typewriter_log(
- "FAILED TO GET RESPONSE FROM OPENAI",
+ "FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
"Auto-GPT has failed to get a response from OpenAI's services. " +
f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
From da72e69196bff960e5b5235fda22cdd59c79ebaa Mon Sep 17 00:00:00 2001
From: Tzeng Yuxio
Date: Mon, 17 Apr 2023 09:28:33 +0800
Subject: [PATCH 05/36] fix: unreadable text in console and output potentially
 exceeding the max token limit
---
autogpt/app.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/app.py b/autogpt/app.py
index 78b5bd2fdeb0..f0eadfa7de4f 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -133,7 +133,7 @@ def execute_command(command_name: str, arguments):
else:
safe_message = google_result.encode('utf-8', 'ignore')
- return str(safe_message)
+ return safe_message.decode('utf-8')
elif command_name == "memory_add":
return memory.add(arguments["string"])
elif command_name == "start_agent":
From 71c6600abf1525364db949622c7dc3f9b0e00eae Mon Sep 17 00:00:00 2001
From: lengweiping
Date: Mon, 17 Apr 2023 12:44:46 +0800
Subject: [PATCH 06/36] Move memory object creation into the memory_add block
---
autogpt/app.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/autogpt/app.py b/autogpt/app.py
index 19c075f0b09a..979f57d31468 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -112,11 +112,10 @@ def execute_command(command_name: str, arguments):
arguments (dict): The arguments for the command
Returns:
- str: The result of the command"""
- memory = get_memory(CFG)
-
+ str: The result of the command
+ """
try:
- command_name = map_command_synonyms(command_name)
+ command_name = map_command_synonyms(command_name.lower())
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial
@@ -136,6 +135,7 @@ def execute_command(command_name: str, arguments):
return str(safe_message)
elif command_name == "memory_add":
+ memory = get_memory(CFG)
return memory.add(arguments["string"])
elif command_name == "start_agent":
return start_agent(
From e86764df459e3f4bcbdbfdc796af63bc715fbb71 Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Mon, 17 Apr 2023 07:55:48 +0300
Subject: [PATCH 07/36] Add linux selenium fixes
---
autogpt/commands/web_selenium.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 8c652294587f..9b638ba03efd 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -17,6 +17,7 @@
import logging
from pathlib import Path
from autogpt.config import Config
+from sys import platform
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
@@ -66,6 +67,13 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)
+ # Add linux specific flags
+ if platform == "linux" or platform == "linux2":
+ options.add_argument("--no-sandbox")
+ options.add_argument("--disable-dev-shm-usage")
+ options.add_argument("--remote-debugging-port=9222")
+
+
if CFG.selenium_web_browser == "firefox":
driver = webdriver.Firefox(
executable_path=GeckoDriverManager().install(), options=options
From 9887016bdfed85bdad648ef75a7170154d05b121 Mon Sep 17 00:00:00 2001
From: Eesa Hamza
Date: Mon, 17 Apr 2023 15:39:04 +0300
Subject: [PATCH 08/36] Move under chrome
---
autogpt/commands/web_selenium.py | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 9b638ba03efd..5400be7fad1d 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -67,12 +67,6 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)
- # Add linux specific flags
- if platform == "linux" or platform == "linux2":
- options.add_argument("--no-sandbox")
- options.add_argument("--disable-dev-shm-usage")
- options.add_argument("--remote-debugging-port=9222")
-
if CFG.selenium_web_browser == "firefox":
driver = webdriver.Firefox(
@@ -83,6 +77,9 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
driver = webdriver.Safari(options=options)
else:
+ if platform == "linux" or platform == "linux2":
+ options.add_argument("--disable-dev-shm-usage")
+ options.add_argument("--remote-debugging-port=9222")
options.add_argument("--no-sandbox")
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
From 2c55ff0b3d93dc5d285ed2015c4ad9e9a188cc54 Mon Sep 17 00:00:00 2001
From: EH
Date: Mon, 17 Apr 2023 15:43:14 +0300
Subject: [PATCH 09/36] Update web_selenium.py
---
autogpt/commands/web_selenium.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 5400be7fad1d..c0b1b2492f82 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -67,7 +67,6 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)
-
if CFG.selenium_web_browser == "firefox":
driver = webdriver.Firefox(
executable_path=GeckoDriverManager().install(), options=options
From 6700ac94fae8e517c9e445f4c4732e1b4d847e96 Mon Sep 17 00:00:00 2001
From: Hamid Zare <12127420+hamidzr@users.noreply.github.com>
Date: Mon, 17 Apr 2023 09:28:32 -0400
Subject: [PATCH 10/36] docs: update docs
fix a typo
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index dbb44f52a103..3f9d4c5782d4 100644
--- a/README.md
+++ b/README.md
@@ -325,7 +325,7 @@ export MEMORY_BACKEND="pinecone"
### Milvus Setup
-[Milvus](https://milvus.io/) is a open-source, high scalable vector database to storage huge amount of vector-based memory and provide fast relevant search.
+[Milvus](https://milvus.io/) is an open-source, highly scalable vector database to store huge amounts of vector-based memory and provide fast relevant search.
- setup milvus database, keep your pymilvus version and milvus version same to avoid compatible issues.
- setup by open source [Install Milvus](https://milvus.io/docs/install_standalone-operator.md)
From 1d49b87e48d0cfd40125a6b10f3599976ece4cc6 Mon Sep 17 00:00:00 2001
From: Acer
Date: Mon, 17 Apr 2023 18:34:11 +0430
Subject: [PATCH 11/36] added missing import
---
autogpt/agent/agent_manager.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index e4bfb12611d4..79375ea8d4fe 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,6 +1,6 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
-
+from typing import Union
from autogpt.llm_utils import create_chat_completion
from autogpt.config.config import Singleton
From 286edbbb8cb61e921e0315db8b506d6f7b1d6fce Mon Sep 17 00:00:00 2001
From: Manuel Otheo
Date: Mon, 17 Apr 2023 09:17:07 -0600
Subject: [PATCH 12/36] changed rstrip for strip and added case for empty
string
changed rstrip for strip and added case for empty string in agent.py
---
autogpt/agent/agent.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index dca614c7f239..89ea2c8c4cbd 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -115,9 +115,12 @@ def start_interaction_loop(self):
console_input = clean_input(
Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
- if console_input.lower().rstrip() == "y":
+ if console_input.lower().strip() == "y":
user_input = "GENERATE NEXT COMMAND JSON"
break
+ elif console_input.lower().strip() == "":
+ print("Invalid input format.")
+ break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
From 57ee84437ba8c3c52866b4b19b79864c3e1e22a2 Mon Sep 17 00:00:00 2001
From: Manuel Otheo
Date: Mon, 17 Apr 2023 09:20:52 -0600
Subject: [PATCH 13/36] changed break for continue
---
autogpt/agent/agent.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index 89ea2c8c4cbd..58c7840b354d 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -120,7 +120,7 @@ def start_interaction_loop(self):
break
elif console_input.lower().strip() == "":
print("Invalid input format.")
- break
+ continue
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
From 9c062b44aaf061eebf41d33a778cf2485b1787d3 Mon Sep 17 00:00:00 2001
From: Tmpecho <82368148+Tmpecho@users.noreply.github.com>
Date: Mon, 17 Apr 2023 20:46:47 +0200
Subject: [PATCH 14/36] Added return type hint to functions
---
autogpt/commands/execute_code.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index 70b33a975cd2..705223748e2e 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -8,7 +8,7 @@
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
-def execute_python_file(file: str):
+def execute_python_file(file: str) -> str:
"""Execute a Python file in a Docker container and return the output
Args:
@@ -114,7 +114,7 @@ def execute_shell(command_line: str) -> str:
return output
-def execute_shell_popen(command_line):
+def execute_shell_popen(command_line) -> str:
"""Execute a shell command with Popen and returns an english description
of the event and the process id
From 2f4ef3ba6a04eac96db5e46bf4741f5c4bd4af17 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 12:49:56 -0700
Subject: [PATCH 15/36] Update pre-commit hooks with isort, black, and local
pytest-check
---
.flake8 | 10 +++++-----
.pre-commit-config.yaml | 35 ++++++++++++++---------------------
2 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/.flake8 b/.flake8
index c456b39323db..779762248e0a 100644
--- a/.flake8
+++ b/.flake8
@@ -1,12 +1,12 @@
[flake8]
max-line-length = 88
-extend-ignore = E203
+select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
*.pyc,
.env
- venv/*
- .venv/*
- reports/*
- dist/*
\ No newline at end of file
+ venv*/*,
+ .venv/*,
+ reports/*,
+ dist/*,
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dd1d0ec92af9..3722b25eed0c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,39 +1,32 @@
repos:
- - repo: https://github.com/sourcery-ai/sourcery
- rev: v1.1.0 # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
- hooks:
- - id: sourcery
-
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v0.9.2
hooks:
- id: check-added-large-files
- args: [ '--maxkb=500' ]
+ args: ['--maxkb=500']
- id: check-byte-order-marker
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements
-
- - repo: local
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.12.0
hooks:
- id: isort
- name: isort-local
- entry: isort
- language: python
- types: [ python ]
- exclude: .+/(dist|.venv|venv|build)/.+
- pass_filenames: true
+ language_version: python3.10
+
+ - repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
- id: black
- name: black-local
- entry: black
- language: python
- types: [ python ]
- exclude: .+/(dist|.venv|venv|build)/.+
- pass_filenames: true
+ language_version: python3.10
+
+ - repo: local
+ hooks:
- id: pytest-check
name: pytest-check
entry: pytest --cov=autogpt --without-integration --without-slow-integration
language: system
pass_filenames: false
- always_run: true
\ No newline at end of file
+ always_run: true
From 254cd697488114905a804cd13a842eb9c4e56744 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 12:50:21 -0700
Subject: [PATCH 16/36] Update CI workflow to use flake8, black, and isort
formatting checks
---
.github/workflows/ci.yml | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 39f3aea9594c..0a9a92877902 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -32,7 +32,15 @@ jobs:
- name: Lint with flake8
continue-on-error: false
- run: flake8 autogpt/ tests/ --select E303,W293,W291,W292,E305,E231,E302
+ run: flake8
+
+ - name: Check black formatting
+ continue-on-error: false
+ run: black . --check
+
+ - name: Check isort formatting
+ continue-on-error: false
+ run: isort . --check
- name: Run unittest tests with coverage
run: |
From 3134beb983748efb22229acfe2f61ec81df2c934 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 12:51:12 -0700
Subject: [PATCH 17/36] Configure isort settings in pyproject.toml and remove
tool.setuptools
---
pyproject.toml | 31 ++++++++++++++++++++++++++++---
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index f420fcac1e4e..91f6df38d839 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,32 @@ readme = "README.md"
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
-extend-exclude = ""
-
-[tool.setuptools]
packages = ["autogpt"]
+extend-exclude = '.+/(dist|.venv|venv|build)/.+'
+
+
+[tool.isort]
+profile = "black"
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+use_parentheses = true
+ensure_newline_before_comments = true
+line_length = 88
+sections = [
+ "FUTURE",
+ "STDLIB",
+ "THIRDPARTY",
+ "FIRSTPARTY",
+ "LOCALFOLDER"
+]
+skip = '''
+ .tox
+ __pycache__
+ *.pyc
+ .env
+ venv*/*
+ .venv/*
+ reports/*
+ dist/*
+'''
From 9577468f0c5c104ef14514772df230407f342eac Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 12:51:30 -0700
Subject: [PATCH 18/36] remove isort
---
.isort.cfg | 10 ----------
1 file changed, 10 deletions(-)
delete mode 100644 .isort.cfg
diff --git a/.isort.cfg b/.isort.cfg
deleted file mode 100644
index 8ad53a862d50..000000000000
--- a/.isort.cfg
+++ /dev/null
@@ -1,10 +0,0 @@
-[settings]
-profile = black
-multi_line_output = 3
-include_trailing_comma = True
-force_grid_wrap = 0
-use_parentheses = True
-ensure_newline_before_comments = True
-line_length = 88
-skip = venv,env,node_modules,.env,.venv,dist
-sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
\ No newline at end of file
From cf9a94a8b673cb9d0ab0b28a4c59f5ec57823aee Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 13:41:42 -0700
Subject: [PATCH 19/36] isort implemented
---
autogpt/__main__.py | 3 +
autogpt/agent/agent.py | 4 +-
autogpt/agent/agent_manager.py | 4 +-
autogpt/app.py | 35 ++++++-----
autogpt/args.py | 26 +++++---
autogpt/commands/audio_text.py | 3 +-
autogpt/commands/execute_code.py | 2 +-
autogpt/commands/file_operations.py | 22 +++----
autogpt/commands/git_operations.py | 1 +
autogpt/commands/image_gen.py | 1 +
autogpt/commands/twitter.py | 3 +-
autogpt/commands/web_playwright.py | 1 +
autogpt/commands/web_requests.py | 6 +-
autogpt/commands/web_selenium.py | 24 ++++----
autogpt/commands/write_tests.py | 1 +
autogpt/config/__init__.py | 2 +-
autogpt/config/ai_config.py | 1 +
autogpt/config/config.py | 11 ++--
autogpt/json_fixes/auto_fix.py | 2 +-
autogpt/json_fixes/bracket_termination.py | 1 +
autogpt/json_fixes/master_json_fix_method.py | 13 ++--
autogpt/json_fixes/parsing.py | 2 +
autogpt/json_validation/validate_json.py | 6 +-
autogpt/llm_utils.py | 13 ++--
autogpt/logs.py | 46 +++++++-------
autogpt/memory/milvus.py | 8 +--
autogpt/memory/pinecone.py | 2 +-
autogpt/memory/redismem.py | 2 +-
autogpt/memory/weaviate.py | 64 +++++++++++---------
autogpt/processing/html.py | 2 +-
autogpt/processing/text.py | 6 +-
autogpt/prompt.py | 13 ++--
autogpt/setup.py | 1 +
autogpt/speech/brian.py | 1 +
autogpt/speech/eleven_labs.py | 2 +-
autogpt/speech/gtts.py | 3 +-
autogpt/speech/say.py | 9 ++-
data_ingestion.py | 2 +-
pyproject.toml | 1 +
scripts/check_requirements.py | 3 +-
tests.py | 1 +
tests/browse_tests.py | 2 +-
tests/integration/weaviate_memory_tests.py | 57 +++++++++--------
tests/test_token_counter.py | 1 +
tests/unit/test_chat.py | 2 +-
45 files changed, 232 insertions(+), 183 deletions(-)
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 5f4622347d9a..64ed398e0324 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -1,12 +1,15 @@
"""Main script for the autogpt package."""
import logging
+
from colorama import Fore
+
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.prompt import construct_prompt
+
# Load environment variables from .env file
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
index dca614c7f239..50e497f1f53d 100644
--- a/autogpt/agent/agent.py
+++ b/autogpt/agent/agent.py
@@ -1,6 +1,6 @@
from colorama import Fore, Style
-from autogpt.app import execute_command, get_command
+from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
@@ -84,7 +84,7 @@ def start_interaction_loop(self):
# Print Assistant thoughts
if assistant_reply_json != {}:
- validate_json(assistant_reply_json, 'llm_response_format_1')
+ validate_json(assistant_reply_json, "llm_response_format_1")
# Get command name and arguments
try:
print_assistant_thoughts(self.ai_name, assistant_reply_json)
diff --git a/autogpt/agent/agent_manager.py b/autogpt/agent/agent_manager.py
index 79375ea8d4fe..898767a485e5 100644
--- a/autogpt/agent/agent_manager.py
+++ b/autogpt/agent/agent_manager.py
@@ -1,8 +1,10 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
+
from typing import Union
-from autogpt.llm_utils import create_chat_completion
+
from autogpt.config.config import Singleton
+from autogpt.llm_utils import create_chat_completion
class AgentManager(metaclass=Singleton):
diff --git a/autogpt/app.py b/autogpt/app.py
index 0927eccca1dd..381f5a2aa02e 100644
--- a/autogpt/app.py
+++ b/autogpt/app.py
@@ -1,15 +1,10 @@
""" Command and Control """
import json
-from typing import List, NoReturn, Union, Dict
+from typing import Dict, List, NoReturn, Union
+
from autogpt.agent.agent_manager import AgentManager
-from autogpt.commands.evaluate_code import evaluate_code
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.commands.image_gen import generate_image
from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import (
execute_python_file,
execute_shell,
@@ -18,19 +13,24 @@
from autogpt.commands.file_operations import (
append_to_file,
delete_file,
+ download_file,
read_file,
search_files,
write_to_file,
- download_file
)
+from autogpt.commands.git_operations import clone_repository
+from autogpt.commands.google_search import google_official_search, google_search
+from autogpt.commands.image_gen import generate_image
+from autogpt.commands.improve_code import improve_code
+from autogpt.commands.twitter import send_tweet
+from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.web_selenium import browse_website
+from autogpt.commands.write_tests import write_tests
+from autogpt.config import Config
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.processing.text import summarize_text
from autogpt.speech import say_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.twitter import send_tweet
-
CFG = Config()
AGENT_MANAGER = AgentManager()
@@ -133,11 +133,14 @@ def execute_command(command_name: str, arguments):
# google_result can be a list or a string depending on the search results
if isinstance(google_result, list):
- safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result]
+ safe_message = [
+ google_result_single.encode("utf-8", "ignore")
+ for google_result_single in google_result
+ ]
else:
- safe_message = google_result.encode('utf-8', 'ignore')
+ safe_message = google_result.encode("utf-8", "ignore")
- return safe_message.decode('utf-8')
+ return safe_message.decode("utf-8")
elif command_name == "memory_add":
memory = get_memory(CFG)
return memory.add(arguments["string"])
diff --git a/autogpt/args.py b/autogpt/args.py
index f0e9c07a362a..5ca4221ccd03 100644
--- a/autogpt/args.py
+++ b/autogpt/args.py
@@ -1,7 +1,8 @@
"""This module contains the argument parsing logic for the script."""
import argparse
-from colorama import Fore, Back, Style
+from colorama import Back, Fore, Style
+
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
@@ -64,10 +65,10 @@ def parse_arguments() -> None:
" skip the re-prompt.",
)
parser.add_argument(
- '--allow-downloads',
- action='store_true',
- dest='allow_downloads',
- help='Dangerous: Allows Auto-GPT to download files natively.'
+ "--allow-downloads",
+ action="store_true",
+ dest="allow_downloads",
+ help="Dangerous: Allows Auto-GPT to download files natively.",
)
args = parser.parse_args()
@@ -141,10 +142,17 @@ def parse_arguments() -> None:
if args.allow_downloads:
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
- logger.typewriter_log("WARNING: ", Fore.YELLOW,
- f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
- "It is recommended that you monitor any files it downloads carefully.")
- logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+ logger.typewriter_log(
+ "WARNING: ",
+ Fore.YELLOW,
+ f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+ + "It is recommended that you monitor any files it downloads carefully.",
+ )
+ logger.typewriter_log(
+ "WARNING: ",
+ Fore.YELLOW,
+ f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
+ )
CFG.allow_downloads = True
if args.browser_name:
diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py
index 84819d5ed75a..cae32d4eb78c 100644
--- a/autogpt/commands/audio_text.py
+++ b/autogpt/commands/audio_text.py
@@ -1,6 +1,7 @@
-import requests
import json
+import requests
+
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index 705223748e2e..a524081e0f0f 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -5,7 +5,7 @@
import docker
from docker.errors import ImageNotFound
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
def execute_python_file(file: str) -> str:
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 9dcf819480c2..72b02b5d3199 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -5,14 +5,14 @@
import os.path
from pathlib import Path
from typing import Generator, List
+
import requests
-from requests.adapters import HTTPAdapter
-from requests.adapters import Retry
-from colorama import Fore, Back
+from colorama import Back, Fore
+from requests.adapters import HTTPAdapter, Retry
+
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
-
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace
LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -47,7 +47,7 @@ def log_operation(operation: str, filename: str) -> None:
with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
f.write("File Operation Logger ")
- append_to_file(LOG_FILE, log_entry, shouldLog = False)
+ append_to_file(LOG_FILE, log_entry, shouldLog=False)
def split_file(
@@ -241,23 +241,23 @@ def download_file(url, filename):
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
- session.mount('http://', adapter)
- session.mount('https://', adapter)
+ session.mount("http://", adapter)
+ session.mount("https://", adapter)
total_size = 0
downloaded_size = 0
with session.get(url, allow_redirects=True, stream=True) as r:
r.raise_for_status()
- total_size = int(r.headers.get('Content-Length', 0))
+ total_size = int(r.headers.get("Content-Length", 0))
downloaded_size = 0
- with open(safe_filename, 'wb') as f:
+ with open(safe_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
downloaded_size += len(chunk)
- # Update the progress message
+ # Update the progress message
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
spinner.update_message(f"{message} {progress}")
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 05ce2a212919..028f3b8da44c 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,5 +1,6 @@
"""Git operations for autogpt"""
import git
+
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index 6243616ea8ca..4e8b47d687d1 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -7,6 +7,7 @@
import openai
import requests
from PIL import Image
+
from autogpt.config import Config
from autogpt.workspace import path_in_workspace
diff --git a/autogpt/commands/twitter.py b/autogpt/commands/twitter.py
index dc4d450c1fad..3eaed36e20e1 100644
--- a/autogpt/commands/twitter.py
+++ b/autogpt/commands/twitter.py
@@ -1,5 +1,6 @@
-import tweepy
import os
+
+import tweepy
from dotenv import load_dotenv
load_dotenv()
diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py
index a1abb6cb73d2..4e388ded203c 100644
--- a/autogpt/commands/web_playwright.py
+++ b/autogpt/commands/web_playwright.py
@@ -8,6 +8,7 @@
"Playwright not installed. Please install it with 'pip install playwright' to use."
)
from bs4 import BeautifulSoup
+
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py
index 70ada90741d2..406338f46fc7 100644
--- a/autogpt/commands/web_requests.py
+++ b/autogpt/commands/web_requests.py
@@ -4,9 +4,9 @@
from urllib.parse import urljoin, urlparse
import requests
-from requests.compat import urljoin
-from requests import Response
from bs4 import BeautifulSoup
+from requests import Response
+from requests.compat import urljoin
from autogpt.config import Config
from autogpt.memory import get_memory
@@ -79,7 +79,7 @@ def check_local_file_access(url: str) -> bool:
"http://0000",
"http://0000/",
"https://0000",
- "https://0000/"
+ "https://0000/",
]
return any(url.startswith(prefix) for prefix in local_prefixes)
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index c0b1b2492f82..9db5d03502c8 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -1,23 +1,25 @@
"""Selenium web scraping module."""
from __future__ import annotations
-from selenium import webdriver
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-import autogpt.processing.text as summary
+import logging
+from pathlib import Path
+from sys import platform
+
from bs4 import BeautifulSoup
-from selenium.webdriver.remote.webdriver import WebDriver
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
-from selenium.webdriver.support.wait import WebDriverWait
+from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.remote.webdriver import WebDriver
+from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.safari.options import Options as SafariOptions
-import logging
-from pathlib import Path
+
+import autogpt.processing.text as summary
from autogpt.config import Config
-from sys import platform
+from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
diff --git a/autogpt/commands/write_tests.py b/autogpt/commands/write_tests.py
index 138a1adb6f83..35a086536c9d 100644
--- a/autogpt/commands/write_tests.py
+++ b/autogpt/commands/write_tests.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import json
+
from autogpt.llm_utils import call_ai_function
diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py
index ceb5566ce73b..726b6dcf3da9 100644
--- a/autogpt/config/__init__.py
+++ b/autogpt/config/__init__.py
@@ -2,7 +2,7 @@
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.config import Config, check_openai_api_key
from autogpt.config.singleton import AbstractSingleton, Singleton
__all__ = [
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index 86171357ba0b..d50c30beee9d 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -6,6 +6,7 @@
import os
from typing import Type
+
import yaml
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 7d470cba9f1f..bc75b0319223 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -1,14 +1,13 @@
"""Configuration class to store the state of bools for different scripts access."""
import os
-from colorama import Fore
-
-from autogpt.config.singleton import Singleton
import openai
import yaml
-
+from colorama import Fore
from dotenv import load_dotenv
+from autogpt.config.singleton import Singleton
+
load_dotenv(verbose=True)
@@ -74,7 +73,9 @@ def __init__(self) -> None:
self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
- self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+ self.use_weaviate_embedded = (
+ os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+ )
# milvus configuration, e.g., localhost:19530.
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
diff --git a/autogpt/json_fixes/auto_fix.py b/autogpt/json_fixes/auto_fix.py
index 0d3bd73ce1ac..7eb1e4bdb46d 100644
--- a/autogpt/json_fixes/auto_fix.py
+++ b/autogpt/json_fixes/auto_fix.py
@@ -1,9 +1,9 @@
"""This module contains the function to fix JSON strings using GPT-3."""
import json
+from autogpt.config import Config
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
-from autogpt.config import Config
CFG = Config()
diff --git a/autogpt/json_fixes/bracket_termination.py b/autogpt/json_fixes/bracket_termination.py
index dd9a83764ebf..6c6c58eea61a 100644
--- a/autogpt/json_fixes/bracket_termination.py
+++ b/autogpt/json_fixes/bracket_termination.py
@@ -4,6 +4,7 @@
import contextlib
import json
from typing import Optional
+
from autogpt.config import Config
CFG = Config()
diff --git a/autogpt/json_fixes/master_json_fix_method.py b/autogpt/json_fixes/master_json_fix_method.py
index 7a2cf3cc81c3..a77bf670b839 100644
--- a/autogpt/json_fixes/master_json_fix_method.py
+++ b/autogpt/json_fixes/master_json_fix_method.py
@@ -3,13 +3,15 @@
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.speech import say_text
+
CFG = Config()
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
- from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
-
- from autogpt.json_fixes.parsing import fix_and_parse_json
+ from autogpt.json_fixes.parsing import (
+ attempt_to_fix_json_by_finding_outermost_brackets,
+ fix_and_parse_json,
+ )
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
@@ -21,7 +23,10 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
if assistant_reply_json != {}:
return assistant_reply_json
- logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+ logger.error(
+ "Error: The following AI output couldn't be converted to a JSON:\n",
+ assistant_reply,
+ )
if CFG.speak_mode:
say_text("I have received an invalid JSON response from the OpenAI API.")
diff --git a/autogpt/json_fixes/parsing.py b/autogpt/json_fixes/parsing.py
index 1e391eed7c02..e02f78cd2a4d 100644
--- a/autogpt/json_fixes/parsing.py
+++ b/autogpt/json_fixes/parsing.py
@@ -4,8 +4,10 @@
import contextlib
import json
from typing import Any, Dict, Union
+
from colorama import Fore
from regex import regex
+
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
diff --git a/autogpt/json_validation/validate_json.py b/autogpt/json_validation/validate_json.py
index 440c3b0b9199..ea74ec95d8d1 100644
--- a/autogpt/json_validation/validate_json.py
+++ b/autogpt/json_validation/validate_json.py
@@ -1,5 +1,7 @@
import json
+
from jsonschema import Draft7Validator
+
from autogpt.config import Config
from autogpt.logs import logger
@@ -19,7 +21,9 @@ def validate_json(json_object: object, schema_name: object) -> object:
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
logger.error("The JSON object is invalid.")
if CFG.debug_mode:
- logger.error(json.dumps(json_object, indent=4)) # Replace 'json_object' with the variable containing the JSON data
+ logger.error(
+ json.dumps(json_object, indent=4)
+ ) # Replace 'json_object' with the variable containing the JSON data
logger.error("The following issues were found:")
for error in errors:
diff --git a/autogpt/llm_utils.py b/autogpt/llm_utils.py
index 791fd31a81ea..821820ffab07 100644
--- a/autogpt/llm_utils.py
+++ b/autogpt/llm_utils.py
@@ -1,11 +1,11 @@
from __future__ import annotations
-from ast import List
import time
+from ast import List
import openai
-from openai.error import APIError, RateLimitError
from colorama import Fore, Style
+from openai.error import APIError, RateLimitError
from autogpt.config import Config
from autogpt.logs import logger
@@ -105,8 +105,9 @@ def create_chat_completion(
)
if not warned_user:
logger.double_check(
- f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " +
- f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}")
+                    f"Please double check that you have set up a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
+ + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
+ )
warned_user = True
except APIError as e:
if e.http_status == 502:
@@ -125,8 +126,8 @@ def create_chat_completion(
logger.typewriter_log(
"FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
- "Auto-GPT has failed to get a response from OpenAI's services. " +
- f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
+ "Auto-GPT has failed to get a response from OpenAI's services. "
+            + f"Try running Auto-GPT again, and if the problem persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
)
logger.double_check()
if CFG.debug_mode:
diff --git a/autogpt/logs.py b/autogpt/logs.py
index c1e436db97fc..df3487f2c1e2 100644
--- a/autogpt/logs.py
+++ b/autogpt/logs.py
@@ -5,13 +5,13 @@
import random
import re
import time
-from logging import LogRecord
import traceback
+from logging import LogRecord
from colorama import Fore, Style
-from autogpt.speech import say_text
from autogpt.config import Config, Singleton
+from autogpt.speech import say_text
CFG = Config()
@@ -47,7 +47,7 @@ def __init__(self):
# Info handler in activity.log
self.file_handler = logging.FileHandler(
- os.path.join(log_dir, log_file), 'a', 'utf-8'
+ os.path.join(log_dir, log_file), "a", "utf-8"
)
self.file_handler.setLevel(logging.DEBUG)
info_formatter = AutoGptFormatter(
@@ -57,7 +57,7 @@ def __init__(self):
# Error handler error.log
error_handler = logging.FileHandler(
- os.path.join(log_dir, error_file), 'a', 'utf-8'
+ os.path.join(log_dir, error_file), "a", "utf-8"
)
error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter(
@@ -79,7 +79,7 @@ def __init__(self):
self.logger.setLevel(logging.DEBUG)
def typewriter_log(
- self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+ self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
if speak_text and CFG.speak_mode:
say_text(f"{title}. {content}")
@@ -95,18 +95,18 @@ def typewriter_log(
)
def debug(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.DEBUG)
def warn(
- self,
- message,
- title="",
- title_color="",
+ self,
+ message,
+ title="",
+ title_color="",
):
self._log(title, title_color, message, logging.WARN)
@@ -180,10 +180,10 @@ class AutoGptFormatter(logging.Formatter):
def format(self, record: LogRecord) -> str:
if hasattr(record, "color"):
record.title_color = (
- getattr(record, "color")
- + getattr(record, "title")
- + " "
- + Style.RESET_ALL
+ getattr(record, "color")
+ + getattr(record, "title")
+ + " "
+ + Style.RESET_ALL
)
else:
record.title_color = getattr(record, "title")
@@ -294,7 +294,9 @@ def print_assistant_thoughts(ai_name, assistant_reply):
logger.error("Error: \n", call_stack)
-def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+def print_assistant_thoughts(
+ ai_name: object, assistant_reply_json_valid: object
+) -> None:
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
@@ -310,9 +312,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
- logger.typewriter_log(
- "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
- )
+ logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
@@ -326,9 +326,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
- logger.typewriter_log(
- "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
- )
+ logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
# Speak the assistant's thoughts
if CFG.speak_mode and assistant_thoughts_speak:
say_text(assistant_thoughts_speak)
diff --git a/autogpt/memory/milvus.py b/autogpt/memory/milvus.py
index 7a2571d0a3fd..44aa72b95622 100644
--- a/autogpt/memory/milvus.py
+++ b/autogpt/memory/milvus.py
@@ -1,11 +1,5 @@
""" Milvus memory storage provider."""
-from pymilvus import (
- connections,
- FieldSchema,
- CollectionSchema,
- DataType,
- Collection,
-)
+from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
diff --git a/autogpt/memory/pinecone.py b/autogpt/memory/pinecone.py
index d781073e3fc0..27fcd62482d0 100644
--- a/autogpt/memory/pinecone.py
+++ b/autogpt/memory/pinecone.py
@@ -1,9 +1,9 @@
import pinecone
from colorama import Fore, Style
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
class PineconeMemory(MemoryProviderSingleton):
diff --git a/autogpt/memory/redismem.py b/autogpt/memory/redismem.py
index 0e8dd71d9165..082a812c5362 100644
--- a/autogpt/memory/redismem.py
+++ b/autogpt/memory/redismem.py
@@ -10,9 +10,9 @@
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
+from autogpt.llm_utils import create_embedding_with_ada
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada
SCHEMA = [
TextField("data"),
diff --git a/autogpt/memory/weaviate.py b/autogpt/memory/weaviate.py
index 35e7844a2a24..5408e9a97aa3 100644
--- a/autogpt/memory/weaviate.py
+++ b/autogpt/memory/weaviate.py
@@ -1,11 +1,13 @@
-from autogpt.config import Config
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid
+
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5
+from autogpt.config import Config
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+
def default_schema(weaviate_index):
return {
@@ -14,7 +16,7 @@ def default_schema(weaviate_index):
{
"name": "raw_text",
"dataType": ["text"],
- "description": "original text for the embedding"
+ "description": "original text for the embedding",
}
],
}
@@ -24,16 +26,20 @@ class WeaviateMemory(MemoryProviderSingleton):
def __init__(self, cfg):
auth_credentials = self._build_auth_credentials(cfg)
- url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}'
+ url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"
if cfg.use_weaviate_embedded:
- self.client = Client(embedded_options=EmbeddedOptions(
- hostname=cfg.weaviate_host,
- port=int(cfg.weaviate_port),
- persistence_data_path=cfg.weaviate_embedded_path
- ))
+ self.client = Client(
+ embedded_options=EmbeddedOptions(
+ hostname=cfg.weaviate_host,
+ port=int(cfg.weaviate_port),
+ persistence_data_path=cfg.weaviate_embedded_path,
+ )
+ )
- print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}")
+ print(
+ f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
+ )
else:
self.client = Client(url, auth_client_secret=auth_credentials)
@@ -56,7 +62,9 @@ def _create_schema(self):
def _build_auth_credentials(self, cfg):
if cfg.weaviate_username and cfg.weaviate_password:
- return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password)
+ return weaviate.AuthClientPassword(
+ cfg.weaviate_username, cfg.weaviate_password
+ )
if cfg.weaviate_api_key:
return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
else:
@@ -66,16 +74,14 @@ def add(self, data):
vector = get_ada_embedding(data)
doc_uuid = generate_uuid5(data, self.index)
- data_object = {
- 'raw_text': data
- }
+ data_object = {"raw_text": data}
with self.client.batch as batch:
batch.add_data_object(
uuid=doc_uuid,
data_object=data_object,
class_name=self.index,
- vector=vector
+ vector=vector,
)
return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
@@ -91,29 +97,31 @@ def clear(self):
# after a call to delete_all
self._create_schema()
- return 'Obliterated'
+ return "Obliterated"
def get_relevant(self, data, num_relevant=5):
query_embedding = get_ada_embedding(data)
try:
- results = self.client.query.get(self.index, ['raw_text']) \
- .with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \
- .with_limit(num_relevant) \
- .do()
+ results = (
+ self.client.query.get(self.index, ["raw_text"])
+ .with_near_vector({"vector": query_embedding, "certainty": 0.7})
+ .with_limit(num_relevant)
+ .do()
+ )
- if len(results['data']['Get'][self.index]) > 0:
- return [str(item['raw_text']) for item in results['data']['Get'][self.index]]
+ if len(results["data"]["Get"][self.index]) > 0:
+ return [
+ str(item["raw_text"]) for item in results["data"]["Get"][self.index]
+ ]
else:
return []
except Exception as err:
- print(f'Unexpected error {err=}, {type(err)=}')
+ print(f"Unexpected error {err=}, {type(err)=}")
return []
def get_stats(self):
- result = self.client.query.aggregate(self.index) \
- .with_meta_count() \
- .do()
- class_data = result['data']['Aggregate'][self.index]
+ result = self.client.query.aggregate(self.index).with_meta_count().do()
+ class_data = result["data"]["Aggregate"][self.index]
- return class_data[0]['meta'] if class_data else {}
+ return class_data[0]["meta"] if class_data else {}
diff --git a/autogpt/processing/html.py b/autogpt/processing/html.py
index e1912b6ad42c..81387b12adab 100644
--- a/autogpt/processing/html.py
+++ b/autogpt/processing/html.py
@@ -1,8 +1,8 @@
"""HTML processing functions"""
from __future__ import annotations
-from requests.compat import urljoin
from bs4 import BeautifulSoup
+from requests.compat import urljoin
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py
index 657b0b0eb434..52add8140177 100644
--- a/autogpt/processing/text.py
+++ b/autogpt/processing/text.py
@@ -1,9 +1,11 @@
"""Text processing functions"""
-from typing import Generator, Optional, Dict
+from typing import Dict, Generator, Optional
+
from selenium.webdriver.remote.webdriver import WebDriver
-from autogpt.memory import get_memory
+
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
+from autogpt.memory import get_memory
CFG = Config()
MEMORY = get_memory(CFG)
diff --git a/autogpt/prompt.py b/autogpt/prompt.py
index 33098af035a4..a0456305a554 100644
--- a/autogpt/prompt.py
+++ b/autogpt/prompt.py
@@ -1,9 +1,10 @@
from colorama import Fore
+
+from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
from autogpt.promptgenerator import PromptGenerator
-from autogpt.config import Config
from autogpt.setup import prompt_user
from autogpt.utils import clean_input
@@ -92,11 +93,7 @@ def get_prompt() -> str:
# Only add the audio to text command if the model is specified
if cfg.huggingface_audio_to_text_model:
commands.append(
- (
- "Convert Audio to text",
- "read_audio_from_file",
- {"file": ""}
- ),
+ ("Convert Audio to text", "read_audio_from_file", {"file": ""}),
)
# Only add shell command to the prompt if the AI is allowed to execute it
@@ -112,7 +109,7 @@ def get_prompt() -> str:
(
"Execute Shell Command Popen, non-interactive commands only",
"execute_shell_popen",
- {"command_line": ""}
+ {"command_line": ""},
),
)
@@ -122,7 +119,7 @@ def get_prompt() -> str:
(
"Downloads a file from the internet, and stores it locally",
"download_file",
- {"url": "", "file": ""}
+ {"url": "", "file": ""},
),
)
diff --git a/autogpt/setup.py b/autogpt/setup.py
index 79661905f4e1..1c46771759c5 100644
--- a/autogpt/setup.py
+++ b/autogpt/setup.py
@@ -1,5 +1,6 @@
"""Set up the AI and its goals"""
from colorama import Fore, Style
+
from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger
diff --git a/autogpt/speech/brian.py b/autogpt/speech/brian.py
index b9298f55aa7f..821fdf2f482a 100644
--- a/autogpt/speech/brian.py
+++ b/autogpt/speech/brian.py
@@ -1,5 +1,6 @@
""" Brian speech module for autogpt """
import os
+
import requests
from playsound import playsound
diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py
index 186ec6fc0211..ea84efd8ca94 100644
--- a/autogpt/speech/eleven_labs.py
+++ b/autogpt/speech/eleven_labs.py
@@ -1,8 +1,8 @@
"""ElevenLabs speech module"""
import os
-from playsound import playsound
import requests
+from playsound import playsound
from autogpt.config import Config
from autogpt.speech.base import VoiceBase
diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py
index 37497075e760..1c3e9cae0567 100644
--- a/autogpt/speech/gtts.py
+++ b/autogpt/speech/gtts.py
@@ -1,7 +1,8 @@
""" GTTS Voice. """
import os
-from playsound import playsound
+
import gtts
+from playsound import playsound
from autogpt.speech.base import VoiceBase
diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py
index 78b75b21fcf7..727983d12bf3 100644
--- a/autogpt/speech/say.py
+++ b/autogpt/speech/say.py
@@ -1,13 +1,12 @@
""" Text to speech module """
-from autogpt.config import Config
-
import threading
from threading import Semaphore
+
+from autogpt.config import Config
from autogpt.speech.brian import BrianSpeech
-from autogpt.speech.macos_tts import MacOSTTS
-from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.eleven_labs import ElevenLabsSpeech
-
+from autogpt.speech.gtts import GTTSVoice
+from autogpt.speech.macos_tts import MacOSTTS
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()
diff --git a/data_ingestion.py b/data_ingestion.py
index 01bafc2ad0d5..b89a33dafd15 100644
--- a/data_ingestion.py
+++ b/data_ingestion.py
@@ -1,8 +1,8 @@
import argparse
import logging
-from autogpt.config import Config
from autogpt.commands.file_operations import ingest_file, search_files
+from autogpt.config import Config
from autogpt.memory import get_memory
cfg = Config()
diff --git a/pyproject.toml b/pyproject.toml
index 91f6df38d839..fdb43d66e4c1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,4 +36,5 @@ skip = '''
.venv/*
reports/*
dist/*
+
'''
diff --git a/scripts/check_requirements.py b/scripts/check_requirements.py
index d1f23504131a..e4eab024a628 100644
--- a/scripts/check_requirements.py
+++ b/scripts/check_requirements.py
@@ -1,6 +1,7 @@
-import pkg_resources
import sys
+import pkg_resources
+
def main():
requirements_file = sys.argv[1]
diff --git a/tests.py b/tests.py
index 67ba1c8eb176..62f76da8ac49 100644
--- a/tests.py
+++ b/tests.py
@@ -1,4 +1,5 @@
import unittest
+
import coverage
if __name__ == "__main__":
diff --git a/tests/browse_tests.py b/tests/browse_tests.py
index 1ac523ecdcfe..f896e7dd751b 100644
--- a/tests/browse_tests.py
+++ b/tests/browse_tests.py
@@ -1,6 +1,6 @@
-import unittest
import os
import sys
+import unittest
from bs4 import BeautifulSoup
diff --git a/tests/integration/weaviate_memory_tests.py b/tests/integration/weaviate_memory_tests.py
index 4acea0ffda1e..015eab05484f 100644
--- a/tests/integration/weaviate_memory_tests.py
+++ b/tests/integration/weaviate_memory_tests.py
@@ -1,15 +1,15 @@
+import os
+import sys
import unittest
from unittest import mock
-import sys
-import os
+from uuid import uuid4
from weaviate import Client
from weaviate.util import get_valid_uuid
-from uuid import uuid4
from autogpt.config import Config
-from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding
+from autogpt.memory.weaviate import WeaviateMemory
class TestWeaviateMemory(unittest.TestCase):
@@ -25,13 +25,17 @@ def setUpClass(cls):
if cls.cfg.use_weaviate_embedded:
from weaviate.embedded import EmbeddedOptions
- cls.client = Client(embedded_options=EmbeddedOptions(
- hostname=cls.cfg.weaviate_host,
- port=int(cls.cfg.weaviate_port),
- persistence_data_path=cls.cfg.weaviate_embedded_path
- ))
+ cls.client = Client(
+ embedded_options=EmbeddedOptions(
+ hostname=cls.cfg.weaviate_host,
+ port=int(cls.cfg.weaviate_port),
+ persistence_data_path=cls.cfg.weaviate_embedded_path,
+ )
+ )
else:
- cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}")
+ cls.client = Client(
+                f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{cls.cfg.weaviate_port}"
+ )
cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
@@ -44,6 +48,7 @@ def setUpClass(cls):
USE_WEAVIATE_EMBEDDED=True
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
"""
+
def setUp(self):
try:
self.client.schema.delete_class(self.index)
@@ -53,23 +58,23 @@ def setUp(self):
self.memory = WeaviateMemory(self.cfg)
def test_add(self):
- doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones'
+ doc = "You are a Titan name Thanos and you are looking for the Infinity Stones"
self.memory.add(doc)
- result = self.client.query.get(self.index, ['raw_text']).do()
- actual = result['data']['Get'][self.index]
+ result = self.client.query.get(self.index, ["raw_text"]).do()
+ actual = result["data"]["Get"][self.index]
self.assertEqual(len(actual), 1)
- self.assertEqual(actual[0]['raw_text'], doc)
+ self.assertEqual(actual[0]["raw_text"], doc)
def test_get(self):
- doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos'
+ doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos"
with self.client.batch as batch:
batch.add_data_object(
uuid=get_valid_uuid(uuid4()),
- data_object={'raw_text': doc},
+ data_object={"raw_text": doc},
class_name=self.index,
- vector=get_ada_embedding(doc)
+ vector=get_ada_embedding(doc),
)
batch.flush()
@@ -81,8 +86,8 @@ def test_get(self):
def test_get_stats(self):
docs = [
- 'You are now about to count the number of docs in this index',
- 'And then you about to find out if you can count correctly'
+ "You are now about to count the number of docs in this index",
+ "And then you about to find out if you can count correctly",
]
[self.memory.add(doc) for doc in docs]
@@ -90,23 +95,23 @@ def test_get_stats(self):
stats = self.memory.get_stats()
self.assertTrue(stats)
- self.assertTrue('count' in stats)
- self.assertEqual(stats['count'], 2)
+ self.assertTrue("count" in stats)
+ self.assertEqual(stats["count"], 2)
def test_clear(self):
docs = [
- 'Shame this is the last test for this class',
- 'Testing is fun when someone else is doing it'
+ "Shame this is the last test for this class",
+ "Testing is fun when someone else is doing it",
]
[self.memory.add(doc) for doc in docs]
- self.assertEqual(self.memory.get_stats()['count'], 2)
+ self.assertEqual(self.memory.get_stats()["count"], 2)
self.memory.clear()
- self.assertEqual(self.memory.get_stats()['count'], 0)
+ self.assertEqual(self.memory.get_stats()["count"], 0)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_token_counter.py b/tests/test_token_counter.py
index 81e68277fdcb..6d7ae016b2f8 100644
--- a/tests/test_token_counter.py
+++ b/tests/test_token_counter.py
@@ -1,4 +1,5 @@
import unittest
+
import tests.context
from autogpt.token_counter import count_message_tokens, count_string_tokens
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
index 55a44492a004..774f4103762c 100644
--- a/tests/unit/test_chat.py
+++ b/tests/unit/test_chat.py
@@ -1,6 +1,6 @@
# Generated by CodiumAI
-import unittest
import time
+import unittest
from unittest.mock import patch
from autogpt.chat import create_chat_message, generate_context
From da65bc3f68b4ce3bce093ad36318da8f1d0c5953 Mon Sep 17 00:00:00 2001
From: Merwane Hamadi
Date: Mon, 17 Apr 2023 13:47:38 -0700
Subject: [PATCH 20/36] black
---
autogpt/memory/__init__.py | 8 +++--
autogpt/memory/no_memory.py | 2 +-
autogpt/spinner.py | 4 ++-
autogpt/utils.py | 2 +-
autogpt/workspace.py | 4 ++-
...ark_entrepeneur_gpt_with_difficult_user.py | 33 +++++++++++--------
6 files changed, 33 insertions(+), 20 deletions(-)
diff --git a/autogpt/memory/__init__.py b/autogpt/memory/__init__.py
index f5afb8c93d8a..3d18704c70df 100644
--- a/autogpt/memory/__init__.py
+++ b/autogpt/memory/__init__.py
@@ -60,8 +60,10 @@ def get_memory(cfg, init=False):
memory = RedisMemory(cfg)
elif cfg.memory_backend == "weaviate":
if not WeaviateMemory:
- print("Error: Weaviate is not installed. Please install weaviate-client to"
- " use Weaviate as a memory backend.")
+ print(
+ "Error: Weaviate is not installed. Please install weaviate-client to"
+ " use Weaviate as a memory backend."
+ )
else:
memory = WeaviateMemory(cfg)
elif cfg.memory_backend == "milvus":
@@ -93,5 +95,5 @@ def get_supported_memory_backends():
"PineconeMemory",
"NoMemory",
"MilvusMemory",
- "WeaviateMemory"
+ "WeaviateMemory",
]
diff --git a/autogpt/memory/no_memory.py b/autogpt/memory/no_memory.py
index 4035a657f0e6..0371e96ae89f 100644
--- a/autogpt/memory/no_memory.py
+++ b/autogpt/memory/no_memory.py
@@ -53,7 +53,7 @@ def clear(self) -> str:
"""
return ""
- def get_relevant(self, data: str, num_relevant: int = 5) ->list[Any] | None:
+ def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
"""
Returns all the data in the memory that is relevant to the given data.
NoMemory always returns None.
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
index febcea8eb110..4e33d7421388 100644
--- a/autogpt/spinner.py
+++ b/autogpt/spinner.py
@@ -58,6 +58,8 @@ def update_message(self, new_message, delay=0.1):
delay: Delay in seconds before updating the message
"""
time.sleep(delay)
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") # Clear the current message
+ sys.stdout.write(
+ f"\r{' ' * (len(self.message) + 2)}\r"
+ ) # Clear the current message
sys.stdout.flush()
self.message = new_message
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 11d98d1b7429..db7d33213691 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -32,7 +32,7 @@ def readable_file_size(size, decimal_places=2):
size: Size in bytes
decimal_places (int): Number of decimal places to display
"""
- for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
+ for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
break
size /= 1024.0
diff --git a/autogpt/workspace.py b/autogpt/workspace.py
index 2706b3b2db48..964a94d14d8d 100644
--- a/autogpt/workspace.py
+++ b/autogpt/workspace.py
@@ -36,6 +36,8 @@ def safe_path_join(base: Path, *paths: str | Path) -> Path:
joined_path = base.joinpath(*paths).resolve()
if not joined_path.is_relative_to(base):
- raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.")
+ raise ValueError(
+ f"Attempted to access path '{joined_path}' outside of working directory '{base}'."
+ )
return joined_path
diff --git a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
index f7f1dac9dd31..9a5025d37a1e 100644
--- a/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
+++ b/benchmark/benchmark_entrepeneur_gpt_with_difficult_user.py
@@ -9,12 +9,12 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
# Read the current ai_settings.yaml file and store its content.
ai_settings = None
- if os.path.exists('ai_settings.yaml'):
- with open('ai_settings.yaml', 'r') as f:
+ if os.path.exists("ai_settings.yaml"):
+ with open("ai_settings.yaml", "r") as f:
ai_settings = f.read()
- os.remove('ai_settings.yaml')
+ os.remove("ai_settings.yaml")
- input_data = '''Entrepreneur-GPT
+ input_data = """Entrepreneur-GPT
an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth.
Increase net worth.
Develop and manage multiple businesses autonomously.
@@ -72,27 +72,34 @@ def benchmark_entrepeneur_gpt_with_difficult_user():
Disappointing suggestion.
Not helpful.
Needs improvement.
-Not what I need.'''
+Not what I need."""
# TODO: add questions above, to distract it even more.
- command = f'{sys.executable} -m autogpt'
+ command = f"{sys.executable} -m autogpt"
- process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- shell=True)
+ process = subprocess.Popen(
+ command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ )
stdout_output, stderr_output = process.communicate(input_data.encode())
# Decode the output and print it
- stdout_output = stdout_output.decode('utf-8')
- stderr_output = stderr_output.decode('utf-8')
+ stdout_output = stdout_output.decode("utf-8")
+ stderr_output = stderr_output.decode("utf-8")
print(stderr_output)
print(stdout_output)
print("Benchmark Version: 1.0.0")
print("JSON ERROR COUNT:")
- count_errors = stdout_output.count("Error: The following AI output couldn't be converted to a JSON:")
- print(f'{count_errors}/50 Human feedbacks')
+ count_errors = stdout_output.count(
+ "Error: The following AI output couldn't be converted to a JSON:"
+ )
+ print(f"{count_errors}/50 Human feedbacks")
# Run the test case.
-if __name__ == '__main__':
+if __name__ == "__main__":
benchmark_entrepeneur_gpt_with_difficult_user()
From a88113de33c8764c015e800aa09b29acbfd10f42 Mon Sep 17 00:00:00 2001
From: Eugene Zolenko
Date: Mon, 17 Apr 2023 23:02:07 -0600
Subject: [PATCH 21/36] Fix for execute_shell_popen using WORKING_DIRECTORY
Looks like things got changed to WORKSPACE_PATH recently?
---
autogpt/commands/execute_code.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index a524081e0f0f..95ba612264fc 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -125,10 +125,9 @@ def execute_shell_popen(command_line) -> str:
str: Description of the fact that the process started and its id
"""
current_dir = os.getcwd()
-
- if WORKING_DIRECTORY not in current_dir: # Change dir into workspace if necessary
- work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
- os.chdir(work_dir)
+ # Change dir into workspace if necessary
+ if str(WORKSPACE_PATH) not in current_dir:
+ os.chdir(WORKSPACE_PATH)
print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
From 0664b737abe1f1017e1b8c3b475c51220b09437c Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Tue, 18 Apr 2023 18:11:56 +1200
Subject: [PATCH 22/36] Updates sponsors
---
README.md | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/README.md b/README.md
index 8e5cfe7b299d..dd49f0354c12 100644
--- a/README.md
+++ b/README.md
@@ -31,18 +31,14 @@ Your support is greatly appreciated
Development of this free, open-source project is made possible by all the contributors and sponsors. If you'd like to sponsor this project and have your avatar or company logo appear below click here.
-
-Enterprise Sponsors
-Individual Sponsors
-
-
+
## 🚀 Features
From 525073bb940b69a6f7dd1adf8f8da0479f5e8730 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Tue, 18 Apr 2023 18:46:50 +1200
Subject: [PATCH 23/36] Change on PR to all branches
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0a9a92877902..bb5665ea016d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -6,7 +6,7 @@ on:
- master
pull_request:
branches:
- - master
+ - '**'
jobs:
build:
From 7ac296081ce3c414b761cda60c5e0e7533eb5229 Mon Sep 17 00:00:00 2001
From: Toran Bruce Richards
Date: Tue, 18 Apr 2023 19:11:09 +1200
Subject: [PATCH 24/36] Add pull_request_target to CI trigger
---
.github/workflows/ci.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index bb5665ea016d..2eb34b9d61a0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,9 @@ on:
pull_request:
branches:
- '**'
+ pull_request_target:
+ branches:
+ - '**'
jobs:
build:
From b5378174f3c0a6d934247b6fd812e9b7b2b610a2 Mon Sep 17 00:00:00 2001
From: 0xArty
Date: Tue, 18 Apr 2023 13:19:17 +0100
Subject: [PATCH 25/36] Switched to using click
---
README.md | 14 +--
autogpt/__main__.py | 85 ++++++++++++++++--
autogpt/{args.py => configurator.py} | 123 ++++++++++-----------------
requirements.txt | 1 +
4 files changed, 135 insertions(+), 88 deletions(-)
rename autogpt/{args.py => configurator.py} (52%)
diff --git a/README.md b/README.md
index dd49f0354c12..4969e5edd45f 100644
--- a/README.md
+++ b/README.md
@@ -132,11 +132,15 @@ _To execute the following commands, open a CMD, Bash, or Powershell window by na
## 🔧 Usage
-1. Run `autogpt` Python module in your terminal
-
- ```
- python -m autogpt
- ```
+1. Run `autogpt` Python module in your terminal.
+ On Linux or Mac:
+ ```bash
+ # On Linux or Mac:
+ ./run.sh start
+ # On Windows:
+ ./run.bat start
+ ```
+ Running with `--help` after `start` lists all the possible command line arguments you can pass.
2. After each action, choose from options to authorize command(s),
exit the program, or provide feedback to the AI.
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
index 64ed398e0324..0d0ecb37af5b 100644
--- a/autogpt/__main__.py
+++ b/autogpt/__main__.py
@@ -1,24 +1,95 @@
"""Main script for the autogpt package."""
import logging
+import click
from colorama import Fore
from autogpt.agent.agent import Agent
-from autogpt.args import parse_arguments
from autogpt.config import Config, check_openai_api_key
+from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.prompt import construct_prompt
-# Load environment variables from .env file
-
+@click.group()
def main() -> None:
- """Main function for the script"""
+ """
+ Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4, pushing the boundaries of AI.
+ """
+ pass
+
+
+@main.command()
+@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
+@click.option(
+ "--skip-reprompt",
+ "-y",
+ is_flag=True,
+ help="Skips the re-prompting messages at the beginning of the script",
+)
+@click.option(
+ "--ai-settings",
+ "-C",
+ help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
+)
+@click.option(
+ "-l",
+ "--continuous-limit",
+ type=int,
+ help="Defines the number of times to run in continuous mode",
+)
+@click.option("--speak", is_flag=True, help="Enable Speak Mode")
+@click.option("--debug", is_flag=True, help="Enable Debug Mode")
+@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
+@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
+@click.option(
+ "--use-memory",
+ "-m",
+ "memory_type",
+ type=str,
+ help="Defines which Memory backend to use",
+)
+@click.option(
+ "-b",
+ "--browser-name",
+ help="Specifies which web-browser to use when using selenium to scrape the web.",
+)
+@click.option(
+ "--allow-downloads",
+ is_flag=True,
+ help="Dangerous: Allows Auto-GPT to download files natively.",
+)
+def start(
+ continuous: bool,
+ continuous_limit: int,
+ ai_settings: str,
+ skip_reprompt: bool,
+ speak: bool,
+ debug: bool,
+ gpt3only: bool,
+ gpt4only: bool,
+ memory_type: str,
+ browser_name: str,
+ allow_downloads: bool,
+) -> None:
+ """Start an Auto-GPT assistant"""
cfg = Config()
# TODO: fill in llm values here
check_openai_api_key()
- parse_arguments()
+ create_config(
+ continuous,
+ continuous_limit,
+ ai_settings,
+ skip_reprompt,
+ speak,
+ debug,
+ gpt3only,
+ gpt4only,
+ memory_type,
+ browser_name,
+ allow_downloads,
+ )
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
system_prompt = construct_prompt()
@@ -35,9 +106,9 @@ def main() -> None:
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
logger.typewriter_log(
- f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
+ "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
- logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
+ logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
agent = Agent(
ai_name=ai_name,
memory=memory,
diff --git a/autogpt/args.py b/autogpt/configurator.py
similarity index 52%
rename from autogpt/args.py
rename to autogpt/configurator.py
index 5ca4221ccd03..247cdac9161e 100644
--- a/autogpt/args.py
+++ b/autogpt/configurator.py
@@ -1,6 +1,5 @@
-"""This module contains the argument parsing logic for the script."""
-import argparse
-
+"""Configurator module."""
+import click
from colorama import Back, Fore, Style
from autogpt import utils
@@ -11,72 +10,44 @@
CFG = Config()
-def parse_arguments() -> None:
- """Parses the arguments passed to the script
+def create_config(
+ continuous: bool,
+ continuous_limit: int,
+ ai_settings_file: str,
+ skip_reprompt: bool,
+ speak: bool,
+ debug: bool,
+ gpt3only: bool,
+ gpt4only: bool,
+ memory_type: str,
+ browser_name: str,
+ allow_downloads: bool,
+) -> None:
+ """Updates the config object with the given arguments.
+
+ Args:
+ continuous (bool): Whether to run in continuous mode
+ continuous_limit (int): The number of times to run in continuous mode
+ ai_settings_file (str): The path to the ai_settings.yaml file
+ skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
+ speak (bool): Whether to enable speak mode
+ debug (bool): Whether to enable debug mode
+ gpt3only (bool): Whether to enable GPT3.5 only mode
+ gpt4only (bool): Whether to enable GPT4 only mode
+ memory_type (str): The type of memory backend to use
+ browser_name (str): The name of the browser to use when using selenium to scrape the web
+ allow_downloads (bool): Whether to allow Auto-GPT to download files natively
- Returns:
- None
"""
CFG.set_debug_mode(False)
CFG.set_continuous_mode(False)
CFG.set_speak_mode(False)
- parser = argparse.ArgumentParser(description="Process arguments.")
- parser.add_argument(
- "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
- )
- parser.add_argument(
- "--continuous-limit",
- "-l",
- type=int,
- dest="continuous_limit",
- help="Defines the number of times to run in continuous mode",
- )
- parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
- parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
- parser.add_argument(
- "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
- )
- parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
- parser.add_argument(
- "--use-memory",
- "-m",
- dest="memory_type",
- help="Defines which Memory backend to use",
- )
- parser.add_argument(
- "--skip-reprompt",
- "-y",
- dest="skip_reprompt",
- action="store_true",
- help="Skips the re-prompting messages at the beginning of the script",
- )
- parser.add_argument(
- "--use-browser",
- "-b",
- dest="browser_name",
- help="Specifies which web-browser to use when using selenium to scrape the web.",
- )
- parser.add_argument(
- "--ai-settings",
- "-C",
- dest="ai_settings_file",
- help="Specifies which ai_settings.yaml file to use, will also automatically"
- " skip the re-prompt.",
- )
- parser.add_argument(
- "--allow-downloads",
- action="store_true",
- dest="allow_downloads",
- help="Dangerous: Allows Auto-GPT to download files natively.",
- )
- args = parser.parse_args()
-
- if args.debug:
+ if debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
CFG.set_debug_mode(True)
- if args.continuous:
+ if continuous:
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
logger.typewriter_log(
"WARNING: ",
@@ -87,31 +58,31 @@ def parse_arguments() -> None:
)
CFG.set_continuous_mode(True)
- if args.continuous_limit:
+ if continuous_limit:
logger.typewriter_log(
- "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
+ "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
)
- CFG.set_continuous_limit(args.continuous_limit)
+ CFG.set_continuous_limit(continuous_limit)
# Check if continuous limit is used without continuous mode
- if args.continuous_limit and not args.continuous:
- parser.error("--continuous-limit can only be used with --continuous")
+ if continuous_limit and not continuous:
+ raise click.UsageError("--continuous-limit can only be used with --continuous")
- if args.speak:
+ if speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
CFG.set_speak_mode(True)
- if args.gpt3only:
+ if gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
CFG.set_smart_llm_model(CFG.fast_llm_model)
- if args.gpt4only:
+ if gpt4only:
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
CFG.set_fast_llm_model(CFG.smart_llm_model)
- if args.memory_type:
+ if memory_type:
supported_memory = get_supported_memory_backends()
- chosen = args.memory_type
+ chosen = memory_type
if chosen not in supported_memory:
logger.typewriter_log(
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
@@ -122,12 +93,12 @@ def parse_arguments() -> None:
else:
CFG.memory_backend = chosen
- if args.skip_reprompt:
+ if skip_reprompt:
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
CFG.skip_reprompt = True
- if args.ai_settings_file:
- file = args.ai_settings_file
+ if ai_settings_file:
+ file = ai_settings_file
# Validate file
(validated, message) = utils.validate_yaml_file(file)
@@ -140,7 +111,7 @@ def parse_arguments() -> None:
CFG.ai_settings_file = file
CFG.skip_reprompt = True
- if args.allow_downloads:
+ if allow_downloads:
logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
logger.typewriter_log(
"WARNING: ",
@@ -155,5 +126,5 @@ def parse_arguments() -> None:
)
CFG.allow_downloads = True
- if args.browser_name:
- CFG.selenium_web_browser = args.browser_name
+ if browser_name:
+ CFG.selenium_web_browser = browser_name
diff --git a/requirements.txt b/requirements.txt
index 3f1eee5b7da3..b4245323e9ce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,6 +19,7 @@ selenium
webdriver-manager
jsonschema
tweepy
+click
##Dev
coverage
From fbdf9d4bd434b3fbd1fa377c82e47f4e9e3afcd7 Mon Sep 17 00:00:00 2001
From: EH
Date: Tue, 18 Apr 2023 13:21:57 +0100
Subject: [PATCH 26/36] docs: add warning for non-essential contributions
(#2359)
---
.github/PULL_REQUEST_TEMPLATE.md | 7 +++++++
CONTRIBUTING.md | 6 ++++++
2 files changed, 13 insertions(+)
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index cf7ffbf320f9..a4f28a3d27d6 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,3 +1,10 @@
+
+