diff --git a/gpt_engineer/applications/cli/cli_agent.py b/gpt_engineer/applications/cli/cli_agent.py
index 31673412de..a81b82b1bb 100644
--- a/gpt_engineer/applications/cli/cli_agent.py
+++ b/gpt_engineer/applications/cli/cli_agent.py
@@ -1,12 +1,17 @@
 from gpt_engineer.core.code import Code
 from gpt_engineer.core.base_version_manager import BaseVersionManager
 from gpt_engineer.core.ai import AI
-from gpt_engineer.core.default.steps import gen_code, gen_entrypoint, execute_entrypoint
+from gpt_engineer.core.default.steps import (
+    gen_code,
+    gen_entrypoint,
+    execute_entrypoint,
+    improve,
+)
 from gpt_engineer.core.base_repository import BaseRepository
 from gpt_engineer.core.default.on_disk_repository import OnDiskRepository
 from gpt_engineer.core.base_execution_env import BaseExecutionEnv
 from gpt_engineer.core.default.on_disk_execution_env import OnDiskExecutionEnv
-from gpt_engineer.core.default.paths import memory_path
+from gpt_engineer.core.default.paths import memory_path, ENTRYPOINT_FILE
 from gpt_engineer.core.base_agent import BaseAgent
 from gpt_engineer.applications.cli.learning import human_review

@@ -78,5 +83,11 @@ def init(self, prompt: str) -> Code:
         human_review(self.memory)
         return code

-    def improve(self, prompt: str, code) -> Code:
-        pass
+    def improve(self, prompt: str, code: Code) -> Code:
+        code = improve(self.ai, prompt, code)
+        if not ENTRYPOINT_FILE in code:
+            entrypoint = gen_entrypoint(self.ai, code, self.memory)
+            code = Code(code | entrypoint)
+        execute_entrypoint(self.execution_env, code)
+        human_review(self.memory)
+        return code
diff --git a/gpt_engineer/applications/cli/learning.py b/gpt_engineer/applications/cli/learning.py
index 5d25ac8571..e61b5c372e 100644
--- a/gpt_engineer/applications/cli/learning.py
+++ b/gpt_engineer/applications/cli/learning.py
@@ -56,6 +56,7 @@
     OnDiskRepository,
     FileRepositories,
 )
+from gpt_engineer.core.base_repository import BaseRepository
 from gpt_engineer.core.domain import Step


@@ -282,7 +283,7 @@ def get_session() -> str:
         return "ephemeral_" + str(random.randint(0, 2**32))


-def human_review(memory: OnDiskRepository):
+def human_review(memory: BaseRepository):
     """
     Collects human feedback on the code and stores it in memory.

diff --git a/gpt_engineer/core/ai.py b/gpt_engineer/core/ai.py
index f4cb535ee2..e001dcd1e9 100644
--- a/gpt_engineer/core/ai.py
+++ b/gpt_engineer/core/ai.py
@@ -89,7 +89,13 @@ class AI:
     """

-    def __init__(self, model_name="gpt-4", temperature=0.1, azure_endpoint=""):
+    def __init__(
+        self,
+        model_name="gpt-4-1106-preview",
+        temperature=0.1,
+        azure_endpoint="",
+        streaming=True,
+    ):
         """
         Initialize the AI class.

@@ -103,7 +109,7 @@ def __init__(self, model_name="gpt-4", temperature=0.1, azure_endpoint=""):
         self.temperature = temperature
         self.azure_endpoint = azure_endpoint
         self.model_name = self._check_model_access_and_fallback(model_name)
-
+        self.streaming = streaming
         self.llm = self._create_chat_model()
         self.token_usage_log = TokenUsageLog(model_name)

@@ -306,13 +312,13 @@ def _create_chat_model(self) -> BaseChatModel:
                 openai_api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"),
                 deployment_name=self.model_name,
                 openai_api_type="azure",
-                streaming=True,
+                streaming=self.streaming,
             )
         return ChatOpenAI(
             model=self.model_name,
             temperature=self.temperature,
-            streaming=True,
+            streaming=self.streaming,
             client=openai.ChatCompletion,
         )

diff --git a/gpt_engineer/core/base_agent.py b/gpt_engineer/core/base_agent.py
index 2c637e925a..eec77f60da 100644
--- a/gpt_engineer/core/base_agent.py
+++ b/gpt_engineer/core/base_agent.py
@@ -5,46 +5,6 @@
 class BaseAgent(ABC):
-    """
-    The `Agent` class is responsible for managing the lifecycle of code generation and improvement.
-
-    Attributes:
-        path (str): The file path where the `Agent` will operate, used for version management and
-            file operations.
-        version_manager (BaseVersionManager): An object that adheres to the VersionManagerInterface,
-            responsible for version control of the generated code. Defaults to `VersionManager`
-            if not provided. PROBABLY GIT SHOULD BE USED IN THE DEFAULT
-        step_bundle (StepBundleInterface): Workflows of code generation steps that define the behavior of gen_code and
-            improve.
-        ai (AI): Manages calls to the LLM.
-
-    Methods:
-        __init__(self, path: str, version_manager: VersionManagerInterface = None,
-                 step_bundle: StepBundleInterface = None, ai: AI = None):
-            Initializes a new instance of the Agent class with the provided path, version manager,
-            step bundle, and AI. It falls back to default instances if specific components are not provided.
-
-        init(self, prompt: str) -> Code:
-            Generates a new piece of code using the AI and step bundle based on the provided prompt.
-            It also snapshots the generated code using the version manager.
-
-            Parameters:
-                prompt (str): A string prompt that guides the code generation process.
-
-            Returns:
-                Code: An instance of the `Code` class containing the generated code.
-
-        improve(self, prompt: str) -> Code:
-            Improves an existing piece of code using the AI and step bundle based on the provided prompt.
-            It also snapshots the improved code using the version manager.
-
-            Parameters:
-                prompt (str): A string prompt that guides the code improvement process.
-
-            Returns:
-                Code: An instance of the `Code` class containing the improved code.
-    """
-
     @abstractmethod
     def init(self, prompt: str) -> Code:
         pass
diff --git a/gpt_engineer/core/chat_to_files.py b/gpt_engineer/core/chat_to_files.py
index 9d343425c5..fd1aae3587 100644
--- a/gpt_engineer/core/chat_to_files.py
+++ b/gpt_engineer/core/chat_to_files.py
@@ -26,7 +26,6 @@
 - apply_edits: Applies file edits to a workspace.
 """
-import os
 import re
 import logging

@@ -89,10 +88,7 @@ def parse_chat(chat) -> List[Tuple[str, str]]:
     return files


-
-
-
-def overwrite_files_with_edits(chat: str, code: Code):
+def overwrite_code_with_edits(chat: str, code: Code):
     edits = parse_edits(chat)
     apply_edits(edits, code)

diff --git a/gpt_engineer/core/code.py b/gpt_engineer/core/code.py
index 625f45a4a2..b921927296 100644
--- a/gpt_engineer/core/code.py
+++ b/gpt_engineer/core/code.py
@@ -7,10 +7,10 @@ class Code(dict):
     def __setitem__(self, key, value):
         if not isinstance(key, str | Path):
-            raise TypeError("Keys must be strings")
+            raise TypeError("Keys must be strings or Paths")
         if not isinstance(value, str):
             raise TypeError("Values must be strings")
-        super()[key] = value
+        super().__setitem__(key, value)

     def to_chat(self):
@@ -30,10 +30,16 @@ def format_file_to_input(file_name: str, file_content: str) -> str:
             The formatted file string.
             """
             file_str = f"""
-            {file_name}
-            ```
-            {file_content}
+{file_name}
+```
+{file_content}
 ```
 """
             return file_str
-        return "\n".join([format_file_to_input(file_name, file_content) + "\n" for file_name, file_content in self.items()])
+
+        return "\n".join(
+            [
+                format_file_to_input(file_name, file_content) + "\n"
+                for file_name, file_content in self.items()
+            ]
+        )
diff --git a/gpt_engineer/core/default/lean_agent.py b/gpt_engineer/core/default/lean_agent.py
index adef734383..98d6775fb4 100644
--- a/gpt_engineer/core/default/lean_agent.py
+++ b/gpt_engineer/core/default/lean_agent.py
@@ -1,56 +1,19 @@
 from gpt_engineer.core.code import Code
-from gpt_engineer.core.base_version_manager import BaseVersionManager
 from gpt_engineer.core.ai import AI
-from gpt_engineer.core.default.steps import gen_code, gen_entrypoint, execute_entrypoint
+from gpt_engineer.core.default.steps import (
+    gen_code,
+    gen_entrypoint,
+    improve,
+)
 from gpt_engineer.core.base_repository import BaseRepository
 from gpt_engineer.core.default.on_disk_repository import OnDiskRepository
 from gpt_engineer.core.base_execution_env import BaseExecutionEnv
 from gpt_engineer.core.default.on_disk_execution_env import OnDiskExecutionEnv
-from gpt_engineer.core.default.paths import memory_path
+from gpt_engineer.core.default.paths import memory_path, ENTRYPOINT_FILE
 from gpt_engineer.core.base_agent import BaseAgent


-class Agent(BaseAgent):
-    """
-    The `Agent` class is responsible for managing the lifecycle of code generation and improvement.
-
-    Attributes:
-        path (str): The file path where the `Agent` will operate, used for version management and
-            file operations.
-        version_manager (BaseVersionManager): An object that adheres to the VersionManagerInterface,
-            responsible for version control of the generated code. Defaults to `VersionManager`
-            if not provided. PROBABLY GIT SHOULD BE USED IN THE DEFAULT
-        step_bundle (StepBundleInterface): Workflows of code generation steps that define the behavior of gen_code and
-            improve.
-        ai (AI): Manages calls to the LLM.
-
-    Methods:
-        __init__(self, path: str, version_manager: VersionManagerInterface = None,
-                 step_bundle: StepBundleInterface = None, ai: AI = None):
-            Initializes a new instance of the Agent class with the provided path, version manager,
-            step bundle, and AI. It falls back to default instances if specific components are not provided.
-
-        init(self, prompt: str) -> Code:
-            Generates a new piece of code using the AI and step bundle based on the provided prompt.
-            It also snapshots the generated code using the version manager.
-
-            Parameters:
-                prompt (str): A string prompt that guides the code generation process.
-
-            Returns:
-                Code: An instance of the `Code` class containing the generated code.
-
-        improve(self, prompt: str) -> Code:
-            Improves an existing piece of code using the AI and step bundle based on the provided prompt.
-            It also snapshots the improved code using the version manager.
-
-            Parameters:
-                prompt (str): A string prompt that guides the code improvement process.
-
-            Returns:
-                Code: An instance of the `Code` class containing the improved code.
-    """
-
+class LeanAgent(BaseAgent):
     def __init__(
         self,
         memory: BaseRepository,
@@ -73,8 +36,13 @@ def init(self, prompt: str) -> Code:
         code = gen_code(self.ai, prompt, self.memory)
         entrypoint = gen_entrypoint(self.ai, code, self.memory)
         code = Code(code | entrypoint)
-        execute_entrypoint(self.execution_env, code)
+        self.execution_env.execute_program(code)
         return code

     def improve(self, prompt: str, code: Code) -> Code:
-        pass
+        code = improve(self.ai, prompt, code)
+        if not ENTRYPOINT_FILE in code:
+            entrypoint = gen_entrypoint(self.ai, code, self.memory)
+            code = Code(code | entrypoint)
+        self.execution_env.execute_program(code)
+        return code
diff --git a/gpt_engineer/core/default/steps.py b/gpt_engineer/core/default/steps.py
index 9920b13623..f9d870da88 100644
--- a/gpt_engineer/core/default/steps.py
+++ b/gpt_engineer/core/default/steps.py
@@ -1,6 +1,6 @@
 from gpt_engineer.core.code import Code
 from gpt_engineer.core.ai import AI
-from gpt_engineer.core.chat_to_files import parse_chat, overwrite_files_with_edits#, format_file_to_input
+from gpt_engineer.core.chat_to_files import parse_chat, overwrite_code_with_edits
 from gpt_engineer.core.default.paths import (
     ENTRYPOINT_FILE,
     CODE_GEN_LOG_FILE,
@@ -35,7 +35,7 @@ def curr_fn() -> str:
     return inspect.stack()[1].function


-def setup_sys_prompt(db: OnDiskRepository) -> str:
+def setup_sys_prompt(preprompts: OnDiskRepository) -> str:
     """
     Constructs a system prompt for the AI based on predefined instructions and philosophies.

@@ -45,16 +45,16 @@ def setup_sys_prompt(db: OnDiskRepository) -> str:
     "philosophy" taken from the given DBs object.

     Parameters:
-    - dbs (DBs): The database object containing pre-defined prompts and instructions.
+    - preprompts (DBs): The database object containing pre-defined prompts and instructions.

     Returns:
     - str: The constructed system prompt for the AI.
     """
     return (
-        db["roadmap"]
-        + db["generate"].replace("FILE_FORMAT", db["file_format"])
+        preprompts["roadmap"]
+        + preprompts["generate"].replace("FILE_FORMAT", preprompts["file_format"])
         + "\nUseful to know:\n"
-        + db["philosophy"]
+        + preprompts["philosophy"]
     )


@@ -69,7 +69,7 @@ def gen_code(ai: AI, prompt: str, memory: BaseRepository) -> Code:
     Parameters:
     - ai (AI): An instance of the AI model.
-    - dbs (DBs): An instance containing the database configurations, including system and
+    - preprompts (DBs): An instance containing the database configurations, including system and
       input prompts, and file formatting preferences.

     Returns:
@@ -79,8 +79,8 @@ def gen_code(ai: AI, prompt: str, memory: BaseRepository) -> Code:
     The function assumes the `ai.start` method and the `to_files` utility are correctly
     set up and functional. Ensure these prerequisites are in place before invoking `simple_gen`.
     """
-    db = OnDiskRepository(PREPROMPTS_PATH)
-    messages = ai.start(setup_sys_prompt(db), prompt, step_name=curr_fn())
+    preprompts = OnDiskRepository(PREPROMPTS_PATH)
+    messages = ai.start(setup_sys_prompt(preprompts), prompt, step_name=curr_fn())
     chat = messages[-1].content.strip()
     memory[CODE_GEN_LOG_FILE] = chat
     files = parse_chat(chat)
@@ -99,7 +99,7 @@ def gen_entrypoint(ai: AI, code: Code, memory: BaseRepository) -> Code:
     Parameters:
     - ai (AI): An instance of the AI model.
-    - dbs (DBs): An instance containing the database configurations and workspace
+    - preprompts (DBs): An instance containing the database configurations and workspace
       information, particularly the 'all_output.txt' which contains details about the
       codebase on disk.

@@ -155,8 +155,6 @@ def execute_entrypoint(execution_env: BaseExecutionEnv, code: Code) -> None:
     Parameters:
     - ai (AI): An instance of the AI model, not directly used in this function but
       included for consistency with other functions.
-    - dbs (DBs): An instance containing the database configurations and workspace
-      information.

     Returns:
     - List[dict]: An empty list. This function does not produce a list of messages
@@ -205,7 +203,7 @@ def execute_entrypoint(execution_env: BaseExecutionEnv, code: Code) -> None:
     execution_env.execute_program(code)


-def setup_sys_prompt_existing_code(db: OnDiskRepository) -> str:
+def setup_sys_prompt_existing_code(preprompts: OnDiskRepository) -> str:
     """
     Constructs a system prompt for the AI focused on improving an existing codebase.

@@ -215,17 +213,18 @@ def setup_sys_prompt_existing_code(db: OnDiskRepository) -> str:
     "philosophy" taken from the given DBs object.

     Parameters:
-    - dbs (DBs): The database object containing pre-defined prompts and instructions.
+    - preprompts (DBs): The database object containing pre-defined prompts and instructions.

     Returns:
     - str: The constructed system prompt focused on existing code improvement for the AI.
     """
     return (
-        db.preprompts["improve"].replace("FILE_FORMAT", db.preprompts["file_format"])
+        preprompts["improve"].replace("FILE_FORMAT", preprompts["file_format"])
         + "\nUseful to know:\n"
-        + db.preprompts["philosophy"]
+        + preprompts["philosophy"]
     )

+
 def improve(ai: AI, prompt: str, code: Code) -> Code:
     """
     Process and improve the code from a specified set of existing files based on a user prompt.
@@ -271,5 +270,5 @@ def improve(ai: AI, prompt: str, code: Code) -> Code:
     messages = ai.next(messages, step_name=curr_fn())

-    overwrite_files_with_edits(messages[-1].content.strip(), code)
-    return messages
+    overwrite_code_with_edits(messages[-1].content.strip(), code)
+    return code
diff --git a/gpt_engineer/legacy/steps.py b/gpt_engineer/legacy/steps.py
index 7626df2a71..2af1d3040b 100644
--- a/gpt_engineer/legacy/steps.py
+++ b/gpt_engineer/legacy/steps.py
@@ -62,7 +62,7 @@ from gpt_engineer.core.chat_to_files import (
     # format_file_to_input,
     # get_code_strings,
-    overwrite_files_with_edits,
+    overwrite_code_with_edits,
     # to_files_and_memory,
 )
 from gpt_engineer.core.default.on_disk_repository import FileRepositories

@@ -522,7 +522,7 @@ def vector_improve(ai: AI, dbs: FileRepositories):
     messages = ai.next(messages, step_name=curr_fn())

-    overwrite_files_with_edits(messages[-1].content.strip(), dbs)
+    overwrite_code_with_edits(messages[-1].content.strip(), dbs)

     return messages

@@ -641,7 +641,7 @@ def improve_existing_code(ai: AI, dbs: FileRepositories):
     messages = ai.next(messages, step_name=curr_fn())

-    overwrite_files_with_edits(messages[-1].content.strip(), dbs)
+    overwrite_code_with_edits(messages[-1].content.strip(), dbs)

     return messages

diff --git a/gpt_engineer/preprompts/improve b/gpt_engineer/preprompts/improve
index fda1e808e4..c1dfa4d8cd 100644
--- a/gpt_engineer/preprompts/improve
+++ b/gpt_engineer/preprompts/improve
@@ -24,8 +24,8 @@
 Remember, you can use multiple *edit blocks* per file.
 Here is an example response:
 ---
 PLANNING:
-We need to change ... because ..., therefore I will add the line `a=a+1` to the function `add_one`.
-Also, in the class `DB`, we need to update the ...
+We need to change "SOMETHING" because "SOMETHING", therefore I will add the line `a=a+1` to the function `add_one`.
+Also, in the class `DB`, we need to update the "SOMETHING"
 OUTPUT:
 ```python
@@ -63,7 +63,7 @@ some/dir/example_2.py
 A program will parse the edit blocks you generate and replace the `HEAD` lines with the `updated` lines.
 So edit blocks must be precise and unambiguous!

-Every *edit block* must be fenced with ```...``` with the correct code language.
+Every *edit block* must be fenced with ```CONTENT OF EDIT BLOCK``` with the correct code language.

 The `HEAD` section must be an *exact set of sequential lines* from the file!
 This is very important. Otherwise the parser won't work. NEVER SKIP LINES in the `HEAD` section!
diff --git a/tests/applications/cli/test_collect.py b/tests/applications/cli/test_collect.py
index cc2068b051..afb2382856 100644
--- a/tests/applications/cli/test_collect.py
+++ b/tests/applications/cli/test_collect.py
@@ -10,7 +10,10 @@
 import rudderstack.analytics as rudder_analytics

 from gpt_engineer.applications.cli import collect_learnings, steps_file_hash
-from gpt_engineer.core.default.on_disk_repository import OnDiskRepository, FileRepositories
+from gpt_engineer.core.default.on_disk_repository import (
+    OnDiskRepository,
+    FileRepositories,
+)
 from gpt_engineer.applications.cli import extract_learning
 from gpt_engineer.legacy.steps import simple_gen
diff --git a/tests/core/default/__init__.py b/tests/core/default/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/core/default/test_lean_agent.py b/tests/core/default/test_lean_agent.py
new file mode 100644
index 0000000000..1ef86eff1d
--- /dev/null
+++ b/tests/core/default/test_lean_agent.py
@@ -0,0 +1,51 @@
+import pytest
+import tempfile
+from gpt_engineer.core.ai import AI
+from gpt_engineer.core.default.lean_agent import LeanAgent
+from gpt_engineer.core.code import Code
+import os
+
+from gpt_engineer.core.chat_to_files import parse_chat, Edit, parse_edits, apply_edits
+from gpt_engineer.core.chat_to_files import logger as parse_logger
+import logging
+
+
+def test_init():
+    temp_dir = tempfile.mkdtemp()
+    lean_agent = LeanAgent.with_default_config(temp_dir, AI(streaming=False))
+    outfile = "output.txt"
+    file_path = os.path.join(temp_dir, outfile)
+    code = lean_agent.init(
+        f"Make a program that prints 'Hello World!' to a file called '{outfile}'"
+    )
+    assert os.path.isfile(file_path)
+    with open(file_path, "r") as file:
+        assert file.read().strip() == "Hello World!"
+
+
+def test_improve():
+    temp_dir = tempfile.mkdtemp()
+    code = Code(
+        {
+            "main.py": "def write_hello_world_to_file(filename):\n    \"\"\"\n    Writes 'Hello World!' to the specified file.\n    \n    :param filename: The name of the file to write to.\n    \"\"\"\n    with open(filename, 'w') as file:\n        file.write('Hello World!')\n\nif __name__ == \"__main__\":\n    output_filename = 'output.txt'\n    write_hello_world_to_file(output_filename)",
+            "requirements.txt": "# No dependencies required",
+            "run.sh": "python3 main.py\n",
+        }
+    )
+    lean_agent = LeanAgent.with_default_config(temp_dir, AI(streaming=False))
+    lean_agent.improve(
+        "Change the program so that it prints '!dlroW olleH' instead of 'Hello World!'",
+        code,
+    )
+    outfile = "output.txt"
+    file_path = os.path.join(temp_dir, outfile)
+    assert os.path.isfile(file_path)
+    with open(file_path, "r") as file:
+        file_content = file.read().strip()
+        assert file_content == "!dlroW olleH"
+
+
+if __name__ == "__main__":
+    pytest.main()
+    # test_improve()
+    # test_init()
diff --git a/tests/core/test_chat_to_files.py b/tests/core/test_chat_to_files.py
index 20d44bfbd5..aa55c13a20 100644
--- a/tests/core/test_chat_to_files.py
+++ b/tests/core/test_chat_to_files.py
@@ -3,6 +3,7 @@
 from gpt_engineer.core.chat_to_files import logger as parse_logger
 import logging

+
 def test_standard_input():
     chat = """
 Some text describing the code
@@ -19,15 +20,17 @@ def add(a, b):
     """
     expected = [
         ("file1.py", 'print("Hello, World!")'),
-        ("file2.py", 'def add(a, b):\n    return a + b')
+        ("file2.py", "def add(a, b):\n    return a + b"),
     ]
     assert parse_chat(chat) == expected

+
 def test_no_code_blocks():
     chat = "Just some regular chat without code."
     expected = []
     assert parse_chat(chat) == expected

+
 def test_special_characters_in_filename():
     chat = """
 file[1].py
@@ -40,24 +43,21 @@ def test_special_characters_in_filename():
     print("File 2")
 ```
 """
-    expected = [
-        ("file[1].py", 'print("File 1")'),
-        ("file`2`.py", 'print("File 2")')
-    ]
+    expected = [("file[1].py", 'print("File 1")'), ("file`2`.py", 'print("File 2")')]
     parsed = parse_chat(chat)
     assert parsed == expected

+
 def test_empty_code_blocks():
     chat = """
 empty.py
 ```
 ```
 """
-    expected = [
-        ("empty.py", '')
-    ]
+    expected = [("empty.py", "")]
     assert parse_chat(chat) == expected

+
 def test_mixed_content():
     chat = """
 script.sh
@@ -70,12 +70,10 @@ def test_mixed_content():
     print("World")
 ```
 """
-    expected = [
-        ("script.sh", 'echo "Hello"'),
-        ("script.py", 'print("World")')
-    ]
+    expected = [("script.sh", 'echo "Hello"'), ("script.py", 'print("World")')]
     assert parse_chat(chat) == expected

+
 def test_filename_line_break():
     chat = """
 file1.py
@@ -84,11 +82,10 @@ def test_filename_line_break():
     print("Hello, World!")
 ```
 """
-    expected = [
-        ("file1.py", 'print("Hello, World!")')
-    ]
+    expected = [("file1.py", 'print("Hello, World!")')]
     assert parse_chat(chat) == expected

+
 def test_filename_in_backticks():
     chat = """
 `file1.py`
@@ -96,11 +93,10 @@ def test_filename_in_backticks():
     print("Hello, World!")
 ```
 """
-    expected = [
-        ("file1.py", 'print("Hello, World!")')
-    ]
+    expected = [("file1.py", 'print("Hello, World!")')]
     assert parse_chat(chat) == expected

+
 def test_filename_with_file_tag():
     chat = """
 [FILE: file1.py]
@@ -108,11 +104,10 @@ def test_filename_with_file_tag():
     print("Hello, World!")
 ```
 """
-    expected = [
-        ("file1.py", 'print("Hello, World!")')
-    ]
+    expected = [("file1.py", 'print("Hello, World!")')]
     assert parse_chat(chat) == expected

+
 def test_filename_with_different_extension():
     chat = """
 [id].jsx
@@ -120,11 +115,10 @@ def test_filename_with_different_extension():
     console.log("Hello, World!")
 ```
 """
-    expected = [
-        ("[id].jsx", 'console.log("Hello, World!")')
-    ]
+    expected = [("[id].jsx", 'console.log("Hello, World!")')]
     assert parse_chat(chat) == expected

+
 # Helper function to capture log messages
 @pytest.fixture
 def log_capture():
@@ -141,6 +135,7 @@ def emit(self, record):
     yield handler
     parse_logger.removeHandler(handler)

+
 def test_parse_with_additional_text():
     chat = """
 Some introductory text.
@@ -171,11 +166,16 @@ class DBS:
 """
     expected = [
         Edit("some/dir/example_1.py", "def mul(a,b)", "def add(a,b):"),
-        Edit("some/dir/example_2.py", "class DBS:\n    db = 'aaa'", "class DBS:\n    db = 'bbb'")
+        Edit(
+            "some/dir/example_2.py",
+            "class DBS:\n    db = 'aaa'",
+            "class DBS:\n    db = 'bbb'",
+        ),
     ]
     parsed = parse_edits(chat)
     assert parsed == expected

+
 def test_apply_edit_new_file(log_capture):
     edits = [Edit("new_file.py", "", "print('Hello, World!')")]
     code = {"new_file.py": "some content"}
@@ -183,6 +183,7 @@ def test_apply_edit_new_file(log_capture):
     assert code == {"new_file.py": "print('Hello, World!')"}
     assert "file will be overwritten" in log_capture.messages[0]

+
 def test_apply_edit_no_match(log_capture):
     edits = [Edit("file.py", "non-existent content", "new content")]
     code = {"file.py": "some content"}
@@ -190,6 +191,7 @@ def test_apply_edit_no_match(log_capture):
     assert code == {"file.py": "some content"}  # No change
     assert "code block to be replaced was not found" in log_capture.messages[0]

+
 def test_apply_edit_multiple_matches(log_capture):
     edits = [Edit("file.py", "repeat", "new")]
     code = {"file.py": "repeat repeat repeat"}
@@ -197,5 +199,6 @@ def test_apply_edit_multiple_matches(log_capture):
     assert code == {"file.py": "new new new"}
     assert "code block to be replaced was found multiple times" in log_capture.messages[0]

+
 if __name__ == "__main__":
     pytest.main()
diff --git a/tests/legacy_steps/test_archive.py b/tests/legacy_steps/test_archive.py
index 330f3a0aac..a9160bf159 100644
--- a/tests/legacy_steps/test_archive.py
+++ b/tests/legacy_steps/test_archive.py
@@ -3,7 +3,11 @@
 from unittest.mock import MagicMock

-from gpt_engineer.core.default.on_disk_repository import OnDiskRepository, FileRepositories, archive
+from gpt_engineer.core.default.on_disk_repository import (
+    OnDiskRepository,
+    FileRepositories,
+    archive,
+)


 def freeze_at(monkeypatch, time):
diff --git a/tests/tools/test_file_repository.py b/tests/tools/test_file_repository.py
index 2f3874d99e..4bbee3630f 100644
--- a/tests/tools/test_file_repository.py
+++ b/tests/tools/test_file_repository.py
@@ -1,6 +1,9 @@
 import pytest

-from gpt_engineer.core.default.on_disk_repository import OnDiskRepository, FileRepositories
+from gpt_engineer.core.default.on_disk_repository import (
+    OnDiskRepository,
+    FileRepositories,
+)


 def test_DB_operations(tmp_path):