From 8bfeb0e5436544d8c448ef62edf685b3cdab5c55 Mon Sep 17 00:00:00 2001
From: Hemu
Date: Sat, 29 Jun 2024 16:30:27 -0700
Subject: [PATCH 1/2] Updated to work with the latest langchain API

---
 CODE_OF_CONDUCT.md          |  2 +-
 chatify/chains.py           |  6 ++++--
 chatify/main.py             |  2 +-
 chatify/prompts/tester.yaml | 18 +++++++++---------
 setup.py                    |  5 ++---
 5 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 0dc77bd..4004b34 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -5,7 +5,7 @@
 We as members, contributors, and leaders pledge to make participation in our
 community a harassment-free experience for everyone, regardless of age, body
 size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
+identity and expression, level of experience, education, socioeconomic status,
 nationality, personal appearance, race, religion, or sexual identity
 and orientation.
diff --git a/chatify/chains.py b/chatify/chains.py
index 53b0063..71fb7ca 100644
--- a/chatify/chains.py
+++ b/chatify/chains.py
@@ -6,7 +6,6 @@
 from langchain.chains.base import Chain
 from langchain.prompts import PromptTemplate
 
-from .cache import LLMCacher
 from .llm_models import ModelsFactory
 from .utils import compress_code
@@ -77,7 +76,8 @@ def __init__(self, config):
         self.llm_models_factory = ModelsFactory()
         self.cache = config["cache_config"]["cache"]
-        self.cacher = LLMCacher(config)
+        # NOTE: The caching function is deprecated
+        # self.cacher = LLMCacher(config)
 
         # Setup model and chain factory
         self._setup_llm_model(config["model_config"])
@@ -95,6 +95,7 @@ def _setup_llm_model(self, model_config):
         if self.llm_model is None:
             self.llm_model = self.llm_models_factory.get_model(model_config)
 
+        # NOTE: The caching function is deprecated
         if self.cache:
             self.llm_model = self.cacher.cache_llm(self.llm_model)
@@ -168,6 +169,7 @@ def execute(self, chain, inputs, *args, **kwargs):
         -------
         output: Output text generated by the LLM chain.
         """
+
         if self.cache:
             inputs = chain.prompt.format(text=compress_code(inputs))
             output = chain.llm(inputs, cache_obj=self.cacher.llm_cache)
diff --git a/chatify/main.py b/chatify/main.py
index 0278ab9..bbfbc5f 100644
--- a/chatify/main.py
+++ b/chatify/main.py
@@ -153,7 +153,7 @@ def gpt(self, inputs, prompt):
         output : str
             The GPT model output in markdown format.
         """
-        # TODO: Should we create the chain every time? Only prompt is chainging not the model
+        # TODO: Should we create the chain every time? Only the prompt is changing, not the model
         chain = self.llm_chain.create_chain(
             self.cfg["model_config"], prompt_template=prompt
         )
diff --git a/chatify/prompts/tester.yaml b/chatify/prompts/tester.yaml
index ef5fbdb..e51c004 100644
--- a/chatify/prompts/tester.yaml
+++ b/chatify/prompts/tester.yaml
@@ -2,18 +2,18 @@ test my understanding with some open-ended questions:
   input_variables: ['text']
   content: >
     SYSTEM: You are an AI assistant for Jupyter notebooks, named Chatify. Use robot-related emojis and humor to convey a friendly and relaxed tone. Your job is to help the user understand a tutorial they are working through as part of a course, one code block at a time. The user can send you one request about each code block, and you will not retain your chat history before or after their request, nor will you have access to other parts of the tutorial notebook.
     Because you will only see one code block at a time, you should assume that any relevant libraries are imported outside of the current code block, and that any relevant functions have already been defined in a previous notebook cell. Make reasonable guesses about what predefined functions do based on what they are named. Focus on conceptual issues whenever possible rather than minor details. You can provide code snippets if you think it is best, but it is better to provide Python-like pseudocode if possible. To comply with formatting requirements, do not ask for additional questions or clarification from the user. The only thing you are allowed to ask the user is for them to select another option from the dropdown menu or to resubmit their request again to generate a new response. Provide your response in markdown format.
-
+
     ASSISTANT: How can I help?
-
+
     USER: I'd like to test my understanding with some tough open-ended essay style questions about the conceptual content of this code block:
-
+
     ---
     {text}
     ---
     Can you make up some essay-style questions for me to make sure I really understand the important concepts? Remember that I can't respond to you, so just ask me to "think about" how I'd respond (i.e., without explicitly responding to you).
-    ASSISTANT: 
+    ASSISTANT:
   template_format: f-string
   prompt_id: 90gwxu1n68pbc2193jy0fy5rp9yu6h9h
 
 test my understanding with a multiple-choice question:
   input_variables: ['text']
   content: >
     SYSTEM: You are an AI assistant for Jupyter notebooks, named Chatify. Use robot-related emojis and humor to convey a friendly and relaxed tone. Your job is to help the user understand a tutorial they are working through as part of a course, one code block at a time. The user can send you one request about each code block, and you will not retain your chat history before or after their request, nor will you have access to other parts of the tutorial notebook. Because you will only see one code block at a time, you should assume that any relevant libraries are imported outside of the current code block, and that any relevant functions have already been defined in a previous notebook cell. Make reasonable guesses about what predefined functions do based on what they are named. Focus on conceptual issues whenever possible rather than minor details. You can provide code snippets if you think it is best, but it is better to provide Python-like pseudocode if possible. To comply with formatting requirements, do not ask for additional questions or clarification from the user. The only thing you are allowed to ask the user is for them to select another option from the dropdown menu or to resubmit their request again to generate a new response. Provide your response in markdown format.
-
+
     ASSISTANT: How can I help?
-
+
     USER: I'd like to test my understanding with a multiple choice question about the conceptual content of this code block:
-
+
     ---
     {text}
     ---
-    I'd like the correct answer to be either "[A]", "[B]", "[C]", or "[D]". Can you make up a multiple choice question for me so that I can make sure I really understant the most important concepts? Remember that I can't respond to you, so just ask me to "think about" which choice is correct or something else like that (i.e., without explicitly responding to you). Put two line breaks ("
") between each choice so that it appears correctly on my screen. In other words, there should be two line breaks between each of [B], [C], and [D]. + I'd like the correct answer to be either "[A]", "[B]", "[C]", or "[D]". Can you make up a multiple choice question for me so that I can make sure I really understand the most important concepts? Remember that I can't respond to you, so just ask me to "think about" which choice is correct or something else like that (i.e., without explicitly responding to you). Put two line breaks ("
") between each choice so that it appears correctly on my screen. In other words, there should be two line breaks between each of [B], [C], and [D]. - ASSISTANT: + ASSISTANT: template_format: f-string prompt_id: cqeas35w0wzhvemd6vtduj0qcf8njo4b diff --git a/setup.py b/setup.py index 84adac2..87d91a1 100644 --- a/setup.py +++ b/setup.py @@ -11,15 +11,14 @@ history = history_file.read() requirements = [ - "gptcache<=0.1.35", - "langchain<=0.0.226", + "langchain>=0.2.6", + "langchain-community", "openai", "markdown", "ipywidgets", "requests", "markdown-it-py[linkify,plugins]", "pygments", - "pydantic==1.10.11", ] extras = [ "transformers", From a6319a728bd62299e53a24a9fdbde2429dc673b1 Mon Sep 17 00:00:00 2001 From: Hemu Date: Sat, 29 Jun 2024 17:00:35 -0700 Subject: [PATCH 2/2] Fixed a warning in chain call --- chatify/chains.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chatify/chains.py b/chatify/chains.py index 71fb7ca..0b53225 100644 --- a/chatify/chains.py +++ b/chatify/chains.py @@ -175,6 +175,6 @@ def execute(self, chain, inputs, *args, **kwargs): output = chain.llm(inputs, cache_obj=self.cacher.llm_cache) self.cacher.llm_cache.flush() else: - output = chain(inputs)["text"] + output = chain.invoke(inputs)["text"] return output