From 9b897d324d64f787d07871c5eea88971f826cc69 Mon Sep 17 00:00:00 2001
From: Omar Solano
Date: Thu, 22 Feb 2024 13:00:53 -0500
Subject: [PATCH] add query validation

---
 requirements.txt                     |  1 +
 scripts/call_openai.py               | 79 ++++++++++++++++++++++++++++
 scripts/gradio-ui.py                 | 26 +++++++--
 scripts/{cfg.py => tutor_prompts.py} | 32 ++++++++++-
 4 files changed, 133 insertions(+), 5 deletions(-)
 create mode 100644 scripts/call_openai.py
 rename scripts/{cfg.py => tutor_prompts.py} (52%)

diff --git a/requirements.txt b/requirements.txt
index f13776f..15b19b7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
 openai
 llama-index
 llama-index-vector-stores-chroma
+pydantic
 numpy
 cohere
 tiktoken
diff --git a/scripts/call_openai.py b/scripts/call_openai.py
new file mode 100644
index 0000000..62ee607
--- /dev/null
+++ b/scripts/call_openai.py
@@ -0,0 +1,79 @@
+import os
+import logging
+
+import instructor
+import openai
+from openai import OpenAI
+from dotenv import load_dotenv
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO)
+
+load_dotenv(".env")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+
+def api_function_call(
+    system_message,
+    query: str,
+    model: str = "gpt-4-0125-preview",
+    response_model=None,
+    max_retries: int = 0,
+    stream: bool = False,
+):
+    # instructor patches the client so completions can return pydantic models.
+    client = instructor.patch(OpenAI())
+    try:
+        message_data = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": query},
+            ],
+            "max_retries": max_retries,
+            "stream": stream,
+        }
+        if response_model is not None:
+            message_data["response_model"] = response_model
+
+        response = client.chat.completions.create(**message_data)
+        error = False
+
+    except openai.BadRequestError:
+        error = True
+        logger.exception("Invalid request to OpenAI API. See traceback:")
+        error_message = (
+            "Something went wrong while connecting with OpenAI, try again soon!"
+        )
+        return error_message, error
+
+    except openai.RateLimitError:
+        error = True
+        logger.exception("RateLimit error from OpenAI. See traceback:")
+        error_message = "OpenAI servers seem to be overloaded, try again later!"
+        return error_message, error
+
+    except Exception:
+        error = True
+        logger.exception(
+            "Some kind of error happened trying to generate the response. See traceback:"
+        )
+        error_message = (
+            "Something went wrong while connecting with OpenAI, try again soon!"
+        )
+        return error_message, error
+
+    if stream is True and response_model is None:
+
+        def answer_generator():
+            for chunk in response:
+                token = chunk.choices[0].delta.content
+                # The final stream chunk carries a None delta; yield "" instead.
+                token = "" if token is None else token
+
+                yield token
+
+        return answer_generator(), error
+
+    else:
+        return response, error
diff --git a/scripts/gradio-ui.py b/scripts/gradio-ui.py
index ad067b0..e101c55 100644
--- a/scripts/gradio-ui.py
+++ b/scripts/gradio-ui.py
@@ -19,7 +19,12 @@
 )
 from utils import init_mongo_db
 
-from cfg import TEXT_QA_TEMPLATE
+from tutor_prompts import (
+    TEXT_QA_TEMPLATE,
+    QueryValidation,
+    system_message_validation,
+)
+from call_openai import api_function_call
 
 logging.getLogger("httpx").setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)
@@ -156,7 +161,8 @@ def format_sources(completion) -> str:
 
 
 def add_sources(history, completion):
-    if history[-1][1] == "No sources selected. Please select sources to search.":
+    # if history[-1][1] == "No sources selected. Please select sources to search.":
+    if completion is None:
         return history
 
     formatted_sources = format_sources(completion)
@@ -176,7 +182,21 @@ def get_answer(history, sources: Optional[list[str]] = None):
 
     if len(sources) == 0:
         history[-1][1] = "No sources selected. Please select sources to search."
-        yield history, "No sources selected. Please select sources to search."
+        yield history, None
+        return
+
+    response_validation, error = api_function_call(
+        system_message=system_message_validation,
+        query=user_input,
+        response_model=QueryValidation,
+        stream=False,
+        model="gpt-3.5-turbo-0125",
+    )
+    if error is False and response_validation.is_valid is False:
+        history[-1][
+            1
+        ] = "I'm sorry, but I am a chatbot designed to assist you with questions related to AI. I cannot answer that question as it is outside my expertise. Is there anything else I can assist you with?"
+        yield history, None
         return
 
     # Dynamically create filters list
diff --git a/scripts/cfg.py b/scripts/tutor_prompts.py
similarity index 52%
rename from scripts/cfg.py
rename to scripts/tutor_prompts.py
index 686d6ee..ccba41f 100644
--- a/scripts/cfg.py
+++ b/scripts/tutor_prompts.py
@@ -1,5 +1,6 @@
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core import ChatPromptTemplate
+from pydantic import BaseModel, Field
 
 default_user_prompt = (
     "Context information is below.\n"
@@ -32,10 +33,10 @@
     "* Do not reference any links, urls or hyperlinks in your answers.\n"
     "* Make sure to format your answers in Markdown format, including code block and snippets.\n"
     "* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n"
-    "'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?'"
+    "'I'm sorry, but I couldn't find the information that answers your question. Is there anything else I can assist you with?'"
     "For example:\n"
     "What is the meaning of life for a qa bot?\n"
-    "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?"
+    "I'm sorry, but I couldn't find the information that answers your question. Is there anything else I can assist you with?"
     "Now answer the following question: \n"
 )
 
@@ -48,3 +49,30 @@
 ]
 
 TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)
+
+
+system_message_validation = """You are a witty AI teacher, helpfully answering questions from students studying the field of applied artificial intelligence.
+Your job is to determine whether the user's question is valid or not. Users will not always submit a question either.
+Users will ask all sorts of questions, and some might be tangentially related to artificial intelligence (AI), machine learning (ML), natural language processing (NLP), computer vision (CV) or generative AI.
+Users can ask how to build LLM-powered apps with LangChain, LlamaIndex, Deep Lake, Chroma DB among other technologies including OpenAI, RAG and more.
+As long as a question is somewhat related to the topic of AI, ML, NLP, data and techniques used in AI like vector embeddings, memories, tokenization, encoding, databases, RAG (Retrieval-Augmented Generation), LangChain, LlamaIndex, LLMs (Large Language Models), Preprocessing techniques, Document loading, Chunking, Indexing of document segments, Embedding models, Chains, Memory modules, Vector stores, Chat models, Sequential chains, Information Retrieval, Data connectors, LlamaHub, Node objects, Query engines, Fine-tuning, Activeloop’s Deep Memory, Prompt engineering, Synthetic training dataset, Inference, Recall rates, Query construction, Query expansion, Query transformation, Re-ranking, Cohere Reranker, Recursive retrieval, Small-to-big retrieval, Hybrid searches, Hit Rate, Mean Reciprocal Rank (MRR), GPT-4, Agents, OpenGPTs, Zero-shot ReAct, Conversational Agent, OpenAI Assistants API, Hugging Face Inference API, Code Interpreter, Knowledge Retrieval, Function Calling, Whisper, Dall-E 3, GPT-4 Vision, Unstructured, Deep Lake, FaithfulnessEvaluator, RAGAS, LangSmith, LangChain Hub, LangServe, REST API, respond 'true'. If a question is on a different subject or unrelated, respond 'false'.
+Make sure the question is a valid question.
+Here is a list of acronyms and concepts related to Artificial Intelligence (AI) that you can accept from users; they can be uppercase or lowercase:
+[TQL, Deep Memory, LLM, Llama, llamaindex, llama-index, lang chain, langchain, llama index, GPT, NLP, RLHF, RLAIF, Mistral, SFT, Cohere, NanoGPT, ReAct, LoRA, QLoRA, LLMOps, Alpaca, Flan, Weights and Biases, W&B, IDEFICS, Flamingo, LLaVA, BLIP, Falcon]
+"""
+
+
+class QueryValidation(BaseModel):
+    """
+    Validate the user query. Ensure the query is for an AI tutor, related to the field of artificial intelligence in a broad sense.
+    """
+
+    chain_of_thought: str = Field(
+        description="Is the user query related to AI or for an AI Tutor? Think step-by-step. Write down your chain of thought here.",
+    )
+    is_valid: bool = Field(
+        description="Based on the previous reasoning, answer with True if the query is related to AI. Answer False otherwise.",
+    )
+    reason: str = Field(
+        description="Explain why the query was valid or not. What are the keywords that make it valid or invalid?",
+    )
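
Usage sketch (not part of the patch): one way to exercise the new validation
path in isolation, assuming OPENAI_API_KEY is set in .env and the snippet is
run from the scripts/ directory. The sample query and the print formatting
are illustrative only.

    from call_openai import api_function_call
    from tutor_prompts import QueryValidation, system_message_validation

    # Classify a user query as in- or out-of-scope for the AI tutor.
    response, error = api_function_call(
        system_message=system_message_validation,
        query="How do I fine-tune an LLM using LoRA?",
        response_model=QueryValidation,
        stream=False,
        model="gpt-3.5-turbo-0125",
    )

    if not error:
        # With response_model set, instructor returns a validated
        # QueryValidation instance rather than a raw completion object.
        print(response.is_valid, "-", response.reason)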