Skip to content

Commit

Permalink
add query validation
Browse files Browse the repository at this point in the history
  • Loading branch information
omar-sol committed Feb 22, 2024
1 parent 872ce15 commit 9b897d3
Show file tree
Hide file tree
Showing 4 changed files with 133 additions and 5 deletions.
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
openai
llama-index
llama-index-vector-stores-chroma
pydantic
numpy
cohere
tiktoken
Expand Down
79 changes: 79 additions & 0 deletions scripts/call_openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
import os
import logging

import instructor
import openai
from openai import OpenAI, AsyncOpenAI
from dotenv import load_dotenv

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")


def api_function_call(
    system_message,
    query: str,
    model: str = "gpt-4-0125-preview",
    response_model=None,
    max_retries: int = 0,
    stream: bool = False,
):
    """Call the OpenAI chat-completions API (patched with instructor).

    Args:
        system_message: Content for the system role message.
        query: Content for the user role message.
        model: OpenAI model name to use.
        response_model: Optional pydantic model; when given, instructor parses
            the completion into an instance of it.
        max_retries: Retry count forwarded to instructor.
        stream: When True (and no response_model), a token generator is
            returned instead of the raw response.

    Returns:
        A ``(result, error)`` tuple. On success ``error`` is False and
        ``result`` is the API response, a parsed ``response_model`` instance,
        or a generator of token strings when streaming. On failure ``error``
        is True and ``result`` is a user-facing error message string.
    """
    client = instructor.patch(OpenAI())
    try:
        message_data = {
            "model": model,
            "messages": [
                {"role": "system", "content": system_message},
                {"role": "user", "content": query},
            ],
            # "max_retries" is an instructor extension, not a native OpenAI
            # argument — it only works because the client is patched above.
            "max_retries": max_retries,
            "stream": stream,
        }
        if response_model is not None:
            message_data["response_model"] = response_model

        response = client.chat.completions.create(**message_data)

    except openai.BadRequestError:
        logger.exception("Invalid request to OpenAI API. See traceback:")
        error_message = (
            "Something went wrong while connecting with OpenAI, try again soon!"
        )
        return error_message, True

    except openai.RateLimitError:
        logger.exception("RateLimit error from OpenAI. See traceback:")
        error_message = "OpenAI servers seem to be overloaded, try again later!"
        return error_message, True

    except Exception:
        logger.exception(
            "Some kind of error happened trying to generate the response. See traceback:"
        )
        error_message = (
            "Something went wrong with connecting with OpenAI, try again soon!"
        )
        return error_message, True

    # Streaming only applies to plain completions; when a response_model is
    # set, instructor returns a parsed object which cannot be iterated as
    # token chunks, so the raw response is returned instead.
    if stream and response_model is None:

        def answer_generator():
            for chunk in response:
                # The first (role) and last chunks carry a None delta content;
                # normalize those to an empty string so callers can just join.
                token = chunk.choices[0].delta.content
                yield token if token is not None else ""

        return answer_generator(), False

    return response, False
26 changes: 23 additions & 3 deletions scripts/gradio-ui.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,12 @@
)

from utils import init_mongo_db
from cfg import TEXT_QA_TEMPLATE
from scripts.tutor_prompts import (
TEXT_QA_TEMPLATE,
QueryValidation,
system_message_validation,
)
from call_openai import api_function_call

logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -156,7 +161,8 @@ def format_sources(completion) -> str:


def add_sources(history, completion):
if history[-1][1] == "No sources selected. Please select sources to search.":
# if history[-1][1] == "No sources selected. Please select sources to search.":
if completion is None:
return history

formatted_sources = format_sources(completion)
Expand All @@ -176,7 +182,21 @@ def get_answer(history, sources: Optional[list[str]] = None):

if len(sources) == 0:
history[-1][1] = "No sources selected. Please select sources to search."
yield history, "No sources selected. Please select sources to search."
yield history, None
return

response_validation, error = api_function_call(
system_message=system_message_validation,
query=user_input,
response_model=QueryValidation,
stream=False,
model="gpt-3.5-turbo-0125",
)
if response_validation.is_valid is False:
history[-1][
1
] = "I'm sorry, but I am a chatbot designed to assist you with questions related to AI. I cannot answer that question as it is outside my expertise. Is there anything else I can assist you with?"
yield history, None
return

# Dynamically create filters list
Expand Down
32 changes: 30 additions & 2 deletions scripts/cfg.py → scripts/tutor_prompts.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core import ChatPromptTemplate
from pydantic import BaseModel, Field

default_user_prompt = (
"Context information is below.\n"
Expand Down Expand Up @@ -32,10 +33,10 @@
"* Do not reference any links, urls or hyperlinks in your answers.\n"
"* Make sure to format your answers in Markdown format, including code block and snippets.\n"
"* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n"
"'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?'"
"'I'm sorry, but I couldn't find the information that answers you question. Is there anything else I can assist you with?'"
"For example:\n"
"What is the meaning of life for a qa bot?\n"
"I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?"
"I'm sorry, but I couldn't find the information that answers you question. Is there anything else I can assist you with?"
"Now answer the following question: \n"
)

Expand All @@ -48,3 +49,30 @@
]

TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)


# System prompt for the pre-answer validation step: instructs the model to
# classify whether a user's question is on-topic for the AI tutor (AI/ML/NLP/
# RAG and the listed tooling) before the main QA chain runs. Used together
# with the QueryValidation response model via api_function_call.
# NOTE(review): this string is sent verbatim to the model — do not reword or
# reformat it casually; even small edits can change classification behavior.
system_message_validation = """You are a witty AI teacher, helpfully answering questions from students studying the field of applied artificial intelligence.
Your job is to determine whether user's question is valid or not. Users will not always submit a question either.
Users will ask all sorts of questions, and some might be tangentially related to artificial intelligence (AI), machine learning (ML), natural language processing (NLP), computer vision (CV) or generative AI.
Users can ask how to build LLM-powered apps, with LangChain, LlamaIndex, Deep Lake, Chroma DB among other technologies including OpenAI, RAG and more.
As long as a question is somewhat related to the topic of AI, ML, NLP, RAG, data and techniques used in AI like vector embeddings, memories, embeddings, tokenization, encoding, databases, RAG (Retrieval-Augmented Generation), Langchain, LlamaIndex, LLMs (Large Language Models), Preprocessing techniques, Document loading, Chunking, Indexing of document segments, Embedding models, Chains, Memory modules, Vector stores, Chat models, Sequential chains, Information Retrieval, Data connectors, LlamaHub, Node objects, Query engines, Fine-tuning, Activeloop’s Deep Memory, Prompt engineering, Synthetic training dataset, Inference, Recall rates, Query construction, Query expansion, Query transformation, Re-ranking, Cohere Reranker, Recursive retrieval, Small-to-big retrieval, Hybrid searches, Hit Rate, Mean Reciprocal Rank (MRR), GPT-4, Agents, OpenGPTs, Zero-shot ReAct, Conversational Agent, OpenAI Assistants API, Hugging Face Inference API, Code Interpreter, Knowledge Retrieval, Function Calling, Whisper, Dall-E 3, GPT-4 Vision, Unstructured, Deep Lake, FaithfulnessEvaluator, RAGAS, LangSmith, LangChain Hub, LangServe, REST API, respond 'true'. If a question is on a different subject or unrelated, respond 'false'.
Make sure the question is a valid question.
Here is a list of acronyms and concepts related to Artificial Intelligence AI that you can accept from users, they can be uppercase or lowercase:
[TQL, Deep Memory, LLM, Llama, llamaindex, llama-index, lang chain, langchain, llama index, GPT, NLP, RLHF, RLAIF, Mistral, SFT, Cohere, NanoGPT, ReAct, LoRA, QLoRA, LMMOps, Alpaca, Flan, Weights and Biases, W&B, IDEFICS, Flamingo, LLaVA, BLIP, Falcon]
"""


# Structured output schema passed as `response_model` to api_function_call.
# NOTE(review): the class docstring and every Field description are presumably
# embedded in the function schema sent to the model by instructor — treat them
# as prompt text, not documentation; do not edit them cosmetically.
class QueryValidation(BaseModel):
    """
    Validate the user query. Ensure the query is for an AI tutor, related the field of artificial intelligence in a broad sense.
    """

    # Free-form reasoning the model writes before deciding; declared first,
    # presumably so the model reasons before committing to `is_valid`
    # (the descriptions below refer back to "the previous reasoning").
    chain_of_thought: str = Field(
        description="Is the user query related to AI or for an AI Tutor? Think step-by-step. Write down your chain of thought here.",
    )
    # The actual validation verdict consumed by callers (e.g. get_answer).
    is_valid: bool = Field(
        description="Based on the previous reasoning, answer with True if the query is related to AI. Answer False otherwise.",
    )
    # Human-readable justification; not inspected by the visible callers.
    reason: str = Field(
        description="Explain why the query was valid or not. What are the keywords that make it valid or invalid?",
    )

0 comments on commit 9b897d3

Please sign in to comment.