Commit a393007
update gpt-3.5 version
omar-sol committed Feb 22, 2024
1 parent 08b8fbf commit a393007
Showing 15 changed files with 52 additions and 44 deletions.
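Every change below follows the same pattern: the pinned model IDs gpt-3.5-turbo-16k and gpt-3.5-turbo become the dated snapshot gpt-3.5-turbo-0125, and the GPT-4 evaluator models move from gpt-4 / gpt-4-1106-preview to gpt-4-0125-preview. For reference, a minimal sketch of the updated chat call with the v1 OpenAI Python client — the prompt strings here are placeholders, not taken from the notebooks:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",  # dated snapshot replacing 'gpt-3.5-turbo-16k'
        temperature=0,
        messages=[
            {"role": "system", "content": "You are an AI tutor. Answer only AI questions."},
            {"role": "user", "content": "What is a large language model?"},
        ],
    )
    print(response.choices[0].message.content)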
8 changes: 4 additions & 4 deletions notebooks/01-Basic_Tutor.ipynb
@@ -99,7 +99,7 @@
 },
 "outputs": [],
 "source": [
-"# Defining a function to answer a question using \"gpt-3.5-turbo-16k\" model.\n",
+"# Defining a function to answer a question using \"gpt-3.5-turbo-0125\" model.\n",
 "def ask_ai_tutor(question):\n",
 " try:\n",
 " # Formulating the system prompt and condition the model to answer only AI-related questions.\n",
@@ -113,8 +113,8 @@
 "\n",
 " # Call the OpenAI API\n",
 " response = client.chat.completions.create(\n",
-" model='gpt-3.5-turbo-16k',\n",
-" temperature=0.0,\n",
+" model='gpt-3.5-turbo-0125',\n",
+" temperature=0,\n",
 " messages=[\n",
 " {\"role\": \"system\", \"content\": system_prompt},\n",
 " {\"role\": \"user\", \"content\": prompt}\n",
@@ -218,7 +218,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 }
 },
 "nbformat": 4,
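gpt-3.5-turbo-0125 carries a 16k-token context window, so dropping the -16k variant loses no capacity. To confirm which gpt-3.5 snapshots a given API key can see before pinning one, a quick check along these lines (a sketch with the v1 client):

    from openai import OpenAI

    client = OpenAI()
    # Print the gpt-3.5 family models available to this key.
    for model in client.models.list():
        if model.id.startswith("gpt-3.5"):
            print(model.id)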
6 changes: 3 additions & 3 deletions notebooks/02-Basic_RAG.ipynb
@@ -533,7 +533,7 @@
 "\n",
 " # Call the OpenAI API\n",
 " response = client.chat.completions.create(\n",
-" model='gpt-3.5-turbo-16k',\n",
+" model='gpt-3.5-turbo-0125',\n",
 " temperature=0.0,\n",
 " messages=[\n",
 " {\"role\": \"system\", \"content\": system_prompt},\n",
@@ -610,7 +610,7 @@
 "\n",
 "# Call the OpenAI API\n",
 "response = client.chat.completions.create(\n",
-" model='gpt-3.5-turbo-16k',\n",
+" model='gpt-3.5-turbo-0125',\n",
 " temperature=.9,\n",
 " messages=[\n",
 " {\"role\": \"system\", \"content\": system_prompt},\n",
@@ -680,7 +680,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 },
 "widgets": {
 "application/vnd.jupyter.widget-state+json": {
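Note the two call sites above keep different temperatures on purpose: 0.0 for the deterministic RAG answer, .9 for the more exploratory completion. A side-by-side sketch of the effect, assuming the notebook's client object and a placeholder prompt:

    for temp in (0.0, 0.9):
        response = client.chat.completions.create(
            model="gpt-3.5-turbo-0125",
            temperature=temp,
            messages=[{"role": "user", "content": "Name one benefit of RAG."}],
        )
        print(f"temperature={temp}: {response.choices[0].message.content}")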
6 changes: 4 additions & 2 deletions notebooks/03-RAG_with_LlamaIndex.ipynb
@@ -231,9 +231,11 @@
 },
 "outputs": [],
 "source": [
+"from llama_index.llms.openai import OpenAI\n",
 "# Define a query engine that is responsible for retrieving related pieces of text,\n",
 "# and using a LLM to formulate the final answer.\n",
-"query_engine = index.as_query_engine()"
+"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)\n",
+"query_engine = index.as_query_engine(llm=llm)"
 ]
 },
 {
@@ -290,7 +292,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 }
 },
 "nbformat": 4,
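With the LLM now passed explicitly to the query engine, the cell slots into the notebook roughly like this (a sketch assuming llama-index 0.10-style imports and a ./data folder of documents, both hypothetical here):

    from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
    from llama_index.llms.openai import OpenAI

    documents = SimpleDirectoryReader("./data").load_data()
    index = VectorStoreIndex.from_documents(documents)

    # Retrieve related chunks, then let the pinned model write the final answer.
    llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=512)
    query_engine = index.as_query_engine(llm=llm)
    print(query_engine.query("What is a large language model?"))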
7 changes: 5 additions & 2 deletions notebooks/04-RAG_with_VectorStore.ipynb
@@ -275,9 +275,12 @@
 },
 "outputs": [],
 "source": [
+"from llama_index.llms.openai import OpenAI\n",
 "# Define a query engine that is responsible for retrieving related pieces of text,\n",
 "# and using a LLM to formulate the final answer.\n",
-"query_engine = index.as_query_engine()"
+"\n",
+"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)\n",
+"query_engine = index.as_query_engine(llm=llm)"
 ]
 },
 {
@@ -334,7 +337,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 }
 },
 "nbformat": 4,
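Passing llm= per engine keeps the model choice local to each cell. An alternative the commit does not use, sketched here for contrast, is llama-index's global Settings object, which engines fall back to when no llm is given:

    from llama_index.core import Settings
    from llama_index.llms.openai import OpenAI

    # Global default: index.as_query_engine() now uses this model implicitly.
    Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=512)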
4 changes: 2 additions & 2 deletions notebooks/05-Improve_Prompts_+_Add_Source.ipynb
@@ -82,7 +82,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -724,7 +724,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 },
 "widgets": {
 "application/vnd.jupyter.widget-state+json": {
6 changes: 3 additions & 3 deletions notebooks/06-Evaluate_RAG.ipynb
@@ -82,7 +82,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -589,7 +589,7 @@
 "from llama_index.core.evaluation import generate_question_context_pairs\n",
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
@@ -795,7 +795,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.7"
+"version": "3.11.8"
 },
 "widgets": {
 "application/vnd.jupyter.widget-state+json": {
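The question-context pairs generated above are typically fed to a retrieval evaluator. A sketch of the usual consumption path, assuming the notebook's index and rag_eval_dataset variables (the metric names are the standard hit-rate/MRR pair):

    from llama_index.core.evaluation import RetrieverEvaluator

    retriever = index.as_retriever(similarity_top_k=2)
    evaluator = RetrieverEvaluator.from_metric_names(
        ["mrr", "hit_rate"], retriever=retriever
    )
    # aevaluate_dataset is async; in a notebook, top-level await works.
    eval_results = await evaluator.aevaluate_dataset(rag_eval_dataset)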
8 changes: 4 additions & 4 deletions notebooks/07-RAG_Improve_Chunking.ipynb
@@ -82,7 +82,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -758,7 +758,7 @@
 "\n",
 "index_no_metadata = VectorStoreIndex(\n",
 " nodes=nodes_no_meta,\n",
-" service_context=ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo\")),\n",
+" service_context=ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo-0125\")),\n",
 ")"
 ]
 },
@@ -876,7 +876,7 @@
 "# Create questions for each segment. These questions will be used to\n",
 "# assess whether the retriever can accurately identify and return the\n",
 "# corresponding segment when queried.\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
@@ -1035,7 +1035,7 @@
 " query_engine = index.as_query_engine(similarity_top_k=i)\n",
 "\n",
 " # While we use GPT3.5-Turbo to answer questions, we can use GPT4 to evaluate the answers.\n",
-" llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-1106-preview\")\n",
+" llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
 " service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n",
 "\n",
 " faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
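For context, the faithfulness evaluator built above is applied per response; roughly (a sketch, query string hypothetical):

    response = query_engine.query("How does chunk size affect retrieval quality?")
    eval_result = faithfulness_evaluator.evaluate_response(response=response)
    print(eval_result.passing)  # True when the answer is grounded in the retrieved context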
2 changes: 1 addition & 1 deletion notebooks/08-Finetune_Embedding.ipynb
@@ -259,7 +259,7 @@
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
 "# Load the OpenAI API with the \"gpt-3.5-turbo\" model\n",
-"llm = OpenAI()\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "\n",
 "# Generate questions for each chunk.\n",
 "TRAIN_DATASET = generate_qa_embedding_pairs(TRAIN_NODEs, llm=llm)\n",
6 changes: 3 additions & 3 deletions notebooks/09-Better_Embedding_Model.ipynb
@@ -92,7 +92,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -665,7 +665,7 @@
 "# Create questions for each segment. These questions will be used to\n",
 "# assess whether the retriever can accurately identify and return the\n",
 "# corresponding segment when queried.\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
@@ -824,7 +824,7 @@
 " query_engine = index.as_query_engine(similarity_top_k=i)\n",
 "\n",
 " # While we use GPT3.5-Turbo to answer questions, we can use GPT4 to evaluate the answers.\n",
-" llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-1106-preview\")\n",
+" llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
 " service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n",
 "\n",
 " faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
4 changes: 2 additions & 2 deletions notebooks/10-Adding_Reranking.ipynb
@@ -93,7 +93,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -617,7 +617,7 @@
 "# Create questions for each segment. These questions will be used to\n",
 "# assess whether the retriever can accurately identify and return the\n",
 "# corresponding segment when queried.\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
4 changes: 2 additions & 2 deletions notebooks/11-Adding_Hybrid_Search.ipynb
@@ -91,7 +91,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -807,7 +807,7 @@
 "# Create questions for each segment. These questions will be used to\n",
 "# assess whether the retriever can accurately identify and return the\n",
 "# corresponding segment when queried.\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
6 changes: 3 additions & 3 deletions notebooks/12-Improve_Query.ipynb
@@ -91,7 +91,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -501,7 +501,7 @@
 "source": [
 "from llama_index.core import ServiceContext\n",
 "\n",
-"gpt4 = OpenAI(temperature=0, model=\"gpt-4\")\n",
+"gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
 "service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)"
 ]
 },
@@ -766,7 +766,7 @@
 "from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n",
 "from llama_index.core.query_engine.multistep_query_engine import MultiStepQueryEngine\n",
 "\n",
-"gpt3 = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
+"gpt3 = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\")\n",
 "service_context_gpt3 = ServiceContext.from_defaults(llm=gpt3)\n",
 "\n",
 "step_decompose_transform_gpt3 = StepDecomposeQueryTransform(llm=gpt3, verbose=True)\n",
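The transform above is usually wired into a multi-step engine like this (a sketch continuing the cell; the index_summary string is a placeholder):

    multi_step_engine = MultiStepQueryEngine(
        query_engine=index.as_query_engine(llm=gpt3),
        query_transform=step_decompose_transform_gpt3,
        index_summary="Used to answer questions about the indexed articles.",
    )
    # The transform decomposes the question into sequential sub-queries.
    print(multi_step_engine.query("Which techniques improved retrieval, and why?"))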
4 changes: 2 additions & 2 deletions notebooks/14-Adding_Chat.ipynb
@@ -91,7 +91,7 @@
 "source": [
 "from llama_index.llms.openai import OpenAI\n",
 "\n",
-"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
 ]
 },
 {
@@ -740,7 +740,7 @@
 "outputs": [],
 "source": [
 "# Define GPT-4 model that will be used by the chat_engine to improve the query.\n",
-"gpt4 = OpenAI(temperature=0.9, model=\"gpt-4\")"
+"gpt4 = OpenAI(temperature=0.9, model=\"gpt-4-0125-preview\")"
 ]
 },
 {
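Here GPT-4 rewrites each follow-up into a standalone query before retrieval; the usual wiring is the condense-question chat mode (a sketch, chat strings hypothetical):

    chat_engine = index.as_chat_engine(chat_mode="condense_question", llm=gpt4)
    print(chat_engine.chat("What does RAG stand for?"))
    # The follow-up is condensed with the chat history into a standalone query.
    print(chat_engine.chat("And why does it help?"))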
2 changes: 1 addition & 1 deletion notebooks/15-Use_OpenSource_Models.ipynb
@@ -782,7 +782,7 @@
 "# Create questions for each segment. These questions will be used to\n",
 "# assess whether the retriever can accurately identify and return the\n",
 "# corresponding segment when queried.\n",
-"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+"llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
 "rag_eval_dataset = generate_question_context_pairs(\n",
 " nodes,\n",
 " llm=llm,\n",
23 changes: 13 additions & 10 deletions scripts/basic_tutor.py
@@ -3,9 +3,10 @@
 from openai import OpenAI
 
 # Retrieve your OpenAI API key from the environment variables and activate the OpenAI client
-openai_api_key = os.environ.get('OPENAI_API_KEY')
+openai_api_key = os.environ.get("OPENAI_API_KEY")
 client = OpenAI(api_key=openai_api_key)
 
+
 def ask_ai_tutor(question):
 
     # Check if OpenAI key has been correctly added
@@ -25,19 +26,20 @@ def ask_ai_tutor(question):
 
         # Call the OpenAI API
         response = client.chat.completions.create(
-            model='gpt-3.5-turbo-16k',
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": prompt}
-            ]
-        )
+            model="gpt-3.5-turbo-0125",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt},
+            ],
+        )
 
         # Return the AI's response
         return response.choices[0].message.content.strip()
 
     except Exception as e:
         return f"An error occurred: {e}"
 
 
 def main():
     # Check if a question was provided as a command-line argument
     if len(sys.argv) != 2:
@@ -46,12 +48,13 @@ def main():
 
     # The user's question is the first command-line argument
     user_question = sys.argv[1]
 
     # Get the AI's response
     ai_response = ask_ai_tutor(user_question)
 
     # Print the AI's response
     print(f"AI Tutor says: {ai_response}")
 
+
 if __name__ == "__main__":
     main()

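The script's command-line interface is unchanged; it still takes the question as a single argument, e.g. (question text arbitrary):

    python scripts/basic_tutor.py "What is the difference between RAG and fine-tuning?"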