Skip to content

Commit

Permalink
Update README and some examples.
Browse files Browse the repository at this point in the history
  • Loading branch information
rohitprasad15 committed Sep 26, 2024
1 parent 2bca4de commit 5951c0c
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 906 deletions.
38 changes: 18 additions & 20 deletions examples/client.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
"source": [
"# Client Examples\n",
"\n",
"Client provides a uniform interface for interacting with LLMs from various providers. It adapts the official python libraries from providers such as Mistral, OpenAI, Groq, Anthropic, Fireworks, Replicate, etc. to conform to the OpenAI chat completion interface.\n",
"Client provides a uniform interface for interacting with LLMs from various providers. It adapts the official python libraries from providers such as Mistral, OpenAI, Groq, Anthropic, AWS, etc. to conform to the OpenAI chat completion interface. It directly calls the REST endpoints in some cases.\n",
"\n",
"Below are some examples of how to use Client to interact with different LLMs."
]
Expand Down Expand Up @@ -55,15 +55,12 @@
" for key, value in additional_env_vars.items():\n",
" os.environ[key] = value\n",
"\n",
"# Define additional API keys and AWS credentials\n",
"# Define additional API keys and credentials\n",
"additional_keys = {\n",
" 'GROQ_API_KEY': 'xxx',\n",
" 'FIREWORKS_API_KEY': 'xxx', \n",
" 'REPLICATE_API_KEY': 'xxx', \n",
" 'TOGETHER_API_KEY': 'xxx', \n",
" 'OCTO_API_KEY': 'xxx',\n",
" 'AWS_ACCESS_KEY_ID': 'xxx',\n",
" 'AWS_SECRET_ACCESS_KEY': 'xxx',\n",
" 'ANTHROPIC_API_KEY': 'xxx',\n",
"}\n",
"\n",
"# Configure environment\n",
Expand Down Expand Up @@ -111,9 +108,6 @@
"metadata": {},
"outputs": [],
"source": [
"# print(os.environ['AWS_SECRET_ACCESS_KEY'])\n",
"# print(os.environ['AWS_ACCESS_KEY_ID'])\n",
"# print(os.environ['AWS_REGION'])\n",
"aws_bedrock_llama3_8b = \"aws-bedrock:meta.llama3-1-8b-instruct-v1:0\"\n",
"response = client.chat.completions.create(model=aws_bedrock_llama3_8b, messages=messages)\n",
"print(response.choices[0].message.content)"
Expand All @@ -126,15 +120,16 @@
"metadata": {},
"outputs": [],
"source": [
"# client2 = ai.Client({\"azure\" : {\n",
"# \"api_key\": os.environ[\"AZURE_API_KEY\"],\n",
"# }});\n",
"# IMP NOTE: Azure expects model endpoint to be passed in the format of \"azure:<model_name>\".\n",
"# The model name is the deployment name in Project/Deployments.\n",
"# In the example below, the model is \"mistral-large-2407\", but the name given to the\n",
"# deployment is \"aisuite-mistral-large-2407\" under the deployments section in Azure.\n",
"client2 = ai.Client()\n",
"client2.configure({\"azure\" : {\n",
" \"api_key\": os.environ[\"AZURE_API_KEY\"],\n",
" \"base_url\": \"https://mistral-large-2407.westus3.models.ai.azure.com/v1/\",\n",
" \"base_url\": \"https://aisuite-mistral-large-2407.westus3.models.ai.azure.com/v1/\",\n",
"}});\n",
"azure_model = \"azure:mistral-large-2407\"\n",
"azure_model = \"azure:aisuite-mistral-large-2407\"\n",
"response = client2.chat.completions.create(model=azure_model, messages=messages)\n",
"print(response.choices[0].message.content)"
]
Expand All @@ -146,6 +141,10 @@
"metadata": {},
"outputs": [],
"source": [
"# HuggingFace expects the model to be passed in the format of \"huggingface:<model_name>\".\n",
"# The model name is the full name of the model in HuggingFace.\n",
"# In the example below, the model is \"mistralai/Mistral-7B-Instruct-v0.3\".\n",
"# The model is deployed as serverless inference endpoint in HuggingFace.\n",
"client3 = ai.Client()\n",
"hf_model = \"huggingface:mistralai/Mistral-7B-Instruct-v0.3\"\n",
"response = client3.chat.completions.create(model=hf_model, messages=messages)\n",
Expand All @@ -159,11 +158,14 @@
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Groq expects the model to be passed in the format of \"groq:<model_name>\".\n",
"# The model name is the full name of the model in Groq.\n",
"# In the example below, the model is \"llama3-8b-8192\".\n",
"groq_llama3_8b = \"groq:llama3-8b-8192\"\n",
"# groq_llama3_70b = \"groq:llama3-70b-8192\"\n",
"\n",
"response = client.chat.completions.create(model=groq_llama3_8b, messages=messages)\n",
"\n",
"print(response.choices[0].message.content)"
]
},
Expand Down Expand Up @@ -193,9 +195,7 @@
"outputs": [],
"source": [
"mistral_7b = \"mistral:open-mistral-7b\"\n",
"\n",
"response = client.chat.completions.create(model=mistral_7b, messages=messages, temperature=0.2)\n",
"\n",
"print(response.choices[0].message.content)"
]
},
Expand All @@ -207,9 +207,7 @@
"outputs": [],
"source": [
"openai_gpt35 = \"openai:gpt-3.5-turbo\"\n",
"\n",
"response = client.chat.completions.create(model=openai_gpt35, messages=messages, temperature=0.75)\n",
"\n",
"print(response.choices[0].message.content)"
]
}
Expand All @@ -230,7 +228,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
"version": "3.12.6"
}
},
"nbformat": 4,
Expand Down
Loading

0 comments on commit 5951c0c

Please sign in to comment.