Commit afe2af1
Update README and some examples. (#37)
* Update README and some examples.

* Removing a line from README.
rohitprasad15 authored Sep 26, 2024
1 parent 2bca4de commit afe2af1
Showing 3 changed files with 53 additions and 907 deletions.
1 change: 0 additions & 1 deletion README.md
@@ -75,7 +75,6 @@ for model in models:
 ```
 Note that the model name in the create() call needs to be given as `<provider>:<model-name>`.
 aisuite will call the appropriate provider with the right parameters based on the provider value.
-The current list of supported providers can be found by executing `aisuite.ProviderNames.values()`
 
 For more examples, check out the `examples` directory, where you will find several
 notebooks that you can run to experiment with the interface.
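
For reference, here is a minimal sketch of the `<provider>:<model-name>` convention described in the README excerpt above. The prompt and the choice of model are illustrative; any supported provider/model pair works the same way.

```python
import aisuite as ai

client = ai.Client()
messages = [{"role": "user", "content": "Say hello in one sentence."}]

# The prefix before the colon selects the provider ("openai" here);
# the remainder is passed through as the model name.
response = client.chat.completions.create(
    model="openai:gpt-3.5-turbo",
    messages=messages,
)
print(response.choices[0].message.content)
```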
38 changes: 18 additions & 20 deletions examples/client.ipynb
@@ -11,7 +11,7 @@
 "source": [
 "# Client Examples\n",
 "\n",
-"Client provides a uniform interface for interacting with LLMs from various providers. It adapts the official python libraries from providers such as Mistral, OpenAI, Groq, Anthropic, Fireworks, Replicate, etc. to conform to the OpenAI chat completion interface.\n",
+"Client provides a uniform interface for interacting with LLMs from various providers. It adapts the official Python libraries from providers such as Mistral, OpenAI, Groq, Anthropic, AWS, etc., to conform to the OpenAI chat completion interface. In some cases it calls the REST endpoints directly.\n",
 "\n",
 "Below are some examples of how to use Client to interact with different LLMs."
@@ -55,15 +55,12 @@
 "    for key, value in additional_env_vars.items():\n",
 "        os.environ[key] = value\n",
 "\n",
-"# Define additional API keys and AWS credentials\n",
+"# Define additional API keys and credentials\n",
 "additional_keys = {\n",
 "    'GROQ_API_KEY': 'xxx',\n",
-"    'FIREWORKS_API_KEY': 'xxx',\n",
-"    'REPLICATE_API_KEY': 'xxx',\n",
-"    'TOGETHER_API_KEY': 'xxx',\n",
-"    'OCTO_API_KEY': 'xxx',\n",
 "    'AWS_ACCESS_KEY_ID': 'xxx',\n",
 "    'AWS_SECRET_ACCESS_KEY': 'xxx',\n",
+"    'ANTHROPIC_API_KEY': 'xxx',\n",
 "}\n",
 "\n",
 "# Configure environment\n",
@@ -111,9 +108,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# print(os.environ['AWS_SECRET_ACCESS_KEY'])\n",
-"# print(os.environ['AWS_ACCESS_KEY_ID'])\n",
-"# print(os.environ['AWS_REGION'])\n",
 "aws_bedrock_llama3_8b = \"aws-bedrock:meta.llama3-1-8b-instruct-v1:0\"\n",
 "response = client.chat.completions.create(model=aws_bedrock_llama3_8b, messages=messages)\n",
 "print(response.choices[0].message.content)"
@@ -126,15 +120,16 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# client2 = ai.Client({\"azure\" : {\n",
-"# \"api_key\": os.environ[\"AZURE_API_KEY\"],\n",
-"# }});\n",
+"# IMPORTANT: Azure expects the model to be passed in the format \"azure:<model_name>\".\n",
+"# The model name is the deployment name in Project/Deployments.\n",
+"# In the example below, the model is \"mistral-large-2407\", but the name given to the\n",
+"# deployment is \"aisuite-mistral-large-2407\" under the deployments section in Azure.\n",
 "client2 = ai.Client()\n",
 "client2.configure({\"azure\" : {\n",
 "    \"api_key\": os.environ[\"AZURE_API_KEY\"],\n",
-"    \"base_url\": \"https://mistral-large-2407.westus3.models.ai.azure.com/v1/\",\n",
+"    \"base_url\": \"https://aisuite-mistral-large-2407.westus3.models.ai.azure.com/v1/\",\n",
 "}});\n",
-"azure_model = \"azure:mistral-large-2407\"\n",
+"azure_model = \"azure:aisuite-mistral-large-2407\"\n",
 "response = client2.chat.completions.create(model=azure_model, messages=messages)\n",
 "print(response.choices[0].message.content)"
@@ -146,6 +141,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"# HuggingFace expects the model to be passed in the format \"huggingface:<model_name>\".\n",
+"# The model name is the full name of the model on HuggingFace.\n",
+"# In the example below, the model is \"mistralai/Mistral-7B-Instruct-v0.3\".\n",
+"# The model is deployed as a serverless inference endpoint on HuggingFace.\n",
 "client3 = ai.Client()\n",
 "hf_model = \"huggingface:mistralai/Mistral-7B-Instruct-v0.3\"\n",
 "response = client3.chat.completions.create(model=hf_model, messages=messages)\n",
@@ -159,11 +158,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"\n",
+"# Groq expects the model to be passed in the format \"groq:<model_name>\".\n",
+"# The model name is the full name of the model in Groq.\n",
+"# In the example below, the model is \"llama3-8b-8192\".\n",
 "groq_llama3_8b = \"groq:llama3-8b-8192\"\n",
 "# groq_llama3_70b = \"groq:llama3-70b-8192\"\n",
 "\n",
 "response = client.chat.completions.create(model=groq_llama3_8b, messages=messages)\n",
-"\n",
 "print(response.choices[0].message.content)"
 ]
 },
@@ -193,9 +195,7 @@
 "outputs": [],
 "source": [
 "mistral_7b = \"mistral:open-mistral-7b\"\n",
-"\n",
 "response = client.chat.completions.create(model=mistral_7b, messages=messages, temperature=0.2)\n",
-"\n",
 "print(response.choices[0].message.content)"
 ]
 },
@@ -207,9 +207,7 @@
 "metadata": {},
 "outputs": [],
 "openai_gpt35 = \"openai:gpt-3.5-turbo\"\n",
-"\n",
 "response = client.chat.completions.create(model=openai_gpt35, messages=messages, temperature=0.75)\n",
-"\n",
 "print(response.choices[0].message.content)"
 ]
 }
@@ -230,7 +228,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.4"
+"version": "3.12.6"
 }
 },
 "nbformat": 4,
