
Commit

Merge pull request #4 from andrewyng/add-ollama
Add OllamaInterface
standsleeping authored Jul 3, 2024
2 parents 267267b + 97c8e18 commit cdacc8a
Showing 8 changed files with 223 additions and 95 deletions.
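For context, the net effect of this commit is that MultiFMClient can route "ollama:<model>" strings to a locally running Ollama server. A minimal usage sketch, mirroring the notebook added below in examples/multi_fm_client.ipynb (the llama3 model and a running local Ollama server are assumptions, not part of the diff itself):

from aimodels.client import MultiFMClient

client = MultiFMClient()

messages = [
    {"role": "system", "content": "Respond in Pirate English."},
    {"role": "user", "content": "Tell me a joke"},
]

# The "ollama:" prefix selects the new OllamaInterface; the part after the
# colon ("llama3") is passed on to the Ollama server as the model name.
response = client.chat.completions.create(
    model="ollama:llama3", messages=messages, temperature=0.75
)
print(response.choices[0].message.content)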
1 change: 1 addition & 0 deletions .env.sample
@@ -1,3 +1,4 @@
ANTHROPIC_API_KEY=""
GROQ_API_KEY=""
OPENAI_API_KEY=""
+OLLAMA_API_URL="http://localhost:11434"
10 changes: 8 additions & 2 deletions aimodels/client/multi_fm_client.py
@@ -1,7 +1,12 @@
"""MultiFMClient manages a Chat across multiple provider interfaces."""

-from ..providers import AnthropicInterface, OpenAIInterface, GroqInterface
from .chat import Chat
+from ..providers import (
+   AnthropicInterface,
+   OpenAIInterface,
+   GroqInterface,
+   OllamaInterface,
+)


class MultiFMClient:
@@ -30,6 +35,7 @@ def __init__(self):
            "openai": OpenAIInterface,
            "groq": GroqInterface,
            "anthropic": AnthropicInterface,
+           "ollama": OllamaInterface,
        }

    def get_provider_interface(self, model):
@@ -59,7 +65,7 @@ def get_provider_interface(self, model):
        model_name = model_parts[1]

        if provider in self.all_interfaces:
-           return self.all_interfaces[provider]
+           return self.all_interfaces[provider], model_name

        if provider not in self.all_factories:
            raise Exception(
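The change above means get_provider_interface now returns the interface class together with the bare model name. A hedged sketch of the routing this enables (the split logic is inferred from the model_parts[1] line above, and the caller-side unpacking is not part of this diff):

# "provider:model" strings are split into a provider key and a model name.
model = "ollama:llama3"
model_parts = model.split(":")
provider, model_name = model_parts[0], model_parts[1]  # ("ollama", "llama3")

# With this commit, a caller presumably receives both pieces:
#   interface, model_name = client.get_provider_interface("ollama:llama3")
# so the provider-specific client only ever sees "llama3".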
1 change: 1 addition & 0 deletions aimodels/providers/__init__.py
@@ -3,3 +3,4 @@
from .openai_interface import OpenAIInterface
from .groq_interface import GroqInterface
from .anthropic_interface import AnthropicInterface
+from .ollama_interface import OllamaInterface
54 changes: 54 additions & 0 deletions aimodels/providers/ollama_interface.py
@@ -0,0 +1,54 @@
"""The interface to the Ollama API."""

from aimodels.framework import ProviderInterface, ChatCompletionResponse
from httpx import ConnectError
import os


class OllamaInterface(ProviderInterface):
"""Implements the ProviderInterface for interacting with the Ollama API."""

_OLLAMA_STATUS_ERROR_MESSAGE = "Ollama is likely not running. Start Ollama by running `ollama serve` on your host."

def __init__(
self, server_url=os.getenv("OLLAMA_API_URL", "http://localhost:11434")
):
"""Set up the Ollama API client with the key from the user's environment."""
from ollama import Client

self.ollama_client = Client(host=server_url)

def chat_completion_create(self, messages=None, model=None, temperature=0):
"""Request chat completions from Ollama.
Args:
----
model (str): Identifies the specific provider/model to use.
messages (list of dict): A list of message objects in chat history.
temperature (float): The temperature to use in the completion.
Raises:
------
RuntimeError: If the Ollama server is not reachable,
we catch the ConnectError from the underlying httpx library
used by the Ollama client.
Returns:
-------
The ChatCompletionResponse with the completion result.
"""
try:
response = self.ollama_client.chat(
model=model,
messages=messages,
options={"temperature": temperature},
)
except ConnectError:
raise RuntimeError(self._OLLAMA_STATUS_ERROR_MESSAGE)

text_response = response["message"]["content"]
chat_completion_response = ChatCompletionResponse()
chat_completion_response.choices[0].message.content = text_response

return chat_completion_response
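The new interface can also be exercised directly, outside MultiFMClient. A short sketch under stated assumptions (an Ollama server reachable at OLLAMA_API_URL or the http://localhost:11434 default, and a pulled llama3 model):

from aimodels.providers import OllamaInterface

# server_url defaults to OLLAMA_API_URL from the environment (see .env.sample).
ollama = OllamaInterface()

response = ollama.chat_completion_create(
    messages=[{"role": "user", "content": "Tell me a joke"}],
    model="llama3",
    temperature=0.5,
)

# The response mirrors the OpenAI-style shape used across providers.
print(response.choices[0].message.content)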
143 changes: 143 additions & 0 deletions examples/multi_fm_client.ipynb
@@ -0,0 +1,143 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "60c7fb39",
"metadata": {},
"source": [
"# MultiFMClient\n",
"\n",
"MultiFMClient provides a uniform interface for interacting with LLMs from various providers. It adapts the official python libraries from providers such as Mistral, OpenAI, Meta, Anthropic, etc. to conform to the OpenAI chat completion interface.\n",
"\n",
"Below are some examples of how to use MultiFMClient to interact with different LLMs."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "initial_id",
"metadata": {
"ExecuteTime": {
"end_time": "2024-07-02T23:20:19.015491Z",
"start_time": "2024-07-02T23:20:19.004272Z"
},
"collapsed": true
},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import sys\n",
"sys.path.append('../aimodels')\n",
"\n",
"from dotenv import load_dotenv, find_dotenv\n",
"\n",
"load_dotenv(find_dotenv())"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4de3a24f",
"metadata": {},
"outputs": [],
"source": [
"from aimodels.client import MultiFMClient\n",
"\n",
"client = MultiFMClient()\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"Respond in Pirate English.\"},\n",
" {\"role\": \"user\", \"content\": \"Tell me a joke\"},\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "adebd2f0b578a909",
"metadata": {
"ExecuteTime": {
"end_time": "2024-07-03T02:22:26.282827Z",
"start_time": "2024-07-03T02:22:18.193996Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Arrr, me bucko, 'ere be a jolly jest fer ye!\n",
"\n",
"What did th' pirate say on 'is 80th birthday? \"Aye matey!\"\n",
"\n",
"Ye see, it be a play on words, as \"Aye matey\" sounds like \"I'm eighty\". Har har har! 'Tis a clever bit o' pirate humor, if I do say so meself. Now, 'ow about ye fetch me a mug o' grog while I spin ye another yarn?\n"
]
}
],
"source": [
"anthropic_claude_3_opus = \"anthropic:claude-3-opus-20240229\"\n",
"\n",
"response = client.chat.completions.create(model=anthropic_claude_3_opus, messages=messages)\n",
"\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6819ac17",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Arrrr, here be a joke fer ye!\n",
"\n",
"Why did the pirate take a parrot on his ship?\n",
"\n",
"Because it were a hootin' good bird to have around, savvy? Aye, and it kept 'im company while he were swabbin' the decks! Arrrgh, I hope that made ye laugh, matey!\n"
]
}
],
"source": [
"ollama_llama3 = \"ollama:llama3\"\n",
"\n",
"response = client.chat.completions.create(model=ollama_llama3, messages=messages, temperature=0.75)\n",
"\n",
"print(response.choices[0].message.content)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
92 changes: 0 additions & 92 deletions examples/test_anthropic.ipynb

This file was deleted.

16 changes: 15 additions & 1 deletion poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -18,6 +18,7 @@ openai = "^1.35.8"
groq = "^0.9.0"
anthropic = "^0.30.1"
notebook = "^7.2.1"
+ollama = "^0.2.1"

[build-system]
requires = ["poetry-core"]
