feat(llm): Use Ollama as the LLM Provider
1 parent 6a539bb · commit 532610d
Showing 8 changed files with 183 additions and 38 deletions.
det/llm/base.py

```diff
@@ -1,45 +1,37 @@
 """
 This module defines the BaseLLMClient class, serving as an interface for interactions with
 various Large Language Models (LLMs). It establishes a standardized method for generating text
 responses across different LLM providers, ensuring flexibility and extendability in integrating
 multiple LLM services.
 
 By implementing the `generate_response` method, subclasses can provide specific functionalities
 for any LLM provider, such as OpenAI, Google, Anthropic, or others, adhering to a unified API.
 This design promotes code reuse and simplifies the process of swapping or combining LLM services
 in applications requiring natural language generation.
 
 Example Usage:
 --------------
 class MyLLMClient(BaseLLMClient):
     def generate_response(self, prompt: str, **kwargs):
         # Implementation for a specific LLM provider
         pass
 
 Implementing this interface allows for easy integration and maintenance of LLM-based features,
 supporting a wide range of applications from chatbots to content generation tools.
 """
 # det/llm/base.py
 
 from abc import ABC, abstractmethod
 
 
-class BaseLLMClient(ABC):
+class LLMGeneratorInterface(ABC):
     """
     Base class for LLM clients.
 
     Example Usage:
     --------------
     class MyLLMClient(BaseLLMClient):
-        def generate_response(self, prompt: str, **kwargs):
+        def __init__(self, **kwargs):
+            # Initialize any necessary variables or state
+            pass
+
+        def generate_response(self, prompt: str, **kwargs) -> str:
             # Implementation for a specific LLM provider
             pass
     """
 
     def __init__(self, **kwargs):
         # Initialize any necessary variables or state
         pass
 
     @abstractmethod
-    def generate_response(self, prompt: str, **kwargs):
+    def generate_response(self, prompt: str, **kwargs) -> str:
         """
         Generates a response to a given prompt using the LLM.
 
         :param prompt: The input prompt to generate text for.
         :param kwargs: Additional parameters specific to the LLM provider.
             - `temperature`: default is 0.
             - `max_tokens`: default is 256.
         :return: The generated text response.
         """
-        raise NotImplementedError("This method should be implemented by subclasses.")
+        pass
```
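For orientation, a minimal concrete implementation of the renamed `LLMGeneratorInterface` might look like the sketch below. It is not part of this commit: `CannedResponseGenerator` and its fixed reply are hypothetical stand-ins for a real provider integration.

```python
# Hypothetical example (not part of this commit): a minimal concrete
# implementation of LLMGeneratorInterface from det/llm/base.py.
from det.llm.base import LLMGeneratorInterface


class CannedResponseGenerator(LLMGeneratorInterface):
    """Illustrative generator that returns a fixed reply for any prompt."""

    def __init__(self, reply: str = "Hello from a stub LLM.", **kwargs):
        super().__init__(**kwargs)
        self.reply = reply

    def generate_response(self, prompt: str, **kwargs) -> str:
        # A real provider would call its API here; this stub just echoes back.
        return f"{self.reply} (prompt was: {prompt})"
```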
det/llm/llm_ollama.py (new file)

```python
# det/llm/llm_ollama.py

import logging
from abc import ABC, abstractmethod

from ollama import Client

from det.llm.base import LLMGeneratorInterface

logger = logging.getLogger(__name__)


class OllamaClient(LLMGeneratorInterface):
    """
    The `OllamaClient` class is a subclass of the `LLMGeneratorInterface` abstract class.
    It is used to generate text responses using the Ollama language model (LLM).
    The class initializes with a specified model and host, and provides a method
    to generate a response to a given prompt using the Ollama LLM.
    """

    def __init__(self, model: str = "llama2", host: str = "http://localhost:11434"):
        """
        Initializes the `OllamaClient` class with the specified model and host.

        Parameters:
        - model (str): The specified model for the Ollama LLM.
        - host (str): The host URL for the Ollama LLM.

        Raises:
        - TypeError: If the model or host parameter is not a string.
        """
        if not isinstance(model, str):
            raise TypeError("Model parameter must be a string.")
        if not isinstance(host, str):
            raise TypeError("Host parameter must be a string.")
        self.model = model
        self.client = Client(host=host)

    def generate_response(self, prompt: str, **kwargs) -> str:
        """
        Generates a response to a given prompt using the Ollama LLM.

        Parameters:
        - prompt (str): The prompt for generating the response.
        - **kwargs: Additional parameters specific to the LLM provider.

        Raises:
        - ValueError: If the prompt is not a string.

        Returns:
        - str: The generated text response.
        """
        if not isinstance(prompt, str):
            raise ValueError("Prompt must be a string.")
        try:
            response = self.client.chat(
                model=self.model,  # Use the model specified during initialization
                messages=[{"role": "user", "content": prompt}],
                stream=False,
                options={"temperature": 0},
            )
            return response["message"]["content"]
        except Exception as e:
            # Log with the module-level logger and re-raise so callers are not
            # silently handed None in place of the promised str.
            logger.error(f"An error occurred: {e}")
            raise


class LLMAdapterInterface(ABC):
    """
    Interface for LLM adapters. Defines the contract that all LLM adapters must follow.
    """

    @abstractmethod
    def generate(self, prompt: str, **kwargs):
        """
        Generates a response to a given prompt using the LLM.

        :param prompt: The input prompt to generate text for.
        :param kwargs: Additional parameters specific to the LLM provider.
        :return: The generated text response.
        """
        pass


class OllamaAdapter(LLMAdapterInterface):
    """
    The `OllamaAdapter` class adapts the Ollama LLM to the generator interface,
    following the `LLMAdapterInterface`.
    """

    def __init__(self, model: str = "mistral", host: str = "http://localhost:11434"):
        self.model = model
        self.client = Client(host=host)

    def generate(self, prompt: str, **kwargs):
        response = self.client.chat(
            model=self.model,  # Use the model specified during initialization
            messages=[{"role": "user", "content": prompt}],
            stream=False,
            options={"temperature": 0},
        )
        return response.message.content


class OllamaGenerator(LLMGeneratorInterface):
    """
    The `OllamaGenerator` class is a subclass of the `LLMGeneratorInterface` abstract class.
    It uses the `OllamaAdapter` to generate text responses using the Ollama LLM.
    """

    def __init__(self, model: str = "llama2", host: str = "http://localhost:11434"):
        self.adapter = OllamaAdapter(model, host)

    def generate_response(self, prompt: str, **kwargs) -> str:
        return self.adapter.generate(prompt, **kwargs)
```
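A minimal usage sketch, not part of the commit, assuming an Ollama server is running locally on the default port (`http://localhost:11434`) and the `llama2` model has already been pulled; the prompt strings are illustrative.

```python
# Hypothetical usage (not part of this commit), assuming `ollama serve` is
# running locally and the "llama2" model has been pulled.
from det.llm.llm_ollama import OllamaClient, OllamaGenerator

# Direct client: talks to the Ollama chat endpoint itself.
client = OllamaClient(model="llama2", host="http://localhost:11434")
print(client.generate_response("Summarise what an LLM adapter is in one sentence."))

# Adapter-backed generator: same interface, but delegates to OllamaAdapter.
generator = OllamaGenerator(model="llama2", host="http://localhost:11434")
print(generator.generate_response("Name three models that Ollama can serve."))
```

Both paths expose the same `generate_response` signature; the adapter indirection mainly keeps the Ollama-specific call shape behind `LLMAdapterInterface`, so a different backend could be swapped in without changing code that depends on `OllamaGenerator`.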