This repository was archived by the owner on Nov 13, 2024. It is now read-only.

llm docstring #133

Merged
merged 3 commits on Nov 2, 2023
Changes from 2 commits
72 changes: 71 additions & 1 deletion src/canopy/llm/openai.py
@@ -16,7 +16,15 @@


class OpenAILLM(BaseLLM):

"""
OpenAI LLM wrapper built on top of the OpenAI Python client.

Note: OpenAI requires a valid API key to use this class.
You can set the "OPENAI_API_KEY" environment variable to your API key,
or set it directly in code:
>>> import openai
>>> openai.api_key = "YOUR_API_KEY"
"""
def __init__(self,
model_name: str = "gpt-3.5-turbo",
*,
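
As a side note to the class docstring above, a minimal sketch of the environment-variable route (the `os.environ` line is illustrative and not part of this diff; setting it before the canopy import matters, since the openai client reads the variable at import time):

import os

# Equivalent to exporting OPENAI_API_KEY in the shell before running.
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"

from canopy.llm import OpenAILLM
llm = OpenAILLM(model_name="gpt-3.5-turbo")
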
@@ -42,6 +50,29 @@ def chat_completion(self,
max_tokens: Optional[int] = None,
model_params: Optional[ModelParams] = None,
) -> Union[ChatResponse, Iterable[StreamingChatChunk]]:
"""
Chat completion using the OpenAI API.

Note: this function is wrapped in a retry decorator to handle transient errors.

Args:
messages: Messages (chat history) to send to the model.
stream: Whether to stream the response or not.
max_tokens: Maximum number of tokens to generate. Defaults to None (generates until stop sequence or until hitting max context size).
model_params: Model parameters to use for this request. Defaults to None (uses the default model parameters).
see: https://platform.openai.com/docs/api-reference/chat/create
Returns:
A ChatResponse if stream is False, or an iterable of StreamingChatChunk objects if stream is True.

Usage:
>>> from canopy.llm import OpenAILLM
>>> from canopy.models.data_models import UserMessage
>>> llm = OpenAILLM()
>>> messages = [UserMessage(content="Hello! How are you?")]
>>> result = llm.chat_completion(messages)
>>> print(result.choices[0].message.content)
"I'm good, how are you?"
""" # noqa: E501

model_params_dict: Dict[str, Any] = {}
model_params_dict.update(
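
The docstring's usage example covers the non-streaming path; here is a hedged sketch of the streaming path, based on the `stream` flag and the `Iterable[StreamingChatChunk]` return type in the signature (the exact chunk field layout is assumed to mirror OpenAI's streaming schema):

from canopy.llm import OpenAILLM
from canopy.models.data_models import UserMessage

llm = OpenAILLM()
messages = [UserMessage(content="Hello! How are you?")]

# With stream=True the call returns an iterable of StreamingChatChunk
# objects instead of a single ChatResponse.
for chunk in llm.chat_completion(messages, stream=True):
    # Assumption: each chunk carries an incremental delta, as in the
    # OpenAI streaming format (choices[0].delta.content).
    print(chunk.choices[0].delta.content or "", end="")
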
@@ -80,6 +111,45 @@ def enforced_function_call(self,
*,
max_tokens: Optional[int] = None,
model_params: Optional[ModelParams] = None) -> dict:
"""
Forces the model to respond with a specific function call.

To read more about this feature, see: https://platform.openai.com/docs/guides/gpt/function-calling

Note: this function is wrapped in a retry decorator to handle transient errors.

Args:
messages: Messages (chat history) to send to the model.
function: Function to call. See canopy.llm.models.Function for more details.
max_tokens: Maximum number of tokens to generate. Defaults to None (generates until stop sequence or until hitting max context size).
model_params: Model parameters to use for this request. Defaults to None (uses the default model parameters).
see: https://platform.openai.com/docs/api-reference/chat/create

Returns:
dict: Function call arguments as a dictionary.

Usage:
>>> from canopy.llm import OpenAILLM
>>> from canopy.llm.models import Function, FunctionParameters, FunctionArrayProperty
>>> from canopy.models.data_models import UserMessage
>>> llm = OpenAILLM()
>>> messages = [UserMessage(content="I was wondering what is the capital of France?")]
>>> function = Function(
... name="query_knowledgebase",
... description="Query search engine for relevant information",
... parameters=FunctionParameters(
... required_properties=[
... FunctionArrayProperty(
... name="queries",
... items_type="string",
... description='List of queries to send to the search engine.',
... ),
... ]
... )
... )
>>> llm.enforced_function_call(messages, function)
{'queries': ['capital of France']}
""" # noqa: E501
# Naming the function in `function_call` forces the model to call it
function_call = {"name": function.name}

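For context on what this enforcement amounts to at the raw API level, a sketch using the pre-1.0 `openai` client that canopy wrapped at the time (the JSON-schema payload is an illustrative expansion of the Function model in the docstring example, not code from this PR):

import json
import openai

# Listing the function under `functions` and naming it in `function_call`
# makes the model return a call to that function instead of free text.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    functions=[{
        "name": "query_knowledgebase",
        "description": "Query search engine for relevant information",
        "parameters": {
            "type": "object",
            "properties": {
                "queries": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of queries to send to the search engine.",
                },
            },
            "required": ["queries"],
        },
    }],
    function_call={"name": "query_knowledgebase"},
)

# The reply carries the arguments as a JSON string, which the wrapper
# parses into the plain dict shown in the docstring example.
arguments = json.loads(response.choices[0].message.function_call.arguments)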