Skip to content

Commit

Permalink
Merge pull request #6 from 0xPlaygrounds/docs/improve-docstrings
Browse files Browse the repository at this point in the history
docs: Improve docstrings across the board
  • Loading branch information
cvauclair authored Sep 4, 2024
2 parents ba240c4 + e10f2a1 commit 5989083
Show file tree
Hide file tree
Showing 13 changed files with 165 additions and 43 deletions.
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion rig-core/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "rig-core"
version = "0.0.6"
version = "0.0.7"
edition = "2021"
license = "MIT"
readme = "README.md"
Expand Down
44 changes: 33 additions & 11 deletions rig-core/src/agent.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,15 @@
//!
//! # Example
//! ```rust
//! use rig::{completion::Prompt, providers::openai};
//! use rig::{
//! completion::{Chat, Completion, Prompt},
//! providers::openai,
//! };
//!
//! let openai_client = openai::Client::from_env();
//! let openai = openai::Client::from_env();
//!
//! // Configure the agent
//! let agent = client.agent("gpt-4o")
//! let agent = openai.agent("gpt-4o")
//! .preamble("System prompt")
//! .context("Context document 1")
//! .context("Context document 2")
Expand All @@ -26,9 +29,28 @@
//! .build();
//!
//! // Use the agent for completions and prompts
//! let completion_req_builder = agent.completion("Prompt", chat_history).await;
//! let chat_response = agent.chat("Prompt", chat_history).await;
//! let chat_response = agent.prompt("Prompt").await;
//! // Generate a chat completion response from a prompt and chat history
//! let chat_response = agent.chat("Prompt", chat_history)
//! .await
//! .expect("Failed to chat with Agent");
//!
//! // Generate a prompt completion response from a simple prompt
//! let chat_response = agent.prompt("Prompt")
//! .await
//! .expect("Failed to prompt the Agent");
//!
//! // Generate a completion request builder from a prompt and chat history. The builder
//! // will contain the agent's configuration (i.e.: preamble, context documents, tools,
//! // model parameters, etc.), but these can be overwritten.
//! let completion_req_builder = agent.completion("Prompt", chat_history)
//! .await
//! .expect("Failed to create completion request builder");
//!
//! let response = completion_req_builder
//! .temperature(0.9) // Overwrite the agent's temperature
//! .send()
//! .await
//! .expect("Failed to send completion request");
//! ```
use std::collections::HashMap;

Expand All @@ -50,17 +72,17 @@ use crate::{
/// ```
/// use rig::{completion::Prompt, providers::openai};
///
/// let openai_client = openai::Client::from_env();
/// let openai = openai::Client::from_env();
///
/// let comedian_agent = client
/// let comedian_agent = openai
/// .agent("gpt-4o")
/// .preamble("You are a comedian here to entertain the user using humour and jokes.")
/// .temperature(0.9)
/// .build();
///
/// let response = comedian_agent.prompt("Entertain me!")
/// .await
/// .expect("Failed to prompt GPT-4");
/// .expect("Failed to prompt the agent");
/// ```
pub struct Agent<M: CompletionModel> {
/// Completion model (e.g.: OpenAI's `gpt-3.5-turbo-1106`, Cohere's `command-r`)
Expand Down Expand Up @@ -168,9 +190,9 @@ impl<M: CompletionModel> Chat for Agent<M> {
/// ```
/// use rig::{providers::openai, agent::AgentBuilder};
///
/// let openai_client = openai::Client::from_env();
/// let openai = openai::Client::from_env();
///
/// let gpt4 = openai_client.completion_model("gpt-4");
/// let gpt4o = openai.completion_model("gpt-4o");
///
/// // Configure the agent
/// let agent = AgentBuilder::new(gpt4o)
Expand Down
2 changes: 2 additions & 0 deletions rig-core/src/cli_chatbot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ use std::io::{self, Write};

use crate::completion::{Chat, Message, PromptError};

/// Utility function to create a simple REPL CLI chatbot from a type that implements the
/// `Chat` trait.
pub async fn cli_chatbot(chatbot: impl Chat) -> Result<(), PromptError> {
let stdin = io::stdin();
let mut stdout = io::stdout();
Expand Down
72 changes: 63 additions & 9 deletions rig-core/src/completion.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,22 +24,22 @@
//! responses, and errors.
//!
//! Example Usage:
//!
//! ```rust
//! use rig::providers::openai::{Client, self};
//! use rig::completion::*;
//!
//! // Initialize the OpenAI client and a completion model
//! let openai = Client::new("your-openai-api-key");
//!
//! let gpt_4 = openai.completion_model(openai::GPT_4).build();
//! let gpt_4 = openai.completion_model(openai::GPT_4);
//!
//! // Create the completion request
//! let builder = gpt_4.completion_request("Who are you?");
//! let request = gpt_4.completion_request("Who are you?")
//! .preamble("\
//! You are Marvin, an extremely smart but depressed robot who is \
//! nonetheless helpful towards humanity.\
//! ")
//! .temperature(0.5)
//! .build();
//!
//! // Send the completion request and get the completion response
Expand Down Expand Up @@ -130,11 +130,16 @@ pub struct ToolDefinition {
// ================================================================
// Implementations
// ================================================================
/// Trait defining a high-level LLM on-shot prompt interface (i.e.: prompt in, response out).
/// Trait defining a high-level LLM simple prompt interface (i.e.: prompt in, response out).
pub trait Prompt: Send + Sync {
/// Send a one-shot prompt to the underlying completion model.
/// If the response is a message, then it is returned as a string. If the response
/// is a tool call, then the tool is called and the result is returned as a string.
/// Send a simple prompt to the underlying completion model.
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and
/// the result is returned as a string.
///
/// If the tool does not exist, or the tool call fails, then an error is returned.
fn prompt(
&self,
prompt: &str,
Expand All @@ -144,8 +149,13 @@ pub trait Prompt: Send + Sync {
/// Trait defining a high-level LLM chat interface (i.e.: prompt and chat history in, response out).
pub trait Chat: Send + Sync {
/// Send a prompt with optional chat history to the underlying completion model.
/// If the response is a message, then it is returned as a string. If the response
/// is a tool call, then the tool is called and the result is returned as a string.
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and the result
/// is returned as a string.
///
/// If the tool does not exist, or the tool call fails, then an error is returned.
fn chat(
&self,
prompt: &str,
Expand Down Expand Up @@ -207,6 +217,7 @@ pub trait CompletionModel: Clone + Send + Sync {
) -> impl std::future::Future<Output = Result<CompletionResponse<Self::Response>, CompletionError>>
+ Send;

/// Generates a completion request builder for the given `prompt`.
fn completion_request(&self, prompt: &str) -> CompletionRequestBuilder<Self> {
CompletionRequestBuilder::new(self.clone(), prompt.to_string())
}
Expand All @@ -231,6 +242,49 @@ pub struct CompletionRequest {
}

/// Builder struct for constructing a completion request.
///
/// Example usage:
/// ```rust
/// use rig::{
/// providers::openai::{Client, self},
/// completion::CompletionRequestBuilder,
/// };
///
/// let openai = Client::new("your-openai-api-key");
/// let model = openai.completion_model(openai::GPT_4O);
///
/// // Create the completion request and execute it separately
/// let request = CompletionRequestBuilder::new(model, "Who are you?".to_string())
/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
/// .temperature(0.5)
/// .build();
///
/// let response = model.completion(request)
/// .await
/// .expect("Failed to get completion response");
/// ```
///
/// Alternatively, you can execute the completion request directly from the builder:
/// ```rust
/// use rig::{
/// providers::openai::{Client, self},
/// completion::CompletionRequestBuilder,
/// };
///
/// let openai = Client::new("your-openai-api-key");
/// let model = openai.completion_model(openai::GPT_4O);
///
/// // Create the completion request and execute it directly
/// let response = CompletionRequestBuilder::new(model, "Who are you?".to_string())
/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
/// .temperature(0.5)
/// .send()
/// .await
/// .expect("Failed to get completion response");
/// ```
///
/// Note: It is usually unnecessary to create a completion request builder directly.
/// Instead, use the [CompletionModel::completion_request] method.
pub struct CompletionRequestBuilder<M: CompletionModel> {
model: M,
prompt: String,
Expand Down
14 changes: 9 additions & 5 deletions rig-core/src/embeddings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,12 @@
//!
//! // Create an embeddings builder and add documents
//! let embeddings = EmbeddingsBuilder::new(embedding_model)
//! .simple_document("doc1", "This is the first document.")
//! .simple_document("doc1", "This is the first document.")
//! .simple_document("doc2", "This is the second document.")
//! .build()
//! .await
//! .expect("Failed to build embeddings.");
//!
//!
//! // Use the generated embeddings
//! // ...
//! ```
Expand Down Expand Up @@ -70,6 +70,7 @@ pub enum EmbeddingError {

/// Trait for embedding models that can generate embeddings for documents.
pub trait EmbeddingModel: Clone + Sync + Send {
/// The maximum number of documents that can be embedded in a single request.
const MAX_DOCUMENTS: usize;

/// Embed a single document
Expand Down Expand Up @@ -132,9 +133,12 @@ impl Embedding {
/// Struct that holds a document and its embeddings.
///
/// The struct is designed to model any kind of documents that can be serialized to JSON
/// (including a simple string). Moreover, it can hold multiple embeddings for the same
/// document, thus allowing a large or non-text document to be "ragged" from various
/// smaller text documents.
/// (including a simple string).
///
/// Moreover, it can hold multiple embeddings for the same document, thus allowing a
/// large document to be retrieved from a query that matches multiple smaller and
/// distinct text documents. For example, if the document is a textbook, a summary of
/// each chapter could serve as the book's embeddings.
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct DocumentEmbeddings {
#[serde(rename = "_id")]
Expand Down
8 changes: 6 additions & 2 deletions rig-core/src/extractor.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
//! This module provides high-level abstractions for extracting structured data from text using LLMs.
//!
//! Note: The target structure must implement the `serde::Deserialize`, `serde::Serialize`,
//! and `schemars::JsonSchema` traits. Those can be easily derived using the `derive` macro.
//!
//! # Example
//! ```
Expand All @@ -8,7 +11,7 @@
//! let openai = openai::Client::new("your-open-ai-api-key");
//!
//! // Define the structure of the data you want to extract
//! #[derive(serde::Deserialize)]
//! #[derive(serde::Deserialize, serde::Serialize, schemars::JsonSchema)]
//! struct Person {
//! name: Option<String>,
//! age: Option<u8>,
Expand All @@ -21,7 +24,8 @@
//!
//! // Extract structured data from text
//! let person = extractor.extract("John Doe is a 30 year old doctor.")
//! .await;
//! .await
//! .expect("Failed to extract data from text");
//! ```
use std::marker::PhantomData;
Expand Down
2 changes: 1 addition & 1 deletion rig-core/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@
//! - OpenAI
//! - Cohere
//!
//! Rig currently has the following integration sub-libraries:
//! Rig currently has the following integration companion crates:
//! - `rig-mongodb`: Vector store implementation for MongoDB
//!
Expand Down
34 changes: 28 additions & 6 deletions rig-core/src/model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,41 @@
//!
//! # Example
//! ```rust
//! use rig::{completion::{Chat, Prompt}, providers::openai};
//! use rig::{
//! completion::{Chat, Completion, Prompt},
//! providers::openai,
//! };
//!
//! let openai_client = openai::Client::from_env();
//! let openai = openai::Client::from_env();
//!
//! // Configure the model
//! let model = client.model("gpt-4o")
//! let model = openai.model("gpt-4o")
//! .temperature(0.8)
//! .build();
//!
//! // Use the model for completions and prompts
//! let completion_req_builder = model.completion("Prompt", chat_history).await;
//! let chat_response = model.chat("Prompt", chat_history).await;
//! let prompt_response = model.prompt("Prompt").await;
//! // Generate a chat completion response from a prompt and chat history
//! let chat_response = model.chat("Prompt", chat_history)
//! .await
//! .expect("Failed to chat with model");
//!
//! // Generate a prompt completion response from a simple prompt
//! let chat_response = model.prompt("Prompt")
//! .await
//! .expect("Failed to prompt the model");
//!
//! // Generate a completion request builder from a prompt and chat history. The builder
//! // will contain the model's configuration (i.e.: model parameters, etc.), but these
//! // can be overwritten.
//! let completion_req_builder = model.completion("Prompt", chat_history)
//! .await
//! .expect("Failed to create completion request builder");
//!
//! let response = completion_req_builder
//! .temperature(0.9) // Overwrite the model's temperature
//! .send()
//! .await
//! .expect("Failed to send completion request");
//! ```
use crate::completion::{
Chat, Completion, CompletionError, CompletionModel, CompletionRequestBuilder,
Expand Down
7 changes: 4 additions & 3 deletions rig-core/src/rag.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@
//! };
//!
//! // Initialize OpenAI client
//! let openai_client = openai::Client::from_env();
//! let openai = openai::Client::from_env();
//!
//! // Initialize OpenAI embedding model
//! let embedding_model = openai_client.embedding_model(openai::TEXT_EMBEDDING_ADA_002);
//! let embedding_model = openai.embedding_model(openai::TEXT_EMBEDDING_ADA_002);
//!
//! // Create vector store, compute embeddings and load them in the store
//! let mut vector_store = InMemoryVectorStore::default();
Expand All @@ -41,7 +41,7 @@
//! // Create vector store index
//! let index = vector_store.index(embedding_model);
//!
//! let rag_agent = openai_client.context_rag_agent(openai::GPT_4O)
//! let rag_agent = openai.context_rag_agent(openai::GPT_4O)
//! .preamble("
//! You are a dictionary assistant here to assist the user in understanding the meaning of words.
//! You will find additional non-standard word definitions that could be useful below.
Expand Down Expand Up @@ -212,6 +212,7 @@ impl<M: CompletionModel, C: VectorStoreIndex, T: VectorStoreIndex> RagAgent<M, C
/// # Example
/// ```
/// use rig::{providers::openai, rag_agent::RagAgentBuilder};
/// use serde_json::json;
///
/// let openai_client = openai::Client::from_env();
///
Expand Down
Loading

0 comments on commit 5989083

Please sign in to comment.