diff --git a/.github/workflows/cd.yaml b/.github/workflows/cd.yaml new file mode 100644 index 00000000..6adf2425 --- /dev/null +++ b/.github/workflows/cd.yaml @@ -0,0 +1,34 @@ +name: "Build & Release" + +permissions: + pull-requests: write + contents: write + +on: + push: + branches: + - main +# CARGO_REGISTRY_TOKEN (token to publish to crates.io) must be defined in the repository secrets. + +jobs: + run-ci: + uses: ./.github/workflows/ci.yaml + + release-plz: + name: Release-plz + needs: run-ci + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install Rust toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + + - name: Run release-plz + uses: MarcoIeni/release-plz-action@v0.5 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..c3ced72f --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,49 @@ +# Inspired by https://github.com/joshka/github-workflows/blob/main/.github/workflows/rust-check.yml +name: Lint & Test + +on: + pull_request: + branches: [ main ] + workflow_call: + +env: + CARGO_TERM_COLOR: always + +# ensure that the workflow is only triggered once per PR, subsequent pushes to the PR will cancel +# and restart the workflow. 
See https://docs.github.com/en/actions/using-jobs/using-concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + fmt: + name: stable / fmt + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust stable + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + components: rustfmt + + - name: Run cargo fmt + run: cargo fmt -- --check + + clippy: + name: stable / clippy + runs-on: ubuntu-latest + permissions: + checks: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Rust stable + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + components: clippy + + - name: Run clippy action + uses: clechasseur/rs-clippy-check@v3 \ No newline at end of file diff --git a/rig-core/src/agent.rs b/rig-core/src/agent.rs index cac1f5b8..965ec474 100644 --- a/rig-core/src/agent.rs +++ b/rig-core/src/agent.rs @@ -33,19 +33,19 @@ //! let chat_response = agent.chat("Prompt", chat_history) //! .await //! .expect("Failed to chat with Agent"); -//! +//! //! // Generate a prompt completion response from a simple prompt //! let chat_response = agent.prompt("Prompt") //! .await //! .expect("Failed to prompt the Agent"); -//! +//! //! // Generate a completion request builder from a prompt and chat history. The builder -//! // will contain the agent's configuration (i.e.: preamble, context documents, tools, +//! // will contain the agent's configuration (i.e.: preamble, context documents, tools, //! // model parameters, etc.), but these can be overwritten. //! let completion_req_builder = agent.completion("Prompt", chat_history) //! .await //! .expect("Failed to create completion request builder"); -//! +//! //! let response = completion_req_builder //! .temperature(0.9) // Overwrite the agent's temperature //! 
.send() diff --git a/rig-core/src/cli_chatbot.rs b/rig-core/src/cli_chatbot.rs index 362ba798..df33fa8e 100644 --- a/rig-core/src/cli_chatbot.rs +++ b/rig-core/src/cli_chatbot.rs @@ -2,7 +2,7 @@ use std::io::{self, Write}; use crate::completion::{Chat, Message, PromptError}; -/// Utility function to create a simple REPL CLI chatbot from a type that implements the +/// Utility function to create a simple REPL CLI chatbot from a type that implements the /// `Chat` trait. pub async fn cli_chatbot(chatbot: impl Chat) -> Result<(), PromptError> { let stdin = io::stdin(); diff --git a/rig-core/src/completion.rs b/rig-core/src/completion.rs index f642d292..1dd6f6b9 100644 --- a/rig-core/src/completion.rs +++ b/rig-core/src/completion.rs @@ -7,7 +7,7 @@ //! - [Chat]: Defines a high-level LLM chat interface with chat history. //! - [Completion]: Defines a low-level LLM completion interface for generating completion requests. //! - [CompletionModel]: Defines a completion model that can be used to generate completion -//! responses from requests. +//! responses from requests. //! //! The [Prompt] and [Chat] traits are high level traits that users are expected to use //! to interact with LLM models. Moreover, it is good practice to implement one of these @@ -133,12 +133,12 @@ pub struct ToolDefinition { /// Trait defining a high-level LLM simple prompt interface (i.e.: prompt in, response out). pub trait Prompt: Send + Sync { /// Send a simple prompt to the underlying completion model. - /// - /// If the completion model's response is a message, then it is returned as a string. - /// - /// If the completion model's response is a tool call, then the tool is called and + /// + /// If the completion model's response is a message, then it is returned as a string. + /// + /// If the completion model's response is a tool call, then the tool is called and /// the result is returned as a string. 
- /// + /// /// If the tool does not exist, or the tool call fails, then an error is returned. fn prompt( &self, @@ -149,12 +149,12 @@ pub trait Prompt: Send + Sync { /// Trait defining a high-level LLM chat interface (i.e.: prompt and chat history in, response out). pub trait Chat: Send + Sync { /// Send a prompt with optional chat history to the underlying completion model. - /// - /// If the completion model's response is a message, then it is returned as a string. - /// - /// If the completion model's response is a tool call, then the tool is called and the result + /// + /// If the completion model's response is a message, then it is returned as a string. + /// + /// If the completion model's response is a tool call, then the tool is called and the result /// is returned as a string. - /// + /// /// If the tool does not exist, or the tool call fails, then an error is returned. fn chat( &self, @@ -242,38 +242,38 @@ pub struct CompletionRequest { } /// Builder struct for constructing a completion request. 
-/// +/// /// Example usage: /// ```rust /// use rig::{ /// providers::openai::{Client, self}, /// completion::CompletionRequestBuilder, /// }; -/// +/// /// let openai = Client::new("your-openai-api-key"); /// let model = openai.completion_model(openai::GPT_4O).build(); -/// +/// /// // Create the completion request and execute it separately /// let request = CompletionRequestBuilder::new(model, "Who are you?".to_string()) /// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string()) /// .temperature(0.5) /// .build(); -/// +/// /// let response = model.completion(request) /// .await /// .expect("Failed to get completion response"); /// ``` -/// +/// /// Alternatively, you can execute the completion request directly from the builder: /// ```rust /// use rig::{ /// providers::openai::{Client, self}, /// completion::CompletionRequestBuilder, /// }; -/// +/// /// let openai = Client::new("your-openai-api-key"); /// let model = openai.completion_model(openai::GPT_4O).build(); -/// +/// /// // Create the completion request and execute it directly /// let response = CompletionRequestBuilder::new(model, "Who are you?".to_string()) /// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string()) @@ -282,7 +282,7 @@ pub struct CompletionRequest { /// .await /// .expect("Failed to get completion response"); /// ``` -/// +/// /// Note: It is usually unnecessary to create a completion request builder directly. /// Instead, use the [CompletionModel::completion_request] method. pub struct CompletionRequestBuilder { diff --git a/rig-core/src/embeddings.rs b/rig-core/src/embeddings.rs index aa9772a3..2d40bbc5 100644 --- a/rig-core/src/embeddings.rs +++ b/rig-core/src/embeddings.rs @@ -134,10 +134,10 @@ impl Embedding { /// /// The struct is designed to model any kind of documents that can be serialized to JSON /// (including a simple string). 
-/// -/// Moreover, it can hold multiple embeddings for the same document, thus allowing a -/// large document to be retrieved from a query that matches multiple smaller and -/// distinct text documents. For example, if the document is a textbook, a summary of +/// +/// Moreover, it can hold multiple embeddings for the same document, thus allowing a +/// large document to be retrieved from a query that matches multiple smaller and +/// distinct text documents. For example, if the document is a textbook, a summary of /// each chapter could serve as the book's embeddings. #[derive(Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct DocumentEmbeddings { diff --git a/rig-core/src/extractor.rs b/rig-core/src/extractor.rs index 02699496..9b70a148 100644 --- a/rig-core/src/extractor.rs +++ b/rig-core/src/extractor.rs @@ -1,6 +1,6 @@ //! This module provides high-level abstractions for extracting structured data from text using LLMs. -//! -//! Note: The target structure must implement the `serde::Deserialize`, `serde::Serialize`, +//! +//! Note: The target structure must implement the `serde::Deserialize`, `serde::Serialize`, //! and `schemars::JsonSchema` traits. Those can be easily derived using the `derive` macro. //! //! # Example diff --git a/rig-core/src/lib.rs b/rig-core/src/lib.rs index c6e82612..c480272f 100644 --- a/rig-core/src/lib.rs +++ b/rig-core/src/lib.rs @@ -52,11 +52,11 @@ //! system prompt to full blown RAG systems that can be used to answer questions using a knowledgebase. //! Here is a quick summary of each: //! - [Model](crate::model::Model): A simple LLM model that can be prompted directly. This structs acts -//! as a thin wrapper around a completion model (i.e.: a struct implementing the [CompletionModel](crate::completion::CompletionModel) trait). +//! as a thin wrapper around a completion model (i.e.: a struct implementing the [CompletionModel](crate::completion::CompletionModel) trait). //! 
- [Agent](crate::agent::Agent): An LLM model combined with a preamble (i.e.: system prompt) and a -//! static set of context documents and tools. +//! static set of context documents and tools. //! - [RagAgent](crate::rag::RagAgent): A RAG system that can be used to answer questions using a knowledgebase -//! containing both context documents and tools. +//! containing both context documents and tools. //! //! ## Vector stores and indexes //! Rig provides a common interface for working with vector stores and indexes. Specifically, the library diff --git a/rig-core/src/model.rs b/rig-core/src/model.rs index b75f2601..8534dca5 100644 --- a/rig-core/src/model.rs +++ b/rig-core/src/model.rs @@ -1,12 +1,12 @@ //! This module contains the implementation of the [Model] struct and its builder. //! //! The [Model] type is the simplest building block for creating an LLM powered application -//! and can be used to prompt completions from a completion model. This struct acts as a -//! thin wrapper around a completion model (i.e.: a struct implementing the +//! and can be used to prompt completions from a completion model. This struct acts as a +//! thin wrapper around a completion model (i.e.: a struct implementing the //! [CompletionModel](crate::completion::CompletionModel) trait). //! //! The [ModelBuilder] struct provides a builder interface for creating [Model] instances -//! and allows the user to set the underlying model and other common parameters such as +//! and allows the user to set the underlying model and other common parameters such as //! the temperature of the model. //! //! # Example @@ -28,19 +28,19 @@ //! let chat_response = agent.chat("Prompt", chat_history) //! .await //! .expect("Failed to chat with model"); -//! +//! //! // Generate a prompt completion response from a simple prompt //! let chat_response = agent.prompt("Prompt") //! .await //! .expect("Failed to prompt the model"); -//! +//! //! 
// Generate a completion request builder from a prompt and chat history. The builder -//! // will contain the model's configuration (i.e.: model parameters, etc.), but these +//! // will contain the model's configuration (i.e.: model parameters, etc.), but these //! // can be overwritten. //! let completion_req_builder = agent.completion("Prompt", chat_history) //! .await //! .expect("Failed to create completion request builder"); -//! +//! //! let response = completion_req_builder //! .temperature(0.9) // Overwrite the model's temperature //! .send() diff --git a/rig-core/src/tool.rs b/rig-core/src/tool.rs index 09cd6592..98394ecf 100644 --- a/rig-core/src/tool.rs +++ b/rig-core/src/tool.rs @@ -180,7 +180,7 @@ pub trait ToolEmbeddingDyn: ToolDyn { impl ToolEmbeddingDyn for T { fn context(&self) -> serde_json::Result { - serde_json::to_value(&self.context()) + serde_json::to_value(self.context()) } fn embedding_docs(&self) -> Vec { diff --git a/rig-mongodb/src/lib.rs b/rig-mongodb/src/lib.rs index 37969119..7d85201f 100644 --- a/rig-mongodb/src/lib.rs +++ b/rig-mongodb/src/lib.rs @@ -83,11 +83,11 @@ impl MongoDbVectorStore { } /// Create a new `MongoDbVectorIndex` from an existing `MongoDbVectorStore`. - /// + /// /// The index (of type "vector") must already exist for the MongoDB collection. /// See the MongoDB [documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-type/) for more information on creating indexes. - /// - /// An additional filter can be provided to further restrict the documents that are + /// + /// An additional filter can be provided to further restrict the documents that are /// considered in the search. pub fn index( &self,