Merge pull request #7 from 0xPlaygrounds/devops/setup-ci-cd
devops: Add basic CI workflow
cvauclair authored Sep 4, 2024
2 parents 5989083 + 8519a63 commit ff7cf8f
Showing 11 changed files with 131 additions and 44 deletions.
38 changes: 38 additions & 0 deletions .github/workflows/cd.yaml
@@ -0,0 +1,38 @@
name: "Build & Release"

permissions:
pull-requests: write
contents: write

on:
push:
branches:
- main
secrets:
CARGO_REGISTRY_TOKEN:
description: "Token to publish to crates.io"
required: true

jobs:
run-ci:
needs: validate-input
uses: ./.github/workflows/ci.yaml

release-plz:
name: Release-plz
requires: run-ci
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Install Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1

- name: Run release-plz
uses: MarcoIeni/[email protected]
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
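Taken together, this workflow runs the CI checks and then invokes release-plz on every push to `main`. Under release-plz's usual setup (assumed here rather than spelled out in the diff), the action maintains a release pull request with version bumps and changelog updates, and publishes to crates.io with the `CARGO_REGISTRY_TOKEN` secret once that PR is merged.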
49 changes: 49 additions & 0 deletions .github/workflows/ci.yaml
@@ -0,0 +1,49 @@
# Inspired by https://github.com/joshka/github-workflows/blob/main/.github/workflows/rust-check.yml
name: Lint & Test

on:
  pull_request:
    branches: [ main ]
  workflow_call:

env:
  CARGO_TERM_COLOR: always

# ensure that the workflow is only triggered once per PR, subsequent pushes to the PR will cancel
# and restart the workflow. See https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  fmt:
    name: stable / fmt
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Rust stable
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          components: rustfmt

      - name: Run cargo fmt
        run: cargo fmt -- --check

  clippy:
    name: stable / clippy
    runs-on: ubuntu-latest
    permissions:
      checks: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Rust stable
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          components: clippy

      - name: Run clippy action
        uses: clechasseur/rs-clippy-check@v3
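Both jobs map to commands contributors can run locally before opening a PR: the fmt job invokes `cargo fmt -- --check` verbatim, and the clippy job's rs-clippy-check action is expected to wrap a plain `cargo clippy` run and surface its warnings as check annotations (an assumption about the action, not something shown in this diff).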
8 changes: 4 additions & 4 deletions rig-core/src/agent.rs
@@ -33,19 +33,19 @@
//! let chat_response = agent.chat("Prompt", chat_history)
//! .await
//! .expect("Failed to chat with Agent");
//!
//!
//! // Generate a prompt completion response from a simple prompt
//! let chat_response = agent.prompt("Prompt")
//! .await
//! .expect("Failed to prompt the Agent");
//!
//!
//! // Generate a completion request builder from a prompt and chat history. The builder
//! // will contain the agent's configuration (i.e.: preamble, context documents, tools,
//! // will contain the agent's configuration (i.e.: preamble, context documents, tools,
//! // model parameters, etc.), but these can be overwritten.
//! let completion_req_builder = agent.completion("Prompt", chat_history)
//! .await
//! .expect("Failed to create completion request builder");
//!
//!
//! let response = completion_req_builder
//! .temperature(0.9) // Overwrite the agent's temperature
//! .send()
2 changes: 1 addition & 1 deletion rig-core/src/cli_chatbot.rs
@@ -2,7 +2,7 @@ use std::io::{self, Write};

use crate::completion::{Chat, Message, PromptError};

/// Utility function to create a simple REPL CLI chatbot from a type that implements the
/// Utility function to create a simple REPL CLI chatbot from a type that implements the
/// `Chat` trait.
pub async fn cli_chatbot(chatbot: impl Chat) -> Result<(), PromptError> {
let stdin = io::stdin();
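The change above is whitespace-only, but since `cli_chatbot` is the function being documented, a minimal usage sketch may help. It assumes the module is reachable as `rig::cli_chatbot` (not confirmed by this diff), and `build_chatbot()` named in the comment is a hypothetical stand-in for constructing any `Chat` implementor such as a `Model` or `Agent`:

```rust
use rig::cli_chatbot::cli_chatbot; // path assumed; adjust to the crate's actual re-export
use rig::completion::{Chat, PromptError};

/// Drive the REPL with any `Chat` implementor, e.g. the result of a
/// hypothetical `build_chatbot()` helper that returns a `Model` or `Agent`.
async fn run_repl(chatbot: impl Chat) -> Result<(), PromptError> {
    // `cli_chatbot` loops over stdin lines and prints the model's replies.
    cli_chatbot(chatbot).await
}
```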
38 changes: 19 additions & 19 deletions rig-core/src/completion.rs
@@ -7,7 +7,7 @@
//! - [Chat]: Defines a high-level LLM chat interface with chat history.
//! - [Completion]: Defines a low-level LLM completion interface for generating completion requests.
//! - [CompletionModel]: Defines a completion model that can be used to generate completion
//! responses from requests.
//! responses from requests.
//!
//! The [Prompt] and [Chat] traits are high level traits that users are expected to use
//! to interact with LLM models. Moreover, it is good practice to implement one of these
@@ -133,12 +133,12 @@ pub struct ToolDefinition {
/// Trait defining a high-level LLM simple prompt interface (i.e.: prompt in, response out).
pub trait Prompt: Send + Sync {
/// Send a simple prompt to the underlying completion model.
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and
/// the result is returned as a string.
///
///
/// If the tool does not exist, or the tool call fails, then an error is returned.
fn prompt(
&self,
@@ -149,12 +149,12 @@ pub trait Prompt: Send + Sync {
/// Trait defining a high-level LLM chat interface (i.e.: prompt and chat history in, response out).
pub trait Chat: Send + Sync {
/// Send a prompt with optional chat history to the underlying completion model.
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and the result
///
/// If the completion model's response is a message, then it is returned as a string.
///
/// If the completion model's response is a tool call, then the tool is called and the result
/// is returned as a string.
///
///
/// If the tool does not exist, or the tool call fails, then an error is returned.
fn chat(
&self,
@@ -242,38 +242,38 @@ pub struct CompletionRequest {
}

/// Builder struct for constructing a completion request.
///
///
/// Example usage:
/// ```rust
/// use rig::{
/// providers::openai::{Client, self},
/// completion::CompletionRequestBuilder,
/// };
///
///
/// let openai = Client::new("your-openai-api-key");
/// let model = openai.completion_model(openai::GPT_4O).build();
///
///
/// // Create the completion request and execute it separately
/// let request = CompletionRequestBuilder::new(model, "Who are you?".to_string())
/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
/// .temperature(0.5)
/// .build();
///
///
/// let response = model.completion(request)
/// .await
/// .expect("Failed to get completion response");
/// ```
///
///
/// Alternatively, you can execute the completion request directly from the builder:
/// ```rust
/// use rig::{
/// providers::openai::{Client, self},
/// completion::CompletionRequestBuilder,
/// };
///
///
/// let openai = Client::new("your-openai-api-key");
/// let model = openai.completion_model(openai::GPT_4O).build();
///
///
/// // Create the completion request and execute it directly
/// let response = CompletionRequestBuilder::new(model, "Who are you?".to_string())
/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
@@ -282,7 +282,7 @@ pub struct CompletionRequest {
/// .await
/// .expect("Failed to get completion response");
/// ```
///
///
/// Note: It is usually unnecessary to create a completion request builder directly.
/// Instead, use the [CompletionModel::completion_request] method.
pub struct CompletionRequestBuilder<M: CompletionModel> {
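The builder examples above cover the low-level path; below is a minimal sketch of the high-level `Prompt` and `Chat` traits these docs describe. The `Message { role, content }` field names and the `Vec<Message>` history type are assumptions about this crate version, not facts shown in the diff:

```rust
use rig::completion::{Chat, Message, Prompt};

// `model` can be any `Prompt + Chat` implementor (e.g. a Model or Agent).
async fn demo(model: impl Prompt + Chat) -> Result<(), Box<dyn std::error::Error>> {
    // Simple prompt: string in, string out; tool calls are resolved internally.
    let answer = model.prompt("Who are you?").await?;
    println!("{answer}");

    // Chat: same idea, with caller-supplied history.
    // The `Message` field names here are an assumption about this crate version.
    let history = vec![Message {
        role: "user".to_string(),
        content: "My name is Zaphod.".to_string(),
    }];
    let reply = model.chat("What is my name?", history).await?;
    println!("{reply}");

    Ok(())
}
```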
8 changes: 4 additions & 4 deletions rig-core/src/embeddings.rs
@@ -134,10 +134,10 @@ impl Embedding {
///
/// The struct is designed to model any kind of documents that can be serialized to JSON
/// (including a simple string).
///
/// Moreover, it can hold multiple embeddings for the same document, thus allowing a
/// large document to be retrieved from a query that matches multiple smaller and
/// distinct text documents. For example, if the document is a textbook, a summary of
///
/// Moreover, it can hold multiple embeddings for the same document, thus allowing a
/// large document to be retrieved from a query that matches multiple smaller and
/// distinct text documents. For example, if the document is a textbook, a summary of
/// each chapter could serve as the book's embeddings.
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct DocumentEmbeddings {
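To make the "multiple embeddings per document" idea concrete, here is a small self-contained sketch of the retrieval logic it enables, written with hypothetical `Doc` and `cosine` helpers rather than the crate's own `DocumentEmbeddings` or vector-store API:

```rust
/// Hypothetical stand-in for illustration only; not the crate's `DocumentEmbeddings`.
struct Doc {
    id: String,
    embeddings: Vec<Vec<f64>>, // e.g. one embedding per chapter summary
}

/// Cosine similarity between two equal-length vectors.
fn cosine(a: &[f64], b: &[f64]) -> f64 {
    let dot: f64 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let na = a.iter().map(|x| x * x).sum::<f64>().sqrt();
    let nb = b.iter().map(|x| x * x).sum::<f64>().sqrt();
    if na == 0.0 || nb == 0.0 { 0.0 } else { dot / (na * nb) }
}

/// Score each document by its best-matching embedding, so a query that matches
/// any one chapter summary can retrieve the whole book.
fn best_match<'a>(query: &[f64], docs: &'a [Doc]) -> Option<(&'a str, f64)> {
    docs.iter()
        .map(|d| {
            let score = d
                .embeddings
                .iter()
                .map(|e| cosine(query, e))
                .fold(f64::MIN, f64::max);
            (d.id.as_str(), score)
        })
        .max_by(|a, b| a.1.total_cmp(&b.1))
}
```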
4 changes: 2 additions & 2 deletions rig-core/src/extractor.rs
@@ -1,6 +1,6 @@
//! This module provides high-level abstractions for extracting structured data from text using LLMs.
//!
//! Note: The target structure must implement the `serde::Deserialize`, `serde::Serialize`,
//!
//! Note: The target structure must implement the `serde::Deserialize`, `serde::Serialize`,
//! and `schemars::JsonSchema` traits. Those can be easily derived using the `derive` macro.
//!
//! # Example
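The crate's own example is collapsed in this view; as a stand-in, here is a minimal sketch of an extraction target carrying the three derives the note requires (the struct name and fields are illustrative, and the extractor construction itself is omitted because it is not shown in this diff):

```rust
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

// The three traits named in the doc comment, all derivable.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
struct Person {
    /// The person's full name. (schemars turns field doc comments into
    /// descriptions that ride along with the generated JSON schema.)
    name: String,
    /// Age in years, if mentioned in the text.
    age: Option<u8>,
}
```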
6 changes: 3 additions & 3 deletions rig-core/src/lib.rs
@@ -52,11 +52,11 @@
//! system prompt to full blown RAG systems that can be used to answer questions using a knowledgebase.
//! Here is a quick summary of each:
//! - [Model](crate::model::Model): A simple LLM model that can be prompted directly. This structs acts
//! as a thin wrapper around a completion model (i.e.: a struct implementing the [CompletionModel](crate::completion::CompletionModel) trait).
//! as a thin wrapper around a completion model (i.e.: a struct implementing the [CompletionModel](crate::completion::CompletionModel) trait).
//! - [Agent](crate::agent::Agent): An LLM model combined with a preamble (i.e.: system prompt) and a
//! static set of context documents and tools.
//! static set of context documents and tools.
//! - [RagAgent](crate::rag::RagAgent): A RAG system that can be used to answer questions using a knowledgebase
//! containing both context documents and tools.
//! containing both context documents and tools.
//!
//! ## Vector stores and indexes
//! Rig provides a common interface for working with vector stores and indexes. Specifically, the library
14 changes: 7 additions & 7 deletions rig-core/src/model.rs
@@ -1,12 +1,12 @@
//! This module contains the implementation of the [Model] struct and its builder.
//!
//! The [Model] type is the simplest building block for creating an LLM powered application
//! and can be used to prompt completions from a completion model. This struct acts as a
//! thin wrapper around a completion model (i.e.: a struct implementing the
//! and can be used to prompt completions from a completion model. This struct acts as a
//! thin wrapper around a completion model (i.e.: a struct implementing the
//! [CompletionModel](crate::completion::CompletionModel) trait).
//!
//! The [ModelBuilder] struct provides a builder interface for creating [Model] instances
//! and allows the user to set the underlying model and other common parameters such as
//! and allows the user to set the underlying model and other common parameters such as
//! the temperature of the model.
//!
//! # Example
@@ -28,19 +28,19 @@
//! let chat_response = agent.chat("Prompt", chat_history)
//! .await
//! .expect("Failed to chat with model");
//!
//!
//! // Generate a prompt completion response from a simple prompt
//! let chat_response = agent.prompt("Prompt")
//! .await
//! .expect("Failed to prompt the model");
//!
//!
//! // Generate a completion request builder from a prompt and chat history. The builder
//! // will contain the model's configuration (i.e.: model parameters, etc.), but these
//! // will contain the model's configuration (i.e.: model parameters, etc.), but these
//! // can be overwritten.
//! let completion_req_builder = agent.completion("Prompt", chat_history)
//! .await
//! .expect("Failed to create completion request builder");
//!
//!
//! let response = completion_req_builder
//! .temperature(0.9) // Overwrite the model's temperature
//! .send()
2 changes: 1 addition & 1 deletion rig-core/src/tool.rs
@@ -180,7 +180,7 @@ pub trait ToolEmbeddingDyn: ToolDyn {

impl<T: ToolEmbedding> ToolEmbeddingDyn for T {
fn context(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(&self.context())
serde_json::to_value(self.context())
}

fn embedding_docs(&self) -> Vec<String> {
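The single code change in this file drops a redundant borrow: `serde_json::to_value` takes its argument by value through a generic `T: Serialize` bound, and `&T` serializes identically, so the extra `&` added nothing. It is exactly the kind of small lint the newly added clippy job is meant to catch.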
6 changes: 3 additions & 3 deletions rig-mongodb/src/lib.rs
@@ -83,11 +83,11 @@ impl MongoDbVectorStore {
}

/// Create a new `MongoDbVectorIndex` from an existing `MongoDbVectorStore`.
///
///
/// The index (of type "vector") must already exist for the MongoDB collection.
/// See the MongoDB [documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-type/) for more information on creating indexes.
///
/// An additional filter can be provided to further restrict the documents that are
///
/// An additional filter can be provided to further restrict the documents that are
/// considered in the search.
pub fn index<M: EmbeddingModel>(
&self,
