Skip to content

Commit

Permalink
fix docs
Browse files Browse the repository at this point in the history
  • Loading branch information
santiagomed committed Dec 9, 2023
1 parent 4bea280 commit 6e20eb3
Show file tree
Hide file tree
Showing 8 changed files with 64 additions and 71 deletions.
26 changes: 13 additions & 13 deletions orca-core/src/llm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,11 @@ pub trait LLM: Sync + Send {
/// # Examples
/// This example uses the OpenAI chat models.
/// ```
/// use orca::llm::LLM;
/// use orca::prompt::Prompt;
/// use orca::template;
/// use orca::llm::openai::OpenAI;
/// use orca::prompt::TemplateEngine;
/// use orca_core::llm::LLM;
/// use orca_core::prompt::Prompt;
/// use orca_core::template;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::prompt::TemplateEngine;
///
/// #[tokio::main]
/// async fn main() {
Expand Down Expand Up @@ -62,9 +62,9 @@ pub trait Embedding {
/// # Examples
/// This example uses the OpenAI chat models.
/// ```
/// # use orca::prompt;
/// # use orca::llm::Embedding;
/// # use orca::llm::openai::OpenAI;
/// # use orca_core::prompt;
/// # use orca_core::llm::Embedding;
/// # use orca_core::llm::openai::OpenAI;
/// # #[tokio::main]
/// # async fn main() {
/// let client = OpenAI::new();
Expand All @@ -84,10 +84,10 @@ pub trait Embedding {
/// # Example
/// This example uses the Bert model.
/// ```
/// # use orca::prompts;
/// # use orca::llm::Embedding;
/// # use orca::llm::bert::Bert;
/// # use orca::prompt::Prompt;
/// # use orca_core::prompts;
/// # use orca_core::llm::Embedding;
/// # use orca_core::llm::bert::Bert;
/// # use orca_core::prompt::Prompt;
/// # #[tokio::main]
/// # async fn main() {
/// let bert = Bert::new().build_model_and_tokenizer().await.unwrap();
Expand Down Expand Up @@ -255,7 +255,7 @@ impl Default for EmbeddingResponse {
///
/// # Examples
/// ```
/// use orca::llm::device;
/// use orca_core::llm::device;
///
/// // Use a CPU device
/// let cpu_device = device(true).unwrap();
Expand Down
46 changes: 23 additions & 23 deletions orca-core/src/pipeline/simple.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,10 @@ impl<M: LLM + Clone + 'static> LLMPipeline<M> {
///
/// # Examples
/// ```rust
/// use orca::llm::openai::OpenAI;
/// use orca::llm::LLM;
/// use orca::prompt::TemplateEngine;
/// use orca::pipeline::simple::LLMPipeline;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::llm::LLM;
/// use orca_core::prompt::TemplateEngine;
/// use orca_core::pipeline::simple::LLMPipeline;
///
/// let client = OpenAI::new();
/// let prompt = "Hello, LLM!";
Expand All @@ -66,11 +66,11 @@ impl<M: LLM + Clone + 'static> LLMPipeline<M> {
///
/// # Examples
/// ```rust
/// use orca::llm::openai::OpenAI;
/// use orca::llm::LLM;
/// use orca::prompt::TemplateEngine;
/// use orca::pipeline::simple::LLMPipeline;
/// use orca::template;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::llm::LLM;
/// use orca_core::prompt::TemplateEngine;
/// use orca_core::pipeline::simple::LLMPipeline;
/// use orca_core::template;
///
/// let client = OpenAI::new();
/// let prompt = "Hello, LLM!";
Expand All @@ -94,11 +94,11 @@ impl<M: LLM + Clone + 'static> LLMPipeline<M> {
///
/// # Example
/// ```rust
/// use orca::llm::openai::OpenAI;
/// use orca::llm::LLM;
/// use orca::prompt::TemplateEngine;
/// use orca::pipeline::simple::LLMPipeline;
/// use orca::template;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::llm::LLM;
/// use orca_core::prompt::TemplateEngine;
/// use orca_core::pipeline::simple::LLMPipeline;
/// use orca_core::template;
///
/// let client = OpenAI::new();
/// let prompt = "Hello, LLM!";
Expand Down Expand Up @@ -128,11 +128,11 @@ impl<M: LLM + Clone + 'static> LLMPipeline<M> {
///
/// # Examples
/// ```rust
/// use orca::llm::openai::OpenAI;
/// use orca::llm::LLM;
/// use orca::prompt::TemplateEngine;
/// use orca::pipeline::simple::LLMPipeline;
/// use orca::memory::ChatBuffer;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::llm::LLM;
/// use orca_core::prompt::TemplateEngine;
/// use orca_core::pipeline::simple::LLMPipeline;
/// use orca_core::memory::ChatBuffer;
///
/// let client = OpenAI::new();
/// let prompt = "Hello, LLM!";
Expand All @@ -154,10 +154,10 @@ impl<M: LLM + Clone + 'static> LLMPipeline<M> {
/// # Examples
///
/// ```
/// use orca::pipeline::Pipeline;
/// use orca::llm::openai::OpenAI;
/// use orca::prompt::context::Context;
/// use orca::pipeline::simple::LLMPipeline;
/// use orca_core::pipeline::Pipeline;
/// use orca_core::llm::openai::OpenAI;
/// use orca_core::prompt::context::Context;
/// use orca_core::pipeline::simple::LLMPipeline;
/// use std::collections::HashMap;
///
/// # #[tokio::main]
Expand Down
24 changes: 12 additions & 12 deletions orca-core/src/prompt/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ impl TemplateEngine {
///
/// # Example
/// ```
/// use orca::prompt::TemplateEngine;
/// use orca_core::prompt::TemplateEngine;
/// let prompt = TemplateEngine::new();
/// ```
pub fn new() -> TemplateEngine {
Expand Down Expand Up @@ -79,7 +79,7 @@ impl TemplateEngine {
///
/// # Example
/// ```
/// use orca::prompt::TemplateEngine;
/// use orca_core::prompt::TemplateEngine;
///
/// let mut prompt = TemplateEngine::new().register_template("template", "Welcome!").unwrap();
/// prompt.add_to_template("template", "Hello, world!");
Expand Down Expand Up @@ -111,7 +111,7 @@ impl TemplateEngine {
/// # Example
/// ```
/// use serde_json::json;
/// use orca::prompt::TemplateEngine;
/// use orca_core::prompt::TemplateEngine;
///
/// let prompt = TemplateEngine::new().register_template("template", "{{#if true}}Hello, world!{{/if}}").unwrap();
/// let result = prompt.render("template").unwrap();
Expand All @@ -137,7 +137,7 @@ impl TemplateEngine {
/// # Example
/// ```
/// use serde_json::json;
/// use orca::prompt::TemplateEngine;
/// use orca_core::prompt::TemplateEngine;
///
/// let prompt = TemplateEngine::new().register_template("template", "Hello, {{name}}!").unwrap();
/// let data = json!({"name": "world"});
Expand Down Expand Up @@ -181,8 +181,8 @@ impl TemplateEngine {
/// # Example
/// ```
/// # use serde_json::json;
/// # use orca::prompt::TemplateEngine;
/// # use orca::prompt::chat::{Role, Message, ChatPrompt};
/// # use orca_core::prompt::TemplateEngine;
/// # use orca_core::prompt::chat::{Role, Message, ChatPrompt};
///
/// let prompt = TemplateEngine::new().register_template("template", "{{#system}}Hello, {{name}}!{{/system}}").unwrap();
/// let data = json!({"name": "world"});
Expand Down Expand Up @@ -226,8 +226,8 @@ pub trait Prompt: Sync + Send + Display {
///
/// # Examples
/// ```
/// use orca::prompt;
/// use orca::prompt::Prompt;
/// use orca_core::prompt;
/// use orca_core::prompt::Prompt;
///
/// let mut my_prompt = prompt!("Some prompt");
/// let another_prompt = prompt!("Some other prompt");
Expand All @@ -253,8 +253,8 @@ pub trait Prompt: Sync + Send + Display {
///
/// # Examples
/// ```
/// use orca::prompt;
/// use orca::prompt::Prompt;
/// use orca_core::prompt;
/// use orca_core::prompt::Prompt;
///
/// let my_prompt = prompt!("Some prompt");
/// let cloned_prompt = my_prompt.clone_prompt();
Expand Down Expand Up @@ -313,8 +313,8 @@ macro_rules! prompts {
($e:expr) => {{
$e
.into_iter()
.map(|x| Box::new(x.clone()) as Box<dyn orca::prompt::Prompt>)
.collect::<Vec<Box<dyn orca::prompt::Prompt>>>()
.map(|x| Box::new(x.clone()) as Box<dyn crate::prompt::Prompt>)
.collect::<Vec<Box<dyn crate::prompt::Prompt>>>()
}};
($($e:expr),* $(,)?) => {
{
Expand Down
12 changes: 6 additions & 6 deletions orca-core/src/qdrant.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ impl Qdrant {
///
/// # Example
/// ```
/// use orca::qdrant::Qdrant;
/// use orca_core::qdrant::Qdrant;
///
/// let client = Qdrant::new("http://localhost:6334").unwrap();
/// ```
Expand All @@ -116,7 +116,7 @@ impl Qdrant {
///
/// # Example
/// ```no_run
/// # use orca::qdrant::Qdrant;
/// # use orca_core::qdrant::Qdrant;
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Qdrant::new("http://localhost:6334").unwrap();
Expand Down Expand Up @@ -150,7 +150,7 @@ impl Qdrant {
///
/// # Example
/// ```no_run
/// # use orca::qdrant::Qdrant;
/// # use orca_core::qdrant::Qdrant;
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let client = Qdrant::new("http://localhost:6334").unwrap();
Expand All @@ -172,7 +172,7 @@ impl Qdrant {
///
/// # Examples
/// ```no_run
/// # use orca::qdrant::Qdrant;
/// # use orca_core::qdrant::Qdrant;
/// # use serde::{Serialize, Deserialize};
/// # #[derive(Serialize, Deserialize)]
/// # struct MyPayload {
Expand Down Expand Up @@ -215,7 +215,7 @@ impl Qdrant {
/// # Examples
///
/// ```no_run
/// # use orca::qdrant::Qdrant;
/// # use orca_core::qdrant::Qdrant;
/// # use std::error::Error;
/// #
/// # #[tokio::main]
Expand Down Expand Up @@ -266,7 +266,7 @@ impl Qdrant {
///
/// # Example
/// ```no_run
/// # use orca::qdrant::{Qdrant, Condition};
/// # use orca_core::qdrant::{Qdrant, Condition};
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Qdrant::new("http://localhost:6334").unwrap();
Expand Down
10 changes: 5 additions & 5 deletions orca-core/src/record/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,8 @@ impl Record {
///
/// # Example
/// ```
/// # use orca::record::Record;
/// # use orca::record::Content;
/// # use orca_core::record::Record;
/// # use orca_core::record::Content;
/// let record = Record::new(Content::String("Hello World".into()));
/// let records = record.split(5);
/// assert_eq!(records.len(), 2);
Expand Down Expand Up @@ -131,9 +131,9 @@ impl Record {
///
/// # Example
/// ```no_run
/// # use orca::record::Record;
/// # use orca::record::Content;
/// # use orca::record::Tokenizer;
/// # use orca_core::record::Record;
/// # use orca_core::record::Content;
/// # use orca_core::record::Tokenizer;
/// # use std::path::Path;
/// let record = Record::new(Content::String("Hello World".into()));
/// let records = record.split_with_tokenizer(2, Tokenizer::Huggingface("path_to_tokenizer".into())).unwrap();
Expand Down
4 changes: 2 additions & 2 deletions orca-core/src/record/pdf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ impl Pdf {
/// Create a new Pdf record from a buffer
/// When calling this function, specify the PDF generic type as a slice of bytes
/// ```
/// use orca::record::pdf::Pdf;
/// use orca_core::record::pdf::Pdf;
/// use base64::{engine::general_purpose, Engine};
/// use std::io::Read;
///
Expand All @@ -48,7 +48,7 @@ impl Pdf {
/// Create a new PDF record from a file
/// When calling this function, specify the PDF generic type as a vector of bytes
/// ```
/// use orca::record::pdf::Pdf;
/// use orca_core::record::pdf::Pdf;
///
/// let record = Pdf::from_file("./tests/records/sample-resume.pdf", false);
/// ```
Expand Down
7 changes: 0 additions & 7 deletions orca-models/src/openai/completions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -137,13 +137,6 @@ impl OpenAI {
self
}

/// Set emedding model to use
/// e.g. "text-embedding-ada-002"
pub fn with_emedding_model(mut self, emedding_model: &str) -> Self {
self.emedding_model = emedding_model.to_string();
self
}

/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
/// while lower values like 0.2 will make it more focused and deterministic.
pub fn with_temperature(mut self, temperature: f32) -> Self {
Expand Down
6 changes: 3 additions & 3 deletions orca-models/src/openai/embeddings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,8 @@ impl OpenAI {

/// Set embedding model to use
/// e.g. "text-embedding-ada-002"
pub fn with_model(mut self, emedding_model: &str) -> Self {
self.emedding_model = emedding_model.to_string();
pub fn with_model(mut self, model: &str) -> Self {
self.model = model.to_string();
self
}

Expand Down Expand Up @@ -160,7 +160,7 @@ impl OpenAI {
pub fn generate_request(&self, prompt: &str) -> Result<reqwest::Request> {
let payload = Payload {
model: self.emedding_model.clone(),
model: self.model.clone(),
input: prompt.to_string(),
};

Expand Down

0 comments on commit 6e20eb3

Please sign in to comment.