From 6e20eb3c94a75b9659d13e3bbfe279b2626a82c4 Mon Sep 17 00:00:00 2001
From: Santiago Medina
Date: Fri, 8 Dec 2023 22:38:34 -0800
Subject: [PATCH] fix docs

---
 orca-core/src/llm/mod.rs              | 26 +++++++--------
 orca-core/src/pipeline/simple.rs      | 46 +++++++++++++--------------
 orca-core/src/prompt/mod.rs           | 24 +++++++-------
 orca-core/src/qdrant.rs               | 12 +++----
 orca-core/src/record/mod.rs           | 10 +++---
 orca-core/src/record/pdf.rs           |  4 +--
 orca-models/src/openai/completions.rs |  7 ----
 orca-models/src/openai/embeddings.rs  |  6 ++--
 8 files changed, 64 insertions(+), 71 deletions(-)

diff --git a/orca-core/src/llm/mod.rs b/orca-core/src/llm/mod.rs
index 42938ef..451f89f 100644
--- a/orca-core/src/llm/mod.rs
+++ b/orca-core/src/llm/mod.rs
@@ -22,11 +22,11 @@ pub trait LLM: Sync + Send {
     /// # Examples
     /// This example uses the OpenAI chat models.
     /// ```
-    /// use orca::llm::LLM;
-    /// use orca::prompt::Prompt;
-    /// use orca::template;
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::prompt::TemplateEngine;
+    /// use orca_core::llm::LLM;
+    /// use orca_core::prompt::Prompt;
+    /// use orca_core::template;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::prompt::TemplateEngine;
     ///
     /// #[tokio::main]
     /// async fn main() {
@@ -62,9 +62,9 @@ pub trait Embedding {
     /// # Examples
     /// This example uses the OpenAI chat models.
     /// ```
-    /// # use orca::prompt;
-    /// # use orca::llm::Embedding;
-    /// # use orca::llm::openai::OpenAI;
+    /// # use orca_core::prompt;
+    /// # use orca_core::llm::Embedding;
+    /// # use orca_core::llm::openai::OpenAI;
     /// # #[tokio::main]
     /// # async fn main() {
     /// let client = OpenAI::new();
@@ -84,10 +84,10 @@ pub trait Embedding {
     /// # Example
     /// This example uses the Bert model.
     /// ```
-    /// # use orca::prompts;
-    /// # use orca::llm::Embedding;
-    /// # use orca::llm::bert::Bert;
-    /// # use orca::prompt::Prompt;
+    /// # use orca_core::prompts;
+    /// # use orca_core::llm::Embedding;
+    /// # use orca_core::llm::bert::Bert;
+    /// # use orca_core::prompt::Prompt;
     /// # #[tokio::main]
     /// # async fn main() {
     /// let bert = Bert::new().build_model_and_tokenizer().await.unwrap();
@@ -255,7 +255,7 @@ impl Default for EmbeddingResponse {
 ///
 /// # Examples
 /// ```
-/// use orca::llm::device;
+/// use orca_core::llm::device;
 ///
 /// // Use a CPU device
 /// let cpu_device = device(true).unwrap();
diff --git a/orca-core/src/pipeline/simple.rs b/orca-core/src/pipeline/simple.rs
index e26ab51..a0d11c4 100644
--- a/orca-core/src/pipeline/simple.rs
+++ b/orca-core/src/pipeline/simple.rs
@@ -41,10 +41,10 @@ impl LLMPipeline {
     ///
     /// # Examples
     /// ```rust
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::llm::LLM;
-    /// use orca::prompt::TemplateEngine;
-    /// use orca::pipeline::simple::LLMPipeline;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::llm::LLM;
+    /// use orca_core::prompt::TemplateEngine;
+    /// use orca_core::pipeline::simple::LLMPipeline;
     ///
     /// let client = OpenAI::new();
     /// let prompt = "Hello, LLM!";
@@ -66,11 +66,11 @@ impl LLMPipeline {
     ///
     /// # Examples
     /// ```rust
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::llm::LLM;
-    /// use orca::prompt::TemplateEngine;
-    /// use orca::pipeline::simple::LLMPipeline;
-    /// use orca::template;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::llm::LLM;
+    /// use orca_core::prompt::TemplateEngine;
+    /// use orca_core::pipeline::simple::LLMPipeline;
+    /// use orca_core::template;
     ///
     /// let client = OpenAI::new();
     /// let prompt = "Hello, LLM!";
@@ -94,11 +94,11 @@ impl LLMPipeline {
     ///
     /// # Example
     /// ```rust
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::llm::LLM;
-    /// use orca::prompt::TemplateEngine;
-    /// use orca::pipeline::simple::LLMPipeline;
-    /// use orca::template;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::llm::LLM;
+    /// use orca_core::prompt::TemplateEngine;
+    /// use orca_core::pipeline::simple::LLMPipeline;
+    /// use orca_core::template;
     ///
     /// let client = OpenAI::new();
     /// let prompt = "Hello, LLM!";
@@ -128,11 +128,11 @@ impl LLMPipeline {
     ///
     /// # Examples
     /// ```rust
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::llm::LLM;
-    /// use orca::prompt::TemplateEngine;
-    /// use orca::pipeline::simple::LLMPipeline;
-    /// use orca::memory::ChatBuffer;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::llm::LLM;
+    /// use orca_core::prompt::TemplateEngine;
+    /// use orca_core::pipeline::simple::LLMPipeline;
+    /// use orca_core::memory::ChatBuffer;
     ///
     /// let client = OpenAI::new();
     /// let prompt = "Hello, LLM!";
@@ -154,10 +154,10 @@ impl LLMPipeline {
     /// # Examples
     ///
    /// ```
-    /// use orca::pipeline::Pipeline;
-    /// use orca::llm::openai::OpenAI;
-    /// use orca::prompt::context::Context;
-    /// use orca::pipeline::simple::LLMPipeline;
+    /// use orca_core::pipeline::Pipeline;
+    /// use orca_core::llm::openai::OpenAI;
+    /// use orca_core::prompt::context::Context;
+    /// use orca_core::pipeline::simple::LLMPipeline;
     /// use std::collections::HashMap;
     ///
     /// # #[tokio::main]
diff --git a/orca-core/src/prompt/mod.rs b/orca-core/src/prompt/mod.rs
index eb89ae8..6f8decc 100644
--- a/orca-core/src/prompt/mod.rs
+++ b/orca-core/src/prompt/mod.rs
@@ -40,7 +40,7 @@ impl TemplateEngine {
     ///
     /// # Example
     /// ```
-    /// use orca::prompt::TemplateEngine;
+    /// use orca_core::prompt::TemplateEngine;
     /// let prompt = TemplateEngine::new();
     /// ```
     pub fn new() -> TemplateEngine {
@@ -79,7 +79,7 @@ impl TemplateEngine {
     ///
     /// # Example
     /// ```
-    /// use orca::prompt::TemplateEngine;
+    /// use orca_core::prompt::TemplateEngine;
     ///
     /// let mut prompt = TemplateEngine::new().register_template("template", "Welcome!").unwrap();
     /// prompt.add_to_template("template", "Hello, world!");
@@ -111,7 +111,7 @@ impl TemplateEngine {
     /// # Example
     /// ```
     /// use serde_json::json;
-    /// use orca::prompt::TemplateEngine;
+    /// use orca_core::prompt::TemplateEngine;
     ///
     /// let prompt = TemplateEngine::new().register_template("template", "{{#if true}}Hello, world!{{/if}}").unwrap();
     /// let result = prompt.render("template").unwrap();
@@ -137,7 +137,7 @@ impl TemplateEngine {
     /// # Example
     /// ```
     /// use serde_json::json;
-    /// use orca::prompt::TemplateEngine;
+    /// use orca_core::prompt::TemplateEngine;
     ///
     /// let prompt = TemplateEngine::new().register_template("template", "Hello, {{name}}!").unwrap();
     /// let data = json!({"name": "world"});
@@ -181,8 +181,8 @@ impl TemplateEngine {
     /// # Example
     /// ```
     /// # use serde_json::json;
-    /// # use orca::prompt::TemplateEngine;
-    /// # use orca::prompt::chat::{Role, Message, ChatPrompt};
+    /// # use orca_core::prompt::TemplateEngine;
+    /// # use orca_core::prompt::chat::{Role, Message, ChatPrompt};
     ///
     /// let prompt = TemplateEngine::new().register_template("template", "{{#system}}Hello, {{name}}!{{/system}}").unwrap();
     /// let data = json!({"name": "world"});
@@ -226,8 +226,8 @@ pub trait Prompt: Sync + Send + Display {
     ///
     /// # Examples
     /// ```
-    /// use orca::prompt;
-    /// use orca::prompt::Prompt;
+    /// use orca_core::prompt;
+    /// use orca_core::prompt::Prompt;
     ///
     /// let mut my_prompt = prompt!("Some prompt");
     /// let another_prompt = prompt!("Some other prompt");
@@ -253,8 +253,8 @@ pub trait Prompt: Sync + Send + Display {
     ///
     /// # Examples
     /// ```
-    /// use orca::prompt;
-    /// use orca::prompt::Prompt;
+    /// use orca_core::prompt;
+    /// use orca_core::prompt::Prompt;
     ///
     /// let my_prompt = prompt!("Some prompt");
     /// let cloned_prompt = my_prompt.clone_prompt();
@@ -313,8 +313,8 @@ macro_rules! prompts {
     ($e:expr) => {{
         $e
             .into_iter()
-            .map(|x| Box::new(x.clone()) as Box)
-            .collect::>>()
+            .map(|x| Box::new(x.clone()) as Box)
+            .collect::>>()
     }};
     ($($e:expr),* $(,)?) => {
         {
diff --git a/orca-core/src/qdrant.rs b/orca-core/src/qdrant.rs
index 31efe77..005cfc0 100644
--- a/orca-core/src/qdrant.rs
+++ b/orca-core/src/qdrant.rs
@@ -98,7 +98,7 @@ impl Qdrant {
     ///
     /// # Example
     /// ```
-    /// use orca::qdrant::Qdrant;
+    /// use orca_core::qdrant::Qdrant;
     ///
     /// let client = Qdrant::new("http://localhost:6334").unwrap();
     /// ```
@@ -116,7 +116,7 @@ impl Qdrant {
     ///
     /// # Example
     /// ```no_run
-    /// # use orca::qdrant::Qdrant;
+    /// # use orca_core::qdrant::Qdrant;
     /// # #[tokio::main]
     /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// let client = Qdrant::new("http://localhost:6334").unwrap();
@@ -150,7 +150,7 @@ impl Qdrant {
     ///
     /// # Example
     /// ```no_run
-    /// # use orca::qdrant::Qdrant;
+    /// # use orca_core::qdrant::Qdrant;
     /// # #[tokio::main]
     /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// # let client = Qdrant::new("http://localhost:6334").unwrap();
@@ -172,7 +172,7 @@ impl Qdrant {
     ///
     /// # Examples
     /// ```no_run
-    /// # use orca::qdrant::Qdrant;
+    /// # use orca_core::qdrant::Qdrant;
     /// # use serde::{Serialize, Deserialize};
     /// # #[derive(Serialize, Deserialize)]
     /// # struct MyPayload {
@@ -215,7 +215,7 @@ impl Qdrant {
     /// # Examples
     ///
     /// ```no_run
-    /// # use orca::qdrant::Qdrant;
+    /// # use orca_core::qdrant::Qdrant;
     /// # use std::error::Error;
     /// #
     /// # #[tokio::main]
@@ -266,7 +266,7 @@ impl Qdrant {
     ///
     /// # Example
     /// ```no_run
-    /// # use orca::qdrant::{Qdrant, Condition};
+    /// # use orca_core::qdrant::{Qdrant, Condition};
     /// # #[tokio::main]
     /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// let client = Qdrant::new("http://localhost:6334").unwrap();
diff --git a/orca-core/src/record/mod.rs b/orca-core/src/record/mod.rs
index 51e9263..3d9eebf 100644
--- a/orca-core/src/record/mod.rs
+++ b/orca-core/src/record/mod.rs
@@ -86,8 +86,8 @@ impl Record {
     ///
     /// # Example
     /// ```
-    /// # use orca::record::Record;
-    /// # use orca::record::Content;
+    /// # use orca_core::record::Record;
+    /// # use orca_core::record::Content;
     /// let record = Record::new(Content::String("Hello World".into()));
     /// let records = record.split(5);
     /// assert_eq!(records.len(), 2);
@@ -131,9 +131,9 @@ impl Record {
     ///
     /// # Example
     /// ```no_run
-    /// # use orca::record::Record;
-    /// # use orca::record::Content;
-    /// # use orca::record::Tokenizer;
+    /// # use orca_core::record::Record;
+    /// # use orca_core::record::Content;
+    /// # use orca_core::record::Tokenizer;
     /// # use std::path::Path;
     /// let record = Record::new(Content::String("Hello World".into()));
     /// let records = record.split_with_tokenizer(2, Tokenizer::Huggingface("path_to_tokenizer".into())).unwrap();
diff --git a/orca-core/src/record/pdf.rs b/orca-core/src/record/pdf.rs
index 42949f5..d1fa5da 100644
--- a/orca-core/src/record/pdf.rs
+++ b/orca-core/src/record/pdf.rs
@@ -25,7 +25,7 @@ impl Pdf {
     /// Create a new Pdf record from a buffer
     /// When calling this function, specify the PDF generic type as a slice of bytes
     /// ```
-    /// use orca::record::pdf::Pdf;
+    /// use orca_core::record::pdf::Pdf;
     /// use base64::{engine::general_purpose, Engine};
     /// use std::io::Read;
     ///
@@ -48,7 +48,7 @@ impl Pdf {
     /// Create a new PDF record from a file
     /// When calling this function, specify the PDF generic type as a vector of bytes
     /// ```
-    /// use orca::record::pdf::Pdf;
+    /// use orca_core::record::pdf::Pdf;
     ///
     /// let record = Pdf::from_file("./tests/records/sample-resume.pdf", false);
     /// ```
diff --git a/orca-models/src/openai/completions.rs b/orca-models/src/openai/completions.rs
index 6dbe4cf..2d5e1bf 100644
--- a/orca-models/src/openai/completions.rs
+++ b/orca-models/src/openai/completions.rs
@@ -137,13 +137,6 @@ impl OpenAI {
         self
     }
 
-    /// Set emedding model to use
-    /// e.g. "text-embedding-ada-002"
-    pub fn with_emedding_model(mut self, emedding_model: &str) -> Self {
-        self.emedding_model = emedding_model.to_string();
-        self
-    }
-
     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
     /// while lower values like 0.2 will make it more focused and deterministic.
     pub fn with_temperature(mut self, temperature: f32) -> Self {
diff --git a/orca-models/src/openai/embeddings.rs b/orca-models/src/openai/embeddings.rs
index 85564ac..b7ef041 100644
--- a/orca-models/src/openai/embeddings.rs
+++ b/orca-models/src/openai/embeddings.rs
@@ -125,8 +125,8 @@ impl OpenAI {
 
     /// Set emedding model to use
     /// e.g. "text-embedding-ada-002"
-    pub fn with_model(mut self, emedding_model: &str) -> Self {
-        self.emedding_model = emedding_model.to_string();
+    pub fn with_model(mut self, model: &str) -> Self {
+        self.model = model.to_string();
         self
     }
 
@@ -160,7 +160,7 @@ impl OpenAI {
 
     pub fn generate_request(&self, prompt: &str) -> Result {
         let payload = Payload {
-            model: self.emedding_model.clone(),
+            model: self.model.clone(),
             input: prompt.to_string(),
         };
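
Reviewer note: apart from the mechanical `orca::` → `orca_core::` path fixes in the rustdoc examples, the substantive change is in the OpenAI builders: completions drops the misspelled `with_emedding_model`, and the embeddings `with_model` setter now reads and writes a plainly named `model` field that `generate_request` uses for the payload. A minimal sketch of the embeddings builder after this patch; the `new()` constructor is assumed by analogy with the doc examples above, and the model name is the one cited in the doc comment:

```rust
use orca_models::openai::embeddings::OpenAI;

// Hypothetical usage: construct the embeddings client and pick a model.
// After this patch, `with_model` stores the name in `self.model`, which
// `generate_request` clones into the request payload's `model` field.
let client = OpenAI::new().with_model("text-embedding-ada-002");
```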