Add features to docs.rs generation
To test run `RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features --no-deps`
boydjohnson committed Nov 3, 2024
1 parent 273cf2a commit 242934d
Showing 9 changed files with 25 additions and 0 deletions.
4 changes: 4 additions & 0 deletions Cargo.toml
@@ -41,3 +41,7 @@ ollama-rs = { path = ".", features = [
     "function-calling",
 ] }
 base64 = "0.22.0"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
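
Together with the crate-root change in src/lib.rs below, this metadata is what drives the feature badges: docs.rs builds the crate with every feature enabled (`all-features = true`) and passes `--cfg docsrs` to rustdoc, which activates the `doc(cfg(...))` annotations added throughout this commit. A minimal sketch of the per-item pattern, using a hypothetical module `foo`:

// The first attribute only affects builds with `--cfg docsrs`: rustdoc then
// labels the item "Available on crate feature `stream` only." The second is
// the ordinary gate that compiles the item out when the feature is off.
#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
#[cfg(feature = "stream")]
pub mod foo {}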
1 change: 1 addition & 0 deletions src/generation.rs
@@ -1,6 +1,7 @@
 pub mod chat;
 pub mod completion;
 pub mod embeddings;
+#[cfg_attr(docsrs, doc(cfg(feature = "function-calling")))]
 #[cfg(feature = "function-calling")]
 pub mod functions;
 pub mod images;
5 changes: 5 additions & 0 deletions src/generation/chat/mod.rs
@@ -7,15 +7,18 @@ pub mod request;
 use super::images::Image;
 use request::ChatMessageRequest;
 
+#[cfg_attr(docsrs, doc(cfg(feature = "chat-history")))]
 #[cfg(feature = "chat-history")]
 use crate::history::MessagesHistory;
 
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
 #[cfg(feature = "stream")]
 /// A stream of `ChatMessageResponse` objects
 pub type ChatMessageResponseStream =
     std::pin::Pin<Box<dyn tokio_stream::Stream<Item = Result<ChatMessageResponse, ()>> + Send>>;
 
 impl Ollama {
+    #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
     #[cfg(feature = "stream")]
     /// Chat message generation with streaming.
     /// Returns a stream of `ChatMessageResponse` objects
@@ -101,8 +104,10 @@ impl Ollama {
     }
 }
 
+#[cfg_attr(docsrs, doc(cfg(feature = "chat-history")))]
 #[cfg(feature = "chat-history")]
 impl Ollama {
+    #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
     #[cfg(feature = "stream")]
     pub async fn send_chat_messages_with_history_stream(
         &mut self,
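
For reference, a hedged sketch of driving the `ChatMessageResponseStream` annotated above (assumes the `stream` feature, that `send_chat_messages_stream` and the `ChatMessage::user` constructor exist with these shapes, and that `ChatMessageResponse` implements `Debug`):

use ollama_rs::generation::chat::{request::ChatMessageRequest, ChatMessage};
use ollama_rs::Ollama;
use tokio_stream::StreamExt;

async fn stream_chat() -> Result<(), Box<dyn std::error::Error>> {
    let ollama = Ollama::default();
    let request = ChatMessageRequest::new(
        "llama2".to_string(),
        vec![ChatMessage::user("Why is the sky blue?".to_string())],
    );
    // Each item is a Result<ChatMessageResponse, ()>, per the alias above.
    let mut stream = ollama.send_chat_messages_stream(request).await?;
    while let Some(Ok(response)) = stream.next().await {
        println!("{response:?}");
    }
    Ok(())
}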
2 changes: 2 additions & 0 deletions src/generation/completion/mod.rs
@@ -6,6 +6,7 @@ use request::GenerationRequest;
 
 pub mod request;
 
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
 #[cfg(feature = "stream")]
 /// A stream of `GenerationResponse` objects
 pub type GenerationResponseStream = std::pin::Pin<
Expand All @@ -16,6 +17,7 @@ pub type GenerationResponseStream = std::pin::Pin<
pub type GenerationResponseStreamChunk = Vec<GenerationResponse>;

impl Ollama {
#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
#[cfg(feature = "stream")]
/// Completion generation with streaming.
/// Returns a stream of `GenerationResponse` objects
2 changes: 2 additions & 0 deletions src/generation/functions/mod.rs
@@ -19,6 +19,7 @@ use crate::generation::functions::pipelines::RequestParserBase;
 use crate::generation::functions::tools::Tool;
 use std::sync::Arc;
 
+#[cfg_attr(docsrs, doc(cfg(feature = "function-calling")))]
 #[cfg(feature = "function-calling")]
 impl crate::Ollama {
     fn has_system_prompt(&self, messages: &[ChatMessage], system_prompt: &str) -> bool {
Expand All @@ -30,6 +31,7 @@ impl crate::Ollama {
self.get_messages_history("default").is_some()
}

#[cfg_attr(docsrs, doc(cfg(feature = "chat-history")))]
#[cfg(feature = "chat-history")]
pub async fn send_function_call_with_history(
&mut self,
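
Note that `send_function_call_with_history` now sits behind two stacked gates: the enclosing module and impl require `function-calling`, and the method itself additionally requires `chat-history`. A hypothetical single-attribute equivalent (not part of this commit), should the requirement ever need expressing in one place:

// One attribute expressing that an item needs both features at once.
#[cfg_attr(docsrs, doc(cfg(all(feature = "function-calling", feature = "chat-history"))))]
#[cfg(all(feature = "function-calling", feature = "chat-history"))]
pub async fn function_call_with_history_sketch() { /* ... */ }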
5 changes: 5 additions & 0 deletions src/lib.rs
@@ -1,11 +1,16 @@
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[cfg_attr(docsrs, doc(cfg(feature = "chat-history")))]
 #[cfg(feature = "chat-history")]
 use crate::history::WrappedMessageHistory;
 use url::Url;
 
 pub mod error;
 pub mod generation;
+#[cfg_attr(docsrs, doc(cfg(feature = "headers")))]
 #[cfg(feature = "headers")]
 pub mod headers;
+#[cfg_attr(docsrs, doc(cfg(feature = "chat-history")))]
 #[cfg(feature = "chat-history")]
 pub mod history;
 pub mod models;
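
The crate-root attribute is what keeps the rest buildable on stable: `doc_cfg` is an unstable rustdoc feature, so it is enabled only under `--cfg docsrs` (set by docs.rs via the Cargo metadata above, or manually with the command from the commit message). A minimal sketch of a crate root arranged the same way, with a hypothetical `extra` feature:

// lib.rs of a hypothetical crate. `feature(doc_cfg)` is nightly-only, so it
// is gated behind `--cfg docsrs`; ordinary builds still compile on stable.
#![cfg_attr(docsrs, feature(doc_cfg))]

#[cfg_attr(docsrs, doc(cfg(feature = "extra")))]
#[cfg(feature = "extra")]
pub fn extra_only() {}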
2 changes: 2 additions & 0 deletions src/models/create.rs
@@ -3,12 +3,14 @@ use serde::{Deserialize, Serialize};
 use crate::Ollama;
 
 /// A stream of `CreateModelStatus` objects
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
 #[cfg(feature = "stream")]
 pub type CreateModelStatusStream = std::pin::Pin<
     Box<dyn tokio_stream::Stream<Item = crate::error::Result<CreateModelStatus>> + Send>,
 >;
 
 impl Ollama {
+    #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
     #[cfg(feature = "stream")]
     /// Create a model with streaming, meaning that each new status will be streamed.
     pub async fn create_model_stream(
2 changes: 2 additions & 0 deletions src/models/pull.rs
@@ -3,12 +3,14 @@ use serde::{Deserialize, Serialize};
 use crate::Ollama;
 
 /// A stream of `PullModelStatus` objects.
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
 #[cfg(feature = "stream")]
 pub type PullModelStatusStream = std::pin::Pin<
     Box<dyn tokio_stream::Stream<Item = crate::error::Result<PullModelStatus>> + Send>,
 >;
 
 impl Ollama {
+    #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
     #[cfg(feature = "stream")]
     /// Pull a model with streaming, meaning that each new status will be streamed.
     /// - `model_name` - The name of the model to pull.
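
A similar hedged sketch for the model-management side (assumes the `stream` feature, that `pull_model_stream` takes a model name plus an `allow_insecure` flag, and that `PullModelStatus` implements `Debug`):

use ollama_rs::Ollama;
use tokio_stream::StreamExt;

async fn pull_with_progress() -> Result<(), Box<dyn std::error::Error>> {
    let ollama = Ollama::default();
    let mut statuses = ollama.pull_model_stream("llama2".to_string(), false).await?;
    // Each item is a crate::error::Result<PullModelStatus>, per the alias above.
    while let Some(Ok(status)) = statuses.next().await {
        println!("{status:?}");
    }
    Ok(())
}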
2 changes: 2 additions & 0 deletions src/models/push.rs
@@ -3,12 +3,14 @@ use serde::{Deserialize, Serialize};
 use crate::Ollama;
 
 /// A stream of `PushModelStatus` objects.
+#[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
 #[cfg(feature = "stream")]
 pub type PushModelStatusStream = std::pin::Pin<
     Box<dyn tokio_stream::Stream<Item = crate::error::Result<PushModelStatus>> + Send>,
 >;
 
 impl Ollama {
+    #[cfg_attr(docsrs, doc(cfg(feature = "stream")))]
     #[cfg(feature = "stream")]
     /// Upload a model to a model library. Requires registering for ollama.ai and adding a public key first.
     /// Push a model with streaming, meaning that each new status will be streamed.
