diff --git a/rig-core/examples/gemini_agent.rs b/rig-core/examples/gemini_agent.rs
index f221886a..978a7cff 100644
--- a/rig-core/examples/gemini_agent.rs
+++ b/rig-core/examples/gemini_agent.rs
@@ -12,23 +12,22 @@ async fn main() -> Result<(), anyhow::Error> {
     // Create agent with a single context prompt
     let agent = client
         .agent(gemini::completion::GEMINI_1_5_PRO)
-        .preamble("Be precise and concise.")
+        .preamble("Be creative and concise. Answer directly and clearly.")
         .temperature(0.5)
         .max_tokens(8192)
-        .additional_params(
-            serde_json::to_value(GenerationConfig {
-                top_k: Some(1),
-                top_p: Some(0.95),
-                candidate_count: Some(1),
-                ..Default::default()
-            })
-            .unwrap(),
-        ) // Unwrap the Result to get the Value
+        .additional_params(serde_json::to_value(GenerationConfig {
+            top_k: Some(1),
+            top_p: Some(0.95),
+            candidate_count: Some(1),
+            ..Default::default()
+        })?) // Unwrap the Result to get the Value
         .build();
 
+    tracing::info!("Prompting the agent...");
+
     // Prompt the agent and print the response
     let response = agent
-        .prompt("How much wood would a woodchuck chuck if a woodchuck could chuck wood?")
+        .prompt("How much wood would a woodchuck chuck if a woodchuck could chuck wood? Infer an answer.")
         .await?;
 
     println!("{}", response);
diff --git a/rig-core/src/providers/gemini/completion.rs b/rig-core/src/providers/gemini/completion.rs
index 12cd87be..c535a56c 100644
--- a/rig-core/src/providers/gemini/completion.rs
+++ b/rig-core/src/providers/gemini/completion.rs
@@ -36,7 +36,6 @@ pub struct GenerateContentResponse {
     pub usage_metadata: Option<UsageMetadata>,
 }
 
-// Define the struct for a Candidate
 #[derive(Debug, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct ContentCandidate {
@@ -341,7 +340,7 @@ pub struct GenerateContentRequest {
     pub safety_settings: Option<Vec<SafetySetting>>,
     /// Optional. Developer set system instruction(s). Currently, text only.
     /// https://ai.google.dev/gemini-api/docs/system-instructions?lang=rest
-    pub system_instruction: Option,
+    pub system_instruction: Option<Content>,
     // cachedContent: Optional
 }
@@ -414,17 +413,7 @@ impl completion::CompletionModel for CompletionModel {
         &self,
         mut completion_request: CompletionRequest,
     ) -> Result<completion::CompletionResponse<GenerateContentResponse>, CompletionError> {
-        // QUESTION: Why do Anthropic/openAi implementation differ here? OpenAI adds the preamble but Anthropic does not.
-
-        let mut full_history = if let Some(preamble) = &completion_request.preamble {
-            vec![completion::Message {
-                role: "system".into(),
-                content: preamble.clone(),
-            }]
-        } else {
-            vec![]
-        };
-
+        let mut full_history = Vec::new();
        full_history.append(&mut completion_request.chat_history);
 
         let prompt_with_context = completion_request.prompt_with_context();
@@ -471,9 +460,16 @@
                     .collect(),
             ),
             tool_config: None,
-            system_instruction: None,
+            system_instruction: Some(Content {
+                parts: vec![Part {
+                    text: "system".to_string(),
+                }],
+                role: Some("system".to_string()),
+            }),
         };
 
+        tracing::info!("Request: {:?}", request);
+
         let response = self
             .client
             .post(&format!("/v1beta/models/{}:generateContent", self.model))
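
The completion.rs hunks above stop injecting the preamble into the chat history and repoint it at Gemini's systemInstruction field, but as committed the instruction text is the hard-coded placeholder "system". Below is a minimal sketch of how the preamble would presumably flow into that field instead; it assumes only the Content and Part shapes shown in the diff, and the helper name is hypothetical, not part of the change:

    // Hypothetical helper (not in the diff): build the systemInstruction Content
    // from the request preamble instead of a fixed placeholder string.
    fn system_instruction_from_preamble(preamble: Option<&str>) -> Option<Content> {
        preamble.map(|text| Content {
            // One text part carrying the preamble, mirroring the Part literal above.
            parts: vec![Part {
                text: text.to_string(),
            }],
            role: Some("system".to_string()),
        })
    }

In the request construction this would presumably replace the literal Some(Content { ... }) with system_instruction: system_instruction_from_preamble(completion_request.preamble.as_deref()), since the removed history-building code shows the preamble is an Option<String> on the request.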