From 9d4487836c3da7100cf2e833cd0789b902080cd6 Mon Sep 17 00:00:00 2001
From: Haiping Chen
Date: Thu, 30 Nov 2023 21:59:05 -0600
Subject: [PATCH] Add Temperature

---
 .../Providers/ChatCompletionProvider.cs       | 34 ++++++++++++-------
 .../templates/response_with_function.liquid   |  9 +++--
 2 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/src/Plugins/BotSharp.Plugin.GoogleAI/Providers/ChatCompletionProvider.cs b/src/Plugins/BotSharp.Plugin.GoogleAI/Providers/ChatCompletionProvider.cs
index 175f810ee..9e98e9b4b 100644
--- a/src/Plugins/BotSharp.Plugin.GoogleAI/Providers/ChatCompletionProvider.cs
+++ b/src/Plugins/BotSharp.Plugin.GoogleAI/Providers/ChatCompletionProvider.cs
@@ -1,14 +1,12 @@
 using BotSharp.Abstraction.Agents;
 using BotSharp.Abstraction.Agents.Enums;
-using BotSharp.Abstraction.Conversations;
 using BotSharp.Abstraction.Loggers;
 using BotSharp.Abstraction.Functions.Models;
 using BotSharp.Abstraction.Routing;
 using BotSharp.Plugin.GoogleAI.Settings;
 using LLMSharp.Google.Palm;
 using Microsoft.Extensions.Logging;
-using System.Diagnostics.Metrics;
-using static System.Net.Mime.MediaTypeNames;
+using LLMSharp.Google.Palm.DiscussService;
 
 namespace BotSharp.Plugin.GoogleAI.Providers;
 
@@ -39,19 +37,25 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> con
 
         var client = new GooglePalmClient(apiKey: _settings.PaLM.ApiKey);
 
-        var (prompt, messages) = PrepareOptions(agent, conversations);
+        var (prompt, messages, hasFunctions) = PrepareOptions(agent, conversations);
 
         RoleDialogModel msg;
 
-        if (messages == null)
+        if (hasFunctions)
         {
             // use text completion
-            var response = client.GenerateTextAsync(prompt, null).Result;
+            // var response = client.GenerateTextAsync(prompt, null).Result;
+            var response = client.ChatAsync(new PalmChatCompletionRequest
+            {
+                Context = prompt,
+                Messages = messages,
+                Temperature = 0.1f
+            }).Result;
 
             var message = response.Candidates.First();
 
             // check if returns function calling
-            var llmResponse = message.Output.JsonContent();
+            var llmResponse = message.Content.JsonContent();
 
             msg = new RoleDialogModel(llmResponse.Role, llmResponse.Content)
             {
@@ -79,13 +83,14 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> con
         Task.WaitAll(hooks.Select(hook =>
             hook.AfterGenerated(msg, new TokenStatsModel
             {
+                Prompt = prompt,
                 Model = _model
             })).ToArray());
 
         return msg;
     }
 
-    private (string, List<PalmChatMessage>) PrepareOptions(Agent agent, List<RoleDialogModel> conversations)
+    private (string, List<PalmChatMessage>, bool) PrepareOptions(Agent agent, List<RoleDialogModel> conversations)
     {
         var prompt = "";
 
@@ -99,6 +104,9 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> con
         var routing = _services.GetRequiredService<IRoutingService>();
         var router = routing.Router;
 
+        var messages = conversations.Select(c => new PalmChatMessage(c.Content, c.Role == AgentRole.User ? "user" : "AI"))
+            .ToList();
+
         if (agent.Functions != null && agent.Functions.Count > 0)
         {
             prompt += "\r\n\r\n[Functions] defined in JSON Schema:\r\n";
@@ -118,13 +126,13 @@ public RoleDialogModel GetChatCompletions(Agent agent, List<RoleDialogModel> con
 
             prompt += "\r\n\r\n" + router.Templates.FirstOrDefault(x => x.Name == "response_with_function").Content;
 
-            return (prompt, null);
+            return (prompt, new List<PalmChatMessage>
+            {
+                new PalmChatMessage("Which function should be used for the next step based on the latest user or function response, output your response in JSON:", AgentRole.User),
+            }, true);
         }
 
-        var messages = conversations.Select(c => new PalmChatMessage(c.Content, c.Role == AgentRole.User ? "user" : "AI"))
-            .ToList();
-
-        return (prompt, messages);
+        return (prompt, messages, false);
     }
 
     public Task<bool> GetChatCompletionsAsync(Agent agent, List<RoleDialogModel> conversations, Func<RoleDialogModel, Task> onMessageReceived, Func<RoleDialogModel, Task> onFunctionExecuting)
diff --git a/src/WebStarter/data/agents/01fcc3e5-9af7-49e6-ad7a-a760bd12dc4a/templates/response_with_function.liquid b/src/WebStarter/data/agents/01fcc3e5-9af7-49e6-ad7a-a760bd12dc4a/templates/response_with_function.liquid
index b9e971729..d45771b3c 100644
--- a/src/WebStarter/data/agents/01fcc3e5-9af7-49e6-ad7a-a760bd12dc4a/templates/response_with_function.liquid
+++ b/src/WebStarter/data/agents/01fcc3e5-9af7-49e6-ad7a-a760bd12dc4a/templates/response_with_function.liquid
@@ -1,10 +1,9 @@
+[Output Requirements]
 1. Read the [Functions] definition, you can utilize the function to retrieve data or execute actions.
-2. Think step by step, check if specific function will provider data to help complete user request based on the [Conversation].
+2. Think step by step, check if a specific function will provide data to help complete the user request based on the conversation.
 3. If you need to call a function to decide how to response user, response in format:
 {"role": "function", "reason":"why choose this function", "function_name": "", "args": {}},
 otherwise response in format: {"role": "assistant", "reason":"why response to user", "content":"next step question"}.
-4. If the [Conversation] already contains the function execution result, don't need to call it again.
+4. If the conversation already contains the function execution result, there is no need to call it again.
 5. If user mentioned some specific requirment, don't ask this question in your response.
-6. Don't repeat the same question in your response.
-
-Which function should be used for the next step based on latest user's response, output your response in JSON:
\ No newline at end of file
+6. Don't repeat the same question in your response.
\ No newline at end of file