From ada8abedcd151f05158966603cbcb6d916312f90 Mon Sep 17 00:00:00 2001
From: mostlikely4r
Date: Fri, 22 Nov 2024 14:40:59 +0100
Subject: [PATCH] AI chat: Better support for OPEN-AI (chat completions) format

---
 playerbot/PlayerbotLLMInterface.cpp      | 29 ++++++++++++++++++++++++
 playerbot/PlayerbotLLMInterface.h        |  1 +
 playerbot/aiplayerbot.conf.dist.in       |  3 ++-
 playerbot/aiplayerbot.conf.dist.in.tbc   |  3 ++-
 playerbot/aiplayerbot.conf.dist.in.wotlk |  3 ++-
 5 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/playerbot/PlayerbotLLMInterface.cpp b/playerbot/PlayerbotLLMInterface.cpp
index 45b05e49..f54674ea 100644
--- a/playerbot/PlayerbotLLMInterface.cpp
+++ b/playerbot/PlayerbotLLMInterface.cpp
@@ -25,6 +25,31 @@
 #include
 #endif
 
+std::string PlayerbotLLMInterface::SanitizeForJson(const std::string& input) {
+    std::string sanitized;
+    for (char c : input) {
+        switch (c) {
+        case '\"': sanitized += "\\\""; break;
+        case '\\': sanitized += "\\\\"; break;
+        case '\b': sanitized += "\\b"; break;
+        case '\f': sanitized += "\\f"; break;
+        case '\n': sanitized += "\\n"; break;
+        case '\r': sanitized += "\\r"; break;
+        case '\t': sanitized += "\\t"; break;
+        default:
+            if (static_cast<unsigned char>(c) < 0x20) {
+                char buffer[7];
+                snprintf(buffer, sizeof(buffer), "\\u%04x", static_cast<unsigned char>(c));
+                sanitized += buffer;
+            }
+            else {
+                sanitized += c;
+            }
+        }
+    }
+    return sanitized;
+}
+
 inline void SetNonBlockingSocket(int sock) {
 #ifdef _WIN32
     u_long mode = 1;
@@ -131,6 +156,9 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt, std::vect
     hints.ai_family = AF_INET;
     hints.ai_socktype = SOCK_STREAM;
     if (getaddrinfo(parsedUrl.hostname.c_str(), std::to_string(parsedUrl.port).c_str(), &hints, &res) != 0) {
+        if (debug)
+            debugLines.push_back("Failed to resolve hostname");
+
         sLog.outError("BotLLM: Failed to resolve hostname");
 #ifdef _WIN32
         WSACleanup();
@@ -156,6 +184,7 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt, std::vect
     if (sock < 0) {
         if (debug)
             debugLines.push_back("Socket creation failed");
+        sLog.outError("BotLLM: Socket creation failed");
 
         freeaddrinfo(res);
         return "error";
diff --git a/playerbot/PlayerbotLLMInterface.h b/playerbot/PlayerbotLLMInterface.h
index 365ee302..b54f63ba 100644
--- a/playerbot/PlayerbotLLMInterface.h
+++ b/playerbot/PlayerbotLLMInterface.h
@@ -2,6 +2,7 @@ class PlayerbotLLMInterface
 {
 public:
     PlayerbotLLMInterface() {}
+    static std::string SanitizeForJson(const std::string& input);
 
     static std::string Generate(const std::string& prompt, std::vector<std::string>& debugLines);
 
diff --git a/playerbot/aiplayerbot.conf.dist.in b/playerbot/aiplayerbot.conf.dist.in
index 9d33079f..0996c1e9 100644
--- a/playerbot/aiplayerbot.conf.dist.in
+++ b/playerbot/aiplayerbot.conf.dist.in
@@ -965,10 +965,11 @@ AiPlayerbot.PerfMonEnabled = 0
 # The chance bots will use ai chat to respond to other bots 
 # AiPlayerbot.LLMBotToBotChatChance = 0
 
-# OPEN-AI example:
+# OPEN-AI (chat completions) example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
 # AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}
+# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0
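
For reference, a minimal sketch of how the SanitizeForJson function added above might feed the LLMApiJson template before the HTTP request is written. It assumes the <prompt> placeholder convention from the config example; the ReplacePlaceholder helper and BuildRequestBody function are hypothetical illustrations, not part of this patch.

// Hypothetical usage sketch, not part of this patch.
#include <string>
#include "PlayerbotLLMInterface.h"

// Hypothetical helper: replace the first occurrence of `tag` with `value`.
static std::string ReplacePlaceholder(std::string templ, const std::string& tag, const std::string& value)
{
    size_t pos = templ.find(tag);
    if (pos != std::string::npos)
        templ.replace(pos, tag.size(), value);
    return templ;
}

std::string BuildRequestBody(const std::string& apiJson, const std::string& playerLine)
{
    // SanitizeForJson escapes quotes, backslashes and control characters
    // (a newline becomes \n, byte 0x01 becomes \u0001), so player-typed
    // text cannot terminate the surrounding JSON string literal early.
    std::string safe = PlayerbotLLMInterface::SanitizeForJson(playerLine);
    return ReplacePlaceholder(apiJson, "<prompt>", safe);
}
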
diff --git a/playerbot/aiplayerbot.conf.dist.in.tbc b/playerbot/aiplayerbot.conf.dist.in.tbc
index 854ef616..602ca5ec 100644
--- a/playerbot/aiplayerbot.conf.dist.in.tbc
+++ b/playerbot/aiplayerbot.conf.dist.in.tbc
@@ -984,10 +984,11 @@ AiPlayerbot.PerfMonEnabled = 0
 # The chance bots will use ai chat to respond to other bots 
 # AiPlayerbot.LLMBotToBotChatChance = 0
 
-# OPEN-AI example:
+# OPEN-AI (chat completions) example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
 # AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "
 "},{"role": "user", "content": ""}],"max_tokens": 60}
+# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0
diff --git a/playerbot/aiplayerbot.conf.dist.in.wotlk b/playerbot/aiplayerbot.conf.dist.in.wotlk
index 5aaddacd..b49b133e 100644
--- a/playerbot/aiplayerbot.conf.dist.in.wotlk
+++ b/playerbot/aiplayerbot.conf.dist.in.wotlk
@@ -924,10 +924,11 @@ AiPlayerbot.PerfMonEnabled = 0
 # The chance bots will use ai chat to respond to other bots 
 # AiPlayerbot.LLMBotToBotChatChance = 0
 
-# OPEN-AI example:
+# OPEN-AI (chat completions) example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
 # AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "
 "},{"role": "user", "content": ""}],"max_tokens": 60}
+# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0
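
For completeness, a sketch of how the new AiPlayerbot.LLMResponseStartPattern default could be applied to locate the generated text in a chat-completions response body. The ExtractReply function is illustrative only: it stops at the first unescaped quote and leaves JSON escapes such as \n and \uXXXX undecoded, which a real parser would handle.

// Illustrative only: apply the configured start pattern ("content":\s*")
// to find where the assistant text begins in the raw HTTP response body.
#include <regex>
#include <string>

std::string ExtractReply(const std::string& body)
{
    // Same regex as the config default above; the match ends where the
    // JSON string value of "content" begins.
    static const std::regex start("(\"content\":\\s*\")");
    std::smatch m;
    if (!std::regex_search(body, m, start))
        return "";

    std::string out;
    for (size_t i = m.position(0) + m.length(0); i < body.size(); ++i)
    {
        char c = body[i];
        if (c == '\\' && i + 1 < body.size()) { out += body[++i]; continue; } // keep escaped char raw
        if (c == '"') break; // first unescaped quote ends the value
        out += c;
    }
    return out;
}
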