diff --git a/playerbot/PlayerbotLLMInterface.cpp b/playerbot/PlayerbotLLMInterface.cpp
index f456d8e2..2942cf3e 100644
--- a/playerbot/PlayerbotLLMInterface.cpp
+++ b/playerbot/PlayerbotLLMInterface.cpp
@@ -88,7 +88,7 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt) {
     request << "Host: " << parsedUrl.hostname << "\r\n";
     request << "Content-Type: application/json\r\n";
     if (!sPlayerbotAIConfig.llmApiKey.empty())
-        request << "Authorization: Bearer " << sPlayerbotAIConfig.llmApiKey;
+        request << "Authorization: Bearer " << sPlayerbotAIConfig.llmApiKey << "\r\n";
     std::string body = prompt;
     request << "Content-Length: " << body.size() << "\r\n";
     request << "\r\n";
diff --git a/playerbot/aiplayerbot.conf.dist.in b/playerbot/aiplayerbot.conf.dist.in
index 53db3c72..e83a31ae 100644
--- a/playerbot/aiplayerbot.conf.dist.in
+++ b/playerbot/aiplayerbot.conf.dist.in
@@ -964,7 +964,7 @@ AiPlayerbot.PerfMonEnabled = 0
 # OPEN-AI example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
-# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \" \"},{\"role\": \"user\", \"content\": \"\"}],\"max_tokens\": 60}
+# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": " "},{"role": "user", "content": ""}],"max_tokens": 60}
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0
diff --git a/playerbot/aiplayerbot.conf.dist.in.tbc b/playerbot/aiplayerbot.conf.dist.in.tbc
index 7c9b1062..e2e0cca6 100644
--- a/playerbot/aiplayerbot.conf.dist.in.tbc
+++ b/playerbot/aiplayerbot.conf.dist.in.tbc
@@ -983,7 +983,7 @@ AiPlayerbot.PerfMonEnabled = 0
 # OPEN-AI example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
-# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \" \"},{\"role\": \"user\", \"content\": \"\"}],\"max_tokens\": 60}
+# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": " "},{"role": "user", "content": ""}],"max_tokens": 60}
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0
diff --git a/playerbot/aiplayerbot.conf.dist.in.wotlk b/playerbot/aiplayerbot.conf.dist.in.wotlk
index 3bdbecd9..b2c8117e 100644
--- a/playerbot/aiplayerbot.conf.dist.in.wotlk
+++ b/playerbot/aiplayerbot.conf.dist.in.wotlk
@@ -923,7 +923,7 @@ AiPlayerbot.PerfMonEnabled = 0
 # OPEN-AI example:
 # AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
 # AiPlayerbot.LLMApiKey = YOUR_API_KEY
-# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \" \"},{\"role\": \"user\", \"content\": \"\"}],\"max_tokens\": 60}
+# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": " "},{"role": "user", "content": ""}],"max_tokens": 60}
 
 # Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
 # AiPlayerbot.TweakValue = 0