Skip to content

Commit

Permalink
AI chat: Better support for OPEN-AI (chat completions) format
Browse files Browse the repository at this point in the history
  • Loading branch information
mostlikely4r committed Nov 22, 2024
1 parent 12ed638 commit ada8abe
Show file tree
Hide file tree
Showing 5 changed files with 36 additions and 3 deletions.
29 changes: 29 additions & 0 deletions playerbot/PlayerbotLLMInterface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,31 @@
#include <errno.h>
#endif

// Escapes a string so it can be embedded safely inside a JSON string literal
// (RFC 8259): quote, backslash, the named control escapes, and any remaining
// control character below U+0020 as a \u00XX sequence. All other bytes are
// passed through unchanged.
std::string PlayerbotLLMInterface::SanitizeForJson(const std::string& input) {
    std::string sanitized;
    sanitized.reserve(input.size()); // avoid repeated reallocation; output is usually ~input size
    for (char c : input) {
        switch (c) {
        case '\"': sanitized += "\\\""; break;
        case '\\': sanitized += "\\\\"; break;
        case '\b': sanitized += "\\b"; break;
        case '\f': sanitized += "\\f"; break;
        case '\n': sanitized += "\\n"; break;
        case '\r': sanitized += "\\r"; break;
        case '\t': sanitized += "\\t"; break;
        default: {
            // Compare as unsigned: with a signed char, bytes >= 0x80 (e.g. UTF-8
            // continuation bytes) are negative and would wrongly satisfy c < 0x20,
            // then sign-extend in snprintf and emit a garbage/truncated escape.
            unsigned char uc = static_cast<unsigned char>(c);
            if (uc < 0x20) {
                char buffer[8]; // "\u00XX" + NUL fits; extra byte of headroom
                snprintf(buffer, sizeof(buffer), "\\u%04x", uc);
                sanitized += buffer;
            }
            else {
                sanitized += c;
            }
        }
        }
    }
    return sanitized;
}

inline void SetNonBlockingSocket(int sock) {
#ifdef _WIN32
u_long mode = 1;
Expand Down Expand Up @@ -131,6 +156,9 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt, std::vect
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
if (getaddrinfo(parsedUrl.hostname.c_str(), std::to_string(parsedUrl.port).c_str(), &hints, &res) != 0) {
if (debug)
debugLines.push_back("Failed to resolve hostname");

sLog.outError("BotLLM: Failed to resolve hostname");
#ifdef _WIN32
WSACleanup();
Expand All @@ -156,6 +184,7 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt, std::vect
if (sock < 0) {
if (debug)
debugLines.push_back("Socket creation failed");

sLog.outError("BotLLM: Socket creation failed");
freeaddrinfo(res);
return "error";
Expand Down
1 change: 1 addition & 0 deletions playerbot/PlayerbotLLMInterface.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ class PlayerbotLLMInterface
{
public:
PlayerbotLLMInterface() {}
static std::string SanitizeForJson(const std::string& input);

static std::string Generate(const std::string& prompt, std::vector<std::string>& debugLines);

Expand Down
3 changes: 2 additions & 1 deletion playerbot/aiplayerbot.conf.dist.in
Original file line number Diff line number Diff line change
Expand Up @@ -965,10 +965,11 @@ AiPlayerbot.PerfMonEnabled = 0
# The chance bots will use ai chat to respond to other bots
# AiPlayerbot.LLMBotToBotChatChance = 0

# OPEN-AI example:
# OpenAI (chat completions) example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}
# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0
Expand Down
3 changes: 2 additions & 1 deletion playerbot/aiplayerbot.conf.dist.in.tbc
Original file line number Diff line number Diff line change
Expand Up @@ -984,10 +984,11 @@ AiPlayerbot.PerfMonEnabled = 0
# The chance bots will use ai chat to respond to other bots
# AiPlayerbot.LLMBotToBotChatChance = 0

# OPEN-AI example:
# OpenAI (chat completions) example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}
# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0
3 changes: 2 additions & 1 deletion playerbot/aiplayerbot.conf.dist.in.wotlk
Original file line number Diff line number Diff line change
Expand Up @@ -924,10 +924,11 @@ AiPlayerbot.PerfMonEnabled = 0
# The chance bots will use ai chat to respond to other bots
# AiPlayerbot.LLMBotToBotChatChance = 0

# OPEN-AI example:
# OpenAI (chat completions) example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}
# AiPlayerbot.LLMResponseStartPattern = ("content":\s*")

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0

0 comments on commit ada8abe

Please sign in to comment.