Skip to content

Commit

Permalink
AI chat: OpenAI API compatibility fix
Browse files Browse the repository at this point in the history
  • Loading branch information
mostlikely4r committed Nov 21, 2024
1 parent 7e3268a commit 5fb42c9
Show file tree
Hide file tree
Showing 4 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion playerbot/PlayerbotLLMInterface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ std::string PlayerbotLLMInterface::Generate(const std::string& prompt) {
request << "Host: " << parsedUrl.hostname << "\r\n";
request << "Content-Type: application/json\r\n";
if (!sPlayerbotAIConfig.llmApiKey.empty())
request << "Authorization: Bearer " << sPlayerbotAIConfig.llmApiKey;
request << "Authorization: Bearer " << sPlayerbotAIConfig.llmApiKey << "\r\n";
std::string body = prompt;
request << "Content-Length: " << body.size() << "\r\n";
request << "\r\n";
Expand Down
2 changes: 1 addition & 1 deletion playerbot/aiplayerbot.conf.dist.in
Original file line number Diff line number Diff line change
Expand Up @@ -964,7 +964,7 @@ AiPlayerbot.PerfMonEnabled = 0
# OPEN-AI example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \"<pre prompt> <context>\"},{\"role\": \"user\", \"content\": \"<prompt>\"}],\"max_tokens\": 60}
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0
Expand Down
2 changes: 1 addition & 1 deletion playerbot/aiplayerbot.conf.dist.in.tbc
Original file line number Diff line number Diff line change
Expand Up @@ -983,7 +983,7 @@ AiPlayerbot.PerfMonEnabled = 0
# OPEN-AI example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \"<pre prompt> <context>\"},{\"role\": \"user\", \"content\": \"<prompt>\"}],\"max_tokens\": 60}
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0
2 changes: 1 addition & 1 deletion playerbot/aiplayerbot.conf.dist.in.wotlk
Original file line number Diff line number Diff line change
Expand Up @@ -923,7 +923,7 @@ AiPlayerbot.PerfMonEnabled = 0
# OPEN-AI example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \"<pre prompt> <context>\"},{\"role\": \"user\", \"content\": \"<prompt>\"}],\"max_tokens\": 60}
# AiPlayerbot.LLMApiJson = {"model": "gpt-4o-mini", "messages": [{"role": "system", "content": "<pre prompt> <context>"},{"role": "user", "content": "<prompt>"}],"max_tokens": 60}

# Mystery config value. Currently enables async bot pathfinding. Rarely crashes the server.
# AiPlayerbot.TweakValue = 0

0 comments on commit 5fb42c9

Please sign in to comment.