Skip to content

Commit

Permalink
AI chat: Added the option for bots to respond to other bots.
Browse files Browse the repository at this point in the history
  • Loading branch information
mostlikely4r committed Nov 19, 2024
1 parent 10e3b1e commit 0bd52d6
Show file tree
Hide file tree
Showing 4 changed files with 17 additions and 7 deletions.
1 change: 1 addition & 0 deletions playerbot/PlayerbotAIConfig.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -611,6 +611,7 @@ bool PlayerbotAIConfig::Initialize()
std::replace(llmResponseEndPattern.begin(), llmResponseEndPattern.end(), '\'', '\"');

llmPreventTalkingForPlayer = config.GetBoolDefault("AiPlayerbot.LLMPreventTalkingForPlayer", false);
llmBotToBotChatChance = config.GetIntDefault("AiPlayerbot.LLMBotToBotChatChance", 0);

//LLM END

Expand Down
2 changes: 1 addition & 1 deletion playerbot/PlayerbotAIConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,7 @@ class PlayerbotAIConfig

//LM BEGIN
std::string llmApiEndpoint, llmApiKey, llmApiJson, llmPrePrompt, llmPrompt, llmPostPrompt, llmResponseStartPattern, llmResponseEndPattern;
uint32 llmContextLength;
uint32 llmContextLength, llmBotToBotChatChance;
bool llmPreventTalkingForPlayer;
ParsedUrl llmEndPointUrl;
//LM END
Expand Down
19 changes: 14 additions & 5 deletions playerbot/aiplayerbot.conf.dist.in
Original file line number Diff line number Diff line change
Expand Up @@ -936,25 +936,34 @@ AiPlayerbot.PerfMonEnabled = 0
# The api key needed to access the endpoint.
# AiPlayerbot.LLMApiKey =
# The default json to send to the endpoint.
# AiPlayerbot.LLMApiJson = {"max_length": 100, "prompt": "<pre prompt><prompt><post prompt>"}
# AiPlayerbot.LLMApiJson = {"max_length": 100, "prompt": "[<pre prompt>]<context> <prompt> <post prompt>"}

# The max context length allowed for the model. 0 to disable. This will shrink the context (all previous conversations) to make the entire prompt <pre prompt><context><prompt><post prompt> fit.
# AiPlayerbot.LLMContextLength = 4096

# The default prompt to send at the beginning of each conversation.
# AiPlayerbot.LLMPrePrompt = You are a roleplaying character in World of Warcraft: <expansion name>. Your name is <bot name>. The player speaking to you is named <player name>. You are level <bot level> and play as a <bot race> <bot class>. Answer as a roleplaying character. Limit responses to 100 characters.
# AiPlayerbot.LLMPrePrompt = You are a roleplaying character in World of Warcraft: <expansion name>. Your name is <bot name>. The player <player name> is speaking to you <channel name> and is an <player race> <player class> of level <player level>. You are level <bot level> and play as a <bot race> <bot class> that is currently in <bot zone>. Answer as a roleplaying character. Limit responses to 100 characters.
# The prompt part containing the last message from the player the bot is responding to.
# AiPlayerbot.LLMPrompt = <player message>
# AiPlayerbot.LLMPrompt = <player name>:<player message>
# The default prompt to send at the end of each conversation.
# AiPlayerbot.LLMPostPrompt =
# AiPlayerbot.LLMPostPrompt = <bot name>:

# What pattern the server should look for to find the start of the response the LLM gave. Note: spaces in the actual response are ignored when matching the start pattern.
# Double quotes at the start and end of the pattern need to be single quotes to be read correctly.
# AiPlayerbot.LLMResponseStartPattern = 'results":[{"text":'
# What pattern the server should look for to determine that the response has ended.
# AiPlayerbot.LLMResponseEndPattern = '

# Stop parsing the LLM response the moment it finds the pattern <player name>: to stop it impersonating the player.
# AiPlayerbot.LLMPreventTalkingForPlayer = 0

# The percentage chance (0-100) that bots will use AI chat to respond to other bots.
# AiPlayerbot.LLMBotToBotChatChance = 0

# OPEN-AI example:
# AiPlayerbot.LLMApiEndpoint = http://IP/URL:PORT/v1/chat/completions
# AiPlayerbot.LLMApiKey = YOUR_API_KEY
# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \"<pre prompt>\"},{\"role\": \"user\", \"content\": \"<prompt>\"}],\"max_tokens\": 60}
# AiPlayerbot.LLMApiJson = {\"model\": \"gpt-4o-mini\", "\"messages\": [{\"role\": \"system\", \"content\": \"<pre prompt> <context>\"},{\"role\": \"user\", \"content\": \"<prompt> <post prompt>\"}],\"max_tokens\": 60}
# AiPlayerbot.LLMPrePrompt = You are a roleplaying character in World of Warcraft: <expansion name>. Your name is <bot name>. The player speaking to you is named <player name>. You are level <bot level> and play as a <bot race> <bot class>. Answer as a roleplaying character. Limit responses to 100 characters.
# AiPlayerbot.LLMPrompt = <player message>
# AiPlayerbot.LLMPostPrompt =
Expand Down
2 changes: 1 addition & 1 deletion playerbot/strategy/actions/SayAction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ void ChatReplyAction::ChatReplyDo(Player* bot, uint32 type, uint32 guid1, uint32
if (bot->GetPlayerbotAI() && bot->GetPlayerbotAI()->HasStrategy("ai chat", BotState::BOT_STATE_NON_COMBAT))
{
Player* player = sObjectAccessor.FindPlayer(ObjectGuid(HIGHGUID_PLAYER, guid1));
if (player && player->isRealPlayer())
if (player && (player->isRealPlayer() || (sPlayerbotAIConfig.llmBotToBotChatChance && urand(0,99) < sPlayerbotAIConfig.llmBotToBotChatChance)))
{
PlayerbotAI* ai = bot->GetPlayerbotAI();
AiObjectContext* context = ai->GetAiObjectContext();
Expand Down

0 comments on commit 0bd52d6

Please sign in to comment.