From 55ab68fe3043f4d3a15fe8fc1640764b195c1ee2 Mon Sep 17 00:00:00 2001 From: Joshua Fitzmaurice Date: Thu, 22 Aug 2024 18:46:16 +0100 Subject: [PATCH 1/2] Added private_view input to hide response (only works on slash command) as well as fixed issue with .reply --- cogs/commands/summarise.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cogs/commands/summarise.py b/cogs/commands/summarise.py index 291c49d..b064c34 100644 --- a/cogs/commands/summarise.py +++ b/cogs/commands/summarise.py @@ -42,10 +42,9 @@ def build_prompt(self, bullet_points, channel_name): @commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel) @commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT) async def tldr( - self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False ): + self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False, private_view: bool = False): number_of_messages = 400 if number_of_messages > 400 else number_of_messages - - + # avoid banned users if not await is_author_banned_openai(ctx): await ctx.send("You are banned from OpenAI!") @@ -60,9 +59,8 @@ async def tldr( async with ctx.typing(): response = await self.dispatch_api(messages) if response: - prev = ctx.message for content in split_into_messages(response): - prev = await prev.reply(content, allowed_mentions=mentions) + await ctx.send(content, allowed_mentions=mentions, ephemeral=private_view) async def dispatch_api(self, messages) -> Optional[str]: logging.info(f"Making OpenAI request: {messages}") From 6750f1835dcaadc1c8df19d6666f62a4101e6b82 Mon Sep 17 00:00:00 2001 From: Joshua Fitzmaurice Date: Thu, 22 Aug 2024 19:40:18 +0100 Subject: [PATCH 2/2] reverted back to .reply making fix to prev = ctx. 
Created optional context manager as being in ctx.typing() makes ephemeral not possible --- cogs/commands/summarise.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cogs/commands/summarise.py b/cogs/commands/summarise.py index b064c34..c80edf1 100644 --- a/cogs/commands/summarise.py +++ b/cogs/commands/summarise.py @@ -1,3 +1,4 @@ +import contextlib import logging from typing import Optional @@ -38,13 +39,19 @@ def build_prompt(self, bullet_points, channel_name): prompt = f"""People yap too much, I don't want to read all of it. The topic is something related to {channel_name}. In 2 sentences or less give me the gist of what is being said. {bullet_points} Note that the messages are in reverse chronological order: """ return prompt + + def optional_context_manager(self, use: bool, cm: callable): + if use: + return cm() + + return contextlib.nullcontext() @@ commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel) @commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT) async def tldr( self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False, private_view: bool = False): number_of_messages = 400 if number_of_messages > 400 else number_of_messages - + # avoid banned users if not await is_author_banned_openai(ctx): await ctx.send("You are banned from OpenAI!") @@ -56,11 +63,13 @@ async def tldr( messages = await self.create_message(messages, prompt) # send the prompt to the ai overlords to process - async with ctx.typing(): + async with self.optional_context_manager(not private_view, ctx.typing): response = await self.dispatch_api(messages) if response: + prev = ctx for content in split_into_messages(response): - await ctx.send(content, allowed_mentions=mentions, ephemeral=private_view) + prev = await prev.reply(content, allowed_mentions=mentions, ephemeral=private_view) + async def dispatch_api(self, messages) -> Optional[str]: logging.info(f"Making 
OpenAI request: {messages}")