Summarize, fix context menu actions

Kaveen Kumarasinghe 2 years ago
parent f473342bff
commit 57271f1051

@@ -633,21 +633,29 @@ class Commands(discord.Cog, name="Commands"):
             "Translations are disabled on this server.", ephemeral=True
         )
 
+    # @discord.message_command(
+    #     name="Paraphrase",
+    #     guild_ids=ALLOWED_GUILDS,
+    #     checks=[Check.check_gpt_roles()],
+    # )
+    # async def paraphrase_action(self, ctx, message: discord.Message):
+    #     await self.converser_cog.paraphrase_action(ctx, message)
+
     @discord.message_command(
-        name="Paraphrase",
+        name="Elaborate",
         guild_ids=ALLOWED_GUILDS,
         checks=[Check.check_gpt_roles()],
     )
-    async def paraphrase_action(self, ctx, message: discord.Message):
-        await self.converser_cog.paraphrase_action(ctx, message)
+    async def elaborate_action(self, ctx, message: discord.Message):
+        await self.converser_cog.elaborate_action(ctx, message)
 
     @discord.message_command(
-        name="Elaborate",
+        name="Summarize",
         guild_ids=ALLOWED_GUILDS,
         checks=[Check.check_gpt_roles()],
     )
-    async def elaborate_action(self, ctx, message: discord.Message):
-        await self.converser_cog.elaborate_action(ctx, message)
+    async def summarize_action(self, ctx, message: discord.Message):
+        await self.converser_cog.summarize_action(ctx, message)
 
     # Search slash commands
     @discord.slash_command(

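For readers unfamiliar with py-cord context menu actions: a message command like the ones renamed above is an application command that Discord shows in a message's right-click menu and invokes with that message as its argument. A minimal, hedged sketch of the registration pattern, separate from the project's cog wiring (the guild id and token below are placeholders):

import discord

bot = discord.Bot()

# Registering a message command directly on the bot; inside a cog the project
# uses the equivalent @discord.message_command(...) decorator shown above.
@bot.message_command(name="Summarize", guild_ids=[123456789012345678])  # placeholder guild id
async def summarize(ctx: discord.ApplicationContext, message: discord.Message):
    # `message` is the right-clicked message; a real handler would forward it,
    # e.g. to converser_cog.summarize_action(ctx, message).
    await ctx.respond(f"Would summarize: {message.content[:100]}", ephemeral=True)

bot.run("TOKEN")  # placeholder token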
@@ -697,7 +697,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         top_p: float,
         frequency_penalty: float,
         presence_penalty: float,
-        from_action=None,
+        from_ask_action=None,
+        from_other_action=None,
     ):
         """Command handler. Requests and returns a generation with no extras to the completion endpoint
@@ -731,7 +732,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             overrides=overrides,
             from_ask_command=True,
             custom_api_key=user_api_key,
-            from_action=from_action,
+            from_ask_action=from_ask_action,
+            from_other_action=from_other_action,
         )
 
     async def edit_command(
@@ -1056,24 +1058,61 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     async def ask_gpt_action(self, ctx, message: discord.Message):
         """Message command. Return the message"""
         prompt = await self.mention_to_username(ctx, message.content)
-        await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
+        await self.ask_command(ctx, prompt, None, None, None, None, from_ask_action=prompt)
 
     async def paraphrase_action(self, ctx, message: discord.Message):
         """Message command. Paraphrase the current message content"""
         user = ctx.user
         prompt = await self.mention_to_username(ctx, message.content)
+        from_other_action = prompt + "\nParaphrased:"
 
         # Construct the paraphrase prompt
-        prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \n\nParaphrased:"
+        prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \nParaphrased:"
 
-        await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
+        tokens = self.model.usage_service.count_tokens(prompt)
+        if tokens > self.model.max_tokens - 1000:
+            await ctx.respond(
+                "This message is too long to paraphrase.",
+                ephemeral=True, delete_after=10,
+            )
+            return
+
+        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
 
     async def elaborate_action(self, ctx, message: discord.Message):
         """Message command. Elaborate on the subject of the current message content"""
         user = ctx.user
         prompt = await self.mention_to_username(ctx, message.content)
+        from_other_action = prompt + "\nElaboration:"
 
         # Construct the elaborate prompt
-        prompt = f"Elaborate upon the subject of the following message: {prompt} \n\nElaboration:"
+        prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"
 
-        await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
+        tokens = self.model.usage_service.count_tokens(prompt)
+        if tokens > self.model.max_tokens - 1000:
+            await ctx.respond(
+                "This message is too long to elaborate on.",
+                ephemeral=True, delete_after=10,
+            )
+            return
+
+        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
+
+    async def summarize_action(self, ctx, message: discord.Message):
+        """Message command. Summarize the current message content"""
+        user = ctx.user
+        prompt = await self.mention_to_username(ctx, message.content)
+        from_other_action = "Message at message link: " + message.jump_url + "\nSummarized:"
+
+        # Construct the summarize prompt
+        prompt = f"Summarize the following message, be as short and concise as possible: {prompt} \n\nSummary:"
+
+        tokens = self.model.usage_service.count_tokens(prompt)
+        if tokens > self.model.max_tokens - 300:
+            await ctx.respond(
+                f"Your prompt is too long. It has {tokens} tokens, but the maximum is {self.model.max_tokens - 300}.",
+                ephemeral=True, delete_after=10,
+            )
+            return
+
+        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)

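The three rewritten actions above share one guard: count the prompt's tokens via the usage service and bail out ephemerally when the prompt would leave too little room for the completion, otherwise forward to ask_command with from_other_action set. A hedged, standalone sketch of that pattern, where count_tokens, max_tokens and the margin stand in for the cog's self.model attributes:

async def guarded_action(ctx, prompt: str, count_tokens, max_tokens: int, margin: int = 1000):
    # Refuse early when fewer than `margin` tokens would remain for the reply.
    tokens = count_tokens(prompt)
    if tokens > max_tokens - margin:
        await ctx.respond(
            "This message is too long to process.", ephemeral=True, delete_after=10
        )
        return None
    # Safe to forward; the caller passes from_other_action alongside the prompt.
    return prompt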
@@ -237,7 +237,7 @@ def init():
     signal.signal(signal.SIGTERM, cleanup_pid_file)
 
     if check_process_file(PID_FILE):
-        print("Process ID file already exists")
+        print("Process ID file already exists. If you're sure another instance isn't running, remove the file with: rm bot.pid")
         sys.exit(1)
     else:
         with PID_FILE.open("w") as f:

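For context, a self-contained sketch of the PID-file guard that the improved message above belongs to: start-up refuses to continue when bot.pid already names a live process, and otherwise records the current pid. The liveness probe here is illustrative and POSIX-oriented, not necessarily how check_process_file is implemented in the project:

import os
import sys
from pathlib import Path

PID_FILE = Path("bot.pid")

def check_process_file(pid_file: Path) -> bool:
    """Return True if the PID file exists and points at a live process."""
    if not pid_file.exists():
        return False
    try:
        pid = int(pid_file.read_text().strip())
    except ValueError:
        return False
    try:
        os.kill(pid, 0)  # POSIX: signal 0 delivers nothing, only checks existence
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # process exists but is owned by another user
    return True

if check_process_file(PID_FILE):
    print("Process ID file already exists. If you're sure another instance "
          "isn't running, remove the file with: rm bot.pid")
    sys.exit(1)
else:
    with PID_FILE.open("w") as f:
        f.write(str(os.getpid()))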
@@ -36,7 +36,8 @@ class TextService:
         custom_api_key=None,
         edited_request=False,
         redo_request=False,
-        from_action=False,
+        from_ask_action=False,
+        from_other_action=None,
     ):
         """General service function for sending and receiving gpt generations
@@ -288,7 +289,9 @@ class TextService:
                 str(response["choices"][0]["text"])
             )
 
-            if from_ask_command or from_action:
+            if from_other_action:
+                response_text = f"***{from_other_action}*** {response_text}"
+            elif from_ask_command or from_ask_action:
                 response_text = f"***{prompt}***{response_text}"
             elif from_edit_command:
                 if codex:
@@ -483,7 +486,7 @@ class TextService:
         # Error catching for OpenAI model value errors
         except ValueError as e:
             embed = EmbedStatics.get_invalid_value_embed(e)
-            if from_action:
+            if from_ask_action:
                 await ctx.respond(embed=embed, ephemeral=True)
             elif from_context:
                 await ctx.send_followup(embed=embed, ephemeral=True)

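Taken together, the TextService changes mean the new context menu actions no longer echo their long engineered prompt back into the channel: from_other_action carries a short pre-built label (the original text or a message link plus "\nSummarized:") that is bolded in front of the reply, while /ask and the Ask-GPT action keep prefixing the prompt itself. A hedged sketch of just that selection step, with an illustrative function name:

def build_response_text(response_text: str, prompt: str,
                        from_ask_command: bool = False,
                        from_ask_action=None, from_other_action=None) -> str:
    # "Other" actions (Paraphrase/Elaborate/Summarize) supply their own label.
    if from_other_action:
        return f"***{from_other_action}*** {response_text}"
    # /ask and the Ask-GPT message command echo the user's prompt.
    if from_ask_command or from_ask_action:
        return f"***{prompt}***{response_text}"
    return response_text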