diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index ca9fa97..d61ea75 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -574,7 +574,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
     # ctx can be of type AppContext(interaction) or Message
     async def encapsulated_send(
-        self, user_id, prompt, ctx, temp_override=None, top_p_override=None, frequency_penalty_override=None, presence_penalty_override=None, response_message=None, from_g_command=False
+        self,
+        user_id,
+        prompt,
+        ctx,
+        temp_override=None,
+        top_p_override=None,
+        frequency_penalty_override=None,
+        presence_penalty_override=None,
+        response_message=None,
+        from_g_command=False,
     ):
         new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
 
@@ -635,7 +644,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             return
 
         # Send the request to the model
-        response = await self.model.send_request(new_prompt, tokens=tokens, temp_override=temp_override, top_p_override=top_p_override, frequency_penalty_override=frequency_penalty_override, presence_penalty_override=presence_penalty_override)
+        response = await self.model.send_request(
+            new_prompt,
+            tokens=tokens,
+            temp_override=temp_override,
+            top_p_override=top_p_override,
+            frequency_penalty_override=frequency_penalty_override,
+            presence_penalty_override=presence_penalty_override,
+        )
 
         # Clean the request response
         response_text = str(response["choices"][0]["text"])
@@ -737,19 +753,47 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         name="prompt", description="The prompt to send to GPT3", required=True
     )
     @discord.option(
-        name="temperature", description="Higher values means the model will take more risks", required=False, input_type=float, min_value=0, max_value=1
+        name="temperature",
+        description="Higher values means the model will take more risks",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
     )
     @discord.option(
-        name="top_p", description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution", required=False, input_type=float, min_value=0, max_value=1
+        name="top_p",
+        description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
     )
     @discord.option(
-        name="frequency_penalty", description="Decreasing the model's likelihood to repeat the same line verbatim", required=False, input_type=float, min_value=-2, max_value=2
+        name="frequency_penalty",
+        description="Decreasing the model's likelihood to repeat the same line verbatim",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
     )
     @discord.option(
-        name="presence_penalty", description="Increasing the model's likelihood to talk about new topics", required=False, input_type=float, min_value=-2, max_value=2
+        name="presence_penalty",
+        description="Increasing the model's likelihood to talk about new topics",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
     )
     @discord.guild_only()
-    async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: float, top_p:float, frequency_penalty: float, presence_penalty: float):
+    async def ask(
+        self,
+        ctx: discord.ApplicationContext,
+        prompt: str,
+        temperature: float,
+        top_p: float,
+        frequency_penalty: float,
+        presence_penalty: float,
+    ):
         await ctx.defer()
 
         user = ctx.user
@@ -759,7 +803,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
         # Send the request to the model
         # If conversing, the prompt to send is the history, otherwise, it's just the prompt
-        await self.encapsulated_send(user.id, prompt, ctx, temp_override=temperature, top_p_override=top_p, frequency_penalty_override=frequency_penalty, presence_penalty_override=presence_penalty, from_g_command=True)
+        await self.encapsulated_send(
+            user.id,
+            prompt,
+            ctx,
+            temp_override=temperature,
+            top_p_override=top_p,
+            frequency_penalty_override=frequency_penalty,
+            presence_penalty_override=presence_penalty,
+            from_g_command=True,
+        )
 
     @add_to_group("gpt")
     @discord.slash_command(
diff --git a/models/openai_model.py b/models/openai_model.py
index 020de9a..636c24d 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -381,7 +381,9 @@ class Model:
         )
 
         print("The prompt about to be sent is " + prompt)
-        print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}")
+        print(
+            f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
+        )
 
         async with aiohttp.ClientSession() as session:
             payload = {