Add ephemeral option to ask and edit

Rene Teigen 2 years ago
parent 95dd054f1c
commit 71125ce65d

@@ -301,6 +301,11 @@ class Commands(discord.Cog, name="Commands"):
@discord.option(
name="prompt", description="The prompt to send to GPT3", required=True
)
+@discord.option(
+name="ephemeral",
+description="Will only be visible to you",
+required=False
+)
@discord.option(
name="temperature",
description="Higher values means the model will take more risks",
@@ -334,13 +339,14 @@ class Commands(discord.Cog, name="Commands"):
self,
ctx: discord.ApplicationContext,
prompt: str,
+ephemeral: bool,
temperature: float,
top_p: float,
frequency_penalty: float,
presence_penalty: float,
):
await self.converser_cog.ask_command(
-ctx, prompt, temperature, top_p, frequency_penalty, presence_penalty
+ctx, prompt, ephemeral, temperature, top_p, frequency_penalty, presence_penalty
)
@add_to_group("gpt")
@@ -360,6 +366,11 @@ class Commands(discord.Cog, name="Commands"):
required=False,
default="",
)
+@discord.option(
+name="ephemeral",
+description="Will only be visible to you",
+required=False
+)
@discord.option(
name="temperature",
description="Higher values means the model will take more risks",
@@ -385,12 +396,13 @@ class Commands(discord.Cog, name="Commands"):
ctx: discord.ApplicationContext,
instruction: str,
text: str,
+ephemeral: bool,
temperature: float,
top_p: float,
codex: bool,
):
await self.converser_cog.edit_command(
-ctx, instruction, text, temperature, top_p, codex
+ctx, instruction, text, ephemeral, temperature, top_p, codex
)
@add_to_group("gpt")
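Note (not part of this commit): a minimal sketch of wiring an ephemeral option into a py-cord slash command, assuming an explicit default=False so the bool is never None when the caller omits it. The bot instance, command name, and response text below are illustrative, not the repo's.

import discord

bot = discord.Bot()

@bot.slash_command(name="ask", description="Ask the model something")
@discord.option(
    name="ephemeral",
    description="Will only be visible to you",
    required=False,
    default=False,  # assumed default so `ephemeral` is always a real bool
)
async def ask(ctx: discord.ApplicationContext, prompt: str, ephemeral: bool):
    # Visibility is decided when the interaction is deferred.
    await ctx.defer(ephemeral=ephemeral)
    await ctx.respond(f"Prompt received: {prompt}", ephemeral=ephemeral)

# bot.run("TOKEN")  # token omitted on purpose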

@@ -693,6 +693,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self,
ctx: discord.ApplicationContext,
prompt: str,
+ephemeral: bool,
temperature: float,
top_p: float,
frequency_penalty: float,
@@ -720,7 +721,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if not user_api_key:
return
-await ctx.defer()
+await ctx.defer(ephemeral=ephemeral)
overrides = Override(temperature, top_p, frequency_penalty, presence_penalty)
@@ -741,6 +742,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
ctx: discord.ApplicationContext,
instruction: str,
text: str,
+ephemeral: bool,
temperature: float,
top_p: float,
codex: bool,
@@ -766,7 +768,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if not user_api_key:
return
-await ctx.defer()
+await ctx.defer(ephemeral=ephemeral)
overrides = Override(temperature, top_p, 0, 0)
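Note (not part of this commit): whether the eventual reply is ephemeral is generally fixed when the interaction is deferred, which is why the flag has to be threaded down to the ctx.defer() calls above rather than applied later. A small illustrative sketch:

import discord

async def reply(ctx: discord.ApplicationContext, ephemeral: bool):
    # An ephemeral deferral shows the "thinking..." placeholder only to the
    # invoking user; the followup that replaces it keeps that visibility.
    await ctx.defer(ephemeral=ephemeral)
    await ctx.respond("Done.")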

@@ -584,7 +584,7 @@ class Model:
def backoff_handler_request(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].args}"
f"{details['exception'].args[0]}"
)
async def valid_text_request(self, response):
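Note: BaseException.args is a tuple, so logging details['exception'].args prints something like ('Rate limited',) while args[0] prints only the message text. A standalone illustration (not from the repo):

try:
    raise ValueError("Rate limited")
except ValueError as exc:
    print(exc.args)     # ('Rate limited',)
    print(exc.args[0])  # Rate limited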
@@ -657,7 +657,7 @@ class Model:
)
print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override}")
-async with aiohttp.ClientSession(raise_for_status=True) as session:
+async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.EDIT if codex is False else Models.CODE_EDIT,
"input": "" if text is None else text,
@@ -724,7 +724,7 @@ class Model:
tokens = self.usage_service.count_tokens(summary_request_text)
-async with aiohttp.ClientSession(raise_for_status=True) as session:
+async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.DAVINCI,
"prompt": summary_request_text,
