Merge pull request #55 from Hikari-Haru/ask-parameters

Added passthrough of gpt3 parameters to /ask
Kaveen Kumarasinghe 2 years ago committed by GitHub
commit b31bfbe26d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -46,7 +46,7 @@ These commands are grouped, so each group has a prefix but you can easily tab co
`/help` - Display help text for the bot
`/gpt ask <prompt>` - Ask the GPT3 Davinci 003 model a question.
`/gpt ask <prompt> <temp> <top_p> <frequency penalty> <presence penalty>` - Ask the GPT3 Davinci 003 model a question. Optional parameter overrides are available.
`/gpt converse` - Start a conversation with the bot, like ChatGPT

@ -574,7 +574,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# ctx can be of type AppContext(interaction) or Message
async def encapsulated_send(
self, user_id, prompt, ctx, response_message=None, from_g_command=False
self, user_id, prompt, ctx, temp_override=None, top_p_override=None, frequency_penalty_override=None, presence_penalty_override=None, response_message=None, from_g_command=False
):
new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
@ -635,7 +635,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
return
# Send the request to the model
response = await self.model.send_request(new_prompt, tokens=tokens)
response = await self.model.send_request(new_prompt, tokens=tokens, temp_override=temp_override, top_p_override=top_p_override, frequency_penalty_override=frequency_penalty_override, presence_penalty_override=presence_penalty_override)
# Clean the request response
response_text = str(response["choices"][0]["text"])
@ -736,8 +736,20 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
@discord.option(
name="prompt", description="The prompt to send to GPT3", required=True
)
@discord.option(
name="temperature", description="Higher values means the model will take more risks", required=False, input_type=float, min_value=0, max_value=1
)
@discord.option(
name="top_p", description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution", required=False, input_type=float, min_value=0, max_value=1
)
@discord.option(
name="frequency_penalty", description="Decreasing the model's likelihood to repeat the same line verbatim", required=False, input_type=float, min_value=-2, max_value=2
)
@discord.option(
name="presence_penalty", description="Increasing the model's likelihood to talk about new topics", required=False, input_type=float, min_value=-2, max_value=2
)
@discord.guild_only()
async def ask(self, ctx: discord.ApplicationContext, prompt: str):
async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: float, top_p:float, frequency_penalty: float, presence_penalty: float):
await ctx.defer()
user = ctx.user
@ -747,7 +759,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Send the request to the model
# If conversing, the prompt to send is the history, otherwise, it's just the prompt
await self.encapsulated_send(user.id, prompt, ctx, from_g_command=True)
await self.encapsulated_send(user.id, prompt, ctx, temp_override=temperature, top_p_override=top_p, frequency_penalty_override=frequency_penalty, presence_penalty_override=presence_penalty, from_g_command=True)
@add_to_group("gpt")
@discord.slash_command(

@ -381,21 +381,22 @@ class Model:
)
print("The prompt about to be sent is " + prompt)
print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}")
async with aiohttp.ClientSession() as session:
payload = {
"model": self.model,
"prompt": prompt,
"temperature": self.temp if not temp_override else temp_override,
"top_p": self.top_p if not top_p_override else top_p_override,
"temperature": self.temp if temp_override is None else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"max_tokens": self.max_tokens - tokens
if not max_tokens_override
else max_tokens_override,
"presence_penalty": self.presence_penalty
if not presence_penalty_override
if presence_penalty_override is None
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if not frequency_penalty_override
if frequency_penalty_override is None
else frequency_penalty_override,
"best_of": self.best_of if not best_of_override else best_of_override,
}
@ -404,7 +405,8 @@ class Model:
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
print(response)
print(f"Payload -> {payload}")
print(f"Response -> {response}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(response)

Loading…
Cancel
Save