Kaveen Kumarasinghe 1 year ago
commit aa1a82b553

@@ -46,7 +46,7 @@ These commands are grouped, so each group has a prefix but you can easily tab co
 `/help` - Display help text for the bot
-`/gpt ask <prompt>` Ask the GPT3 Davinci 003 model a question.
+`/gpt ask <prompt> <temp> <top_p> <frequency penalty> <presence penalty>` Ask the GPT3 Davinci 003 model a question. Optional overrides available.
 `/gpt converse` - Start a conversation with the bot, like ChatGPT
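With the new optional parameters, an invocation might look like `/gpt ask prompt: Tell me a story temperature: 0.8 top_p: 0.9 frequency_penalty: 0.5 presence_penalty: 0.5` (values illustrative; any subset of the four overrides may be supplied).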

@@ -47,7 +47,7 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
         try:
             file, image_urls = await self.model.send_image_request(
-                prompt, vary=vary if not draw_from_optimizer else None
+                ctx, prompt, vary=vary if not draw_from_optimizer else None
             )
         except ValueError as e:
             (

@@ -574,7 +574,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     # ctx can be of type AppContext(interaction) or Message
     async def encapsulated_send(
-        self, user_id, prompt, ctx, response_message=None, from_g_command=False
+        self,
+        user_id,
+        prompt,
+        ctx,
+        temp_override=None,
+        top_p_override=None,
+        frequency_penalty_override=None,
+        presence_penalty_override=None,
+        response_message=None,
+        from_g_command=False,
     ):
         new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
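Because every new parameter defaults to `None`, call sites that predate this commit continue to work unchanged; a minimal sketch of the two calling styles (argument values illustrative):

```python
# Pre-existing style: all overrides stay None, so model defaults apply.
await self.encapsulated_send(user_id, prompt, ctx)

# New style: pin any subset of the sampling parameters per request.
await self.encapsulated_send(
    user_id, prompt, ctx, temp_override=0.8, top_p_override=0.9
)
```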
@@ -635,7 +644,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
                 return
         # Send the request to the model
-        response = await self.model.send_request(new_prompt, tokens=tokens)
+        response = await self.model.send_request(
+            new_prompt,
+            tokens=tokens,
+            temp_override=temp_override,
+            top_p_override=top_p_override,
+            frequency_penalty_override=frequency_penalty_override,
+            presence_penalty_override=presence_penalty_override,
+        )
         # Clean the request response
         response_text = str(response["choices"][0]["text"])
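For reference, `response_text` is pulled from the `choices` array of the completions response; an abridged, illustrative shape of what the legacy `/v1/completions` endpoint returns:

```python
response = {
    "object": "text_completion",
    "model": "text-davinci-003",
    "choices": [{"text": "GPTie: Hello!", "index": 0, "finish_reason": "stop"}],
    "usage": {"prompt_tokens": 12, "completion_tokens": 4, "total_tokens": 16},
}
response_text = str(response["choices"][0]["text"])  # "GPTie: Hello!"
```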
@@ -736,8 +752,48 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     @discord.option(
         name="prompt", description="The prompt to send to GPT3", required=True
     )
+    @discord.option(
+        name="temperature",
+        description="Higher values means the model will take more risks",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
+    )
+    @discord.option(
+        name="top_p",
+        description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
+    )
+    @discord.option(
+        name="frequency_penalty",
+        description="Decreasing the model's likelihood to repeat the same line verbatim",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
+    )
+    @discord.option(
+        name="presence_penalty",
+        description="Increasing the model's likelihood to talk about new topics",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
+    )
     @discord.guild_only()
-    async def ask(self, ctx: discord.ApplicationContext, prompt: str):
+    async def ask(
+        self,
+        ctx: discord.ApplicationContext,
+        prompt: str,
+        temperature: float,
+        top_p: float,
+        frequency_penalty: float,
+        presence_penalty: float,
+    ):
         await ctx.defer()
         user = ctx.user
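Although the new parameters are annotated as `float`, py-cord passes `None` for any option declared with `required=False` that the caller omits; that `None` then flows through `encapsulated_send` into `send_request`, where the `is None` checks fall back to the model defaults. A self-contained sketch of the pattern (command name and reply are illustrative):

```python
import discord

bot = discord.Bot()

@bot.slash_command(name="echo_temperature")
@discord.option(
    name="temperature", required=False, input_type=float, min_value=0, max_value=1
)
async def echo_temperature(ctx: discord.ApplicationContext, temperature: float):
    # If the user omits the option, temperature arrives as None, not 0.0.
    await ctx.respond(f"temperature={temperature}")
```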
@@ -747,7 +803,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         # Send the request to the model
         # If conversing, the prompt to send is the history, otherwise, it's just the prompt
-        await self.encapsulated_send(user.id, prompt, ctx, from_g_command=True)
+        await self.encapsulated_send(
+            user.id,
+            prompt,
+            ctx,
+            temp_override=temperature,
+            top_p_override=top_p,
+            frequency_penalty_override=frequency_penalty,
+            presence_penalty_override=presence_penalty,
+            from_g_command=True,
+        )

     @add_to_group("gpt")
     @discord.slash_command(

@@ -183,7 +183,7 @@ class DrawButton(discord.ui.Button["OptimizeView"]):
         await self.image_service_cog.encapsulated_send(
             user_id,
             prompt,
-            None,
+            interaction,
             msg,
             True,
             True,
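Passing the live `interaction` instead of `None` gives the image pipeline a real context object, which matters once `send_image_request` starts consulting the guild's upload limit (see the `Model` hunks below). Roughly, it enables a branch like:

```python
# Illustrative: with a real context, DMs and guilds can be told apart.
limit_mb = 8 if ctx.guild is None else ctx.guild.filesize_limit / 1048576
```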

@@ -381,21 +381,24 @@ class Model:
         )
         print("The prompt about to be sent is " + prompt)
+        print(
+            f"Overrides -> temp:{temp_override}, top_p:{top_p_override}, frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
+        )
         async with aiohttp.ClientSession() as session:
             payload = {
                 "model": self.model,
                 "prompt": prompt,
-                "temperature": self.temp if not temp_override else temp_override,
-                "top_p": self.top_p if not top_p_override else top_p_override,
+                "temperature": self.temp if temp_override is None else temp_override,
+                "top_p": self.top_p if top_p_override is None else top_p_override,
                 "max_tokens": self.max_tokens - tokens
                 if not max_tokens_override
                 else max_tokens_override,
                 "presence_penalty": self.presence_penalty
-                if not presence_penalty_override
+                if presence_penalty_override is None
                 else presence_penalty_override,
                 "frequency_penalty": self.frequency_penalty
-                if not frequency_penalty_override
+                if frequency_penalty_override is None
                 else frequency_penalty_override,
                 "best_of": self.best_of if not best_of_override else best_of_override,
             }
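The move from `if not x` to `if x is None` is a genuine bug fix rather than a style change: `0` is a meaningful value for these knobs, and a truthiness test silently discards it. A two-line demonstration with illustrative values:

```python
default_temp, temp_override = 0.6, 0.0  # user explicitly requested temperature 0
default_temp if not temp_override else temp_override       # old check -> 0.6 (wrong)
default_temp if temp_override is None else temp_override   # new check -> 0.0 (right)
```

Note that `max_tokens_override` and `best_of_override` keep the truthy check in this commit, so an explicit `0` there would still fall through to the defaults.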
@@ -404,13 +407,16 @@ class Model:
             "https://api.openai.com/v1/completions", json=payload, headers=headers
         ) as resp:
             response = await resp.json()
-            print(response)
+            print(f"Payload -> {payload}")
+            print(f"Response -> {response}")
             # Parse the total tokens used for this request and response pair from the response
             await self.valid_text_request(response)
             return response

-    async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]:
+    async def send_image_request(
+        self, ctx, prompt, vary=None
+    ) -> tuple[File, list[Any]]:
         # Validate that all the parameters are in a good state before we send the request
         words = len(prompt.split(" "))
         if words < 3 or words > 75:
@@ -533,17 +539,21 @@ class Model:
         )
         # Print the filesize of new_im, in mega bytes
-        image_size = os.path.getsize(temp_file.name) / 1000000
+        image_size = os.path.getsize(temp_file.name) / 1048576
+        if ctx.guild is None:
+            guild_file_limit = 8
+        else:
+            guild_file_limit = ctx.guild.filesize_limit / 1048576
         # If the image size is greater than 8MB, we can't return this to the user, so we will need to downscale the
         # image and try again
         safety_counter = 0
-        while image_size > 8:
+        while image_size > guild_file_limit:
             safety_counter += 1
             if safety_counter >= 3:
                 break
             print(
-                f"Image size is {image_size}MB, which is too large for discord. Downscaling and trying again"
+                f"Image size is {image_size}MB, which is too large for this server (limit {guild_file_limit}MB). Downscaling and trying again"
             )
             # We want to do this resizing asynchronously, so that it doesn't block the main thread during the resize.
             # We can use the asyncio.run_in_executor method to do this
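`Guild.filesize_limit` is reported in bytes, so dividing by 1048576 (1024²) converts it to the same mebibyte units as the corrected `os.path.getsize` divisor, with an 8 MB fallback for DMs where there is no guild. The trailing comments describe offloading the blocking PIL resize to an executor; a minimal sketch of that pattern (helper name and scale factor are assumptions, not from the commit):

```python
import asyncio
from PIL import Image

async def downscale_image(path: str, factor: float = 0.9) -> None:
    # Image.resize is CPU-bound; run it in a worker thread so the
    # event loop (and the bot) stays responsive during the resize.
    def _resize() -> None:
        with Image.open(path) as im:
            smaller = im.resize((int(im.width * factor), int(im.height * factor)))
            smaller.save(path)

    await asyncio.get_running_loop().run_in_executor(None, _resize)
```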
