Conversation command now has a model option

Added model autocomplete
Rene Teigen 2 years ago
parent a3ce3577a7
commit 5de0fd37da
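
Taken together, the diff adds a "model" option to the conversation slash command, autocompletes its value from Models.TEXT_MODELS, and threads the chosen value through to the conversation handler. Below is a minimal, self-contained sketch of that wiring in py-cord, assuming the same decorators the diff uses; ExampleCog and its single option are simplified stand-ins, not the project's actual Commands cog.

import discord

class Models:
    # Trimmed stand-in for the project's Models class
    TEXT_MODELS = ["text-davinci-003", "text-curie-001"]
    DEFAULT = "text-davinci-003"

async def get_models(ctx: discord.AutocompleteContext):
    # Offer every known text model whose name starts with what the user has typed
    return [m for m in Models.TEXT_MODELS if m.startswith(ctx.value.lower())]

class ExampleCog(discord.Cog):
    @discord.slash_command(name="converse", description="Start a conversation with the bot")
    @discord.option(
        name="model",
        description="Which model to use with the bot",
        required=False,
        default=False,
        autocomplete=get_models,
    )
    async def converse(self, ctx: discord.ApplicationContext, model: str):
        # Same fallback the diff applies to the conversation thread
        chosen = Models.DEFAULT if not model else model
        await ctx.respond(f"Starting a conversation with {chosen}")

# bot = discord.Bot()
# bot.add_cog(ExampleCog())
# bot.run("TOKEN")  # requires a real Discord bot token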

@@ -422,6 +422,13 @@ class Commands(discord.Cog, name="Commands"):
        required=False,
        default=False,
    )
    @discord.option(
        name="model",
        description="Which model to use with the bot",
        required=False,
        default=False,
        autocomplete=Settings_autocompleter.get_models,
    )
    @discord.option(
        name="temperature",
        description="Higher values mean the model will take more risks",
@@ -462,6 +469,7 @@ class Commands(discord.Cog, name="Commands"):
        opener_file: str,
        private: bool,
        minimal: bool,
        model: str,
        temperature: float,
        top_p: float,
        frequency_penalty: float,
@@ -473,6 +481,7 @@ class Commands(discord.Cog, name="Commands"):
            opener_file,
            private,
            minimal,
            model,
            temperature,
            top_p,
            frequency_penalty,

@@ -801,6 +801,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
        opener_file: str,
        private: bool,
        minimal: bool,
        model: str,
        temperature: float,
        top_p: float,
        frequency_penalty: float,
@@ -814,6 +815,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
            opener_file (str): A .txt or .json file which is appended before the opener
            private (bool): If the thread should be private
            minimal (bool): If a minimal starter should be used
            model (str): The OpenAI model that should be used
            temperature (float): Sets the temperature override
            top_p (float): Sets the top p override
            frequency_penalty (float): Sets the frequency penalty override
@@ -866,7 +868,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
        )
        self.conversation_threads[thread.id] = Thread(thread.id)
        self.conversation_threads[thread.id].model = self.model.model
        self.conversation_threads[thread.id].model = self.model.model if not model else model
        # Set the overrides for the conversation
        self.conversation_threads[thread.id].set_overrides(
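
The changed assignment above is where the new option takes effect: because the slash option defaults to False, "not model" is true whenever the user leaves it blank, so the thread keeps the bot-wide default model. A tiny standalone illustration of that fallback (pick_model is a hypothetical helper, not part of the codebase):

def pick_model(default_model: str, model):
    # Mirrors the ternary above: keep the default unless the user passed a model
    return default_model if not model else model

assert pick_model("text-davinci-003", False) == "text-davinci-003"  # option left blank
assert pick_model("text-davinci-003", "text-curie-001") == "text-curie-001"  # explicit override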

@@ -86,6 +86,17 @@ class Settings_autocompleter:
        await ctx.interaction.response.defer()  # defer so the autocomplete for int values doesn't error and instead just says not found
        return []

    async def get_models(
        ctx: discord.AutocompleteContext,
    ):
        """Gets all models"""
        return [
            value
            for value in Models.TEXT_MODELS
            if value.startswith(ctx.value.lower())
        ]

    async def get_value_moderations(
        ctx: discord.AutocompleteContext,
    ):  # Behaves a bit weird if you go back and edit the parameter without typing in a new command
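
get_models performs a prefix match of the partially typed value against Models.TEXT_MODELS, lowercasing the input first (the model names themselves are already lowercase). The same filtering logic in isolation, runnable without Discord, where filter_models is an illustrative stand-in for the callback body:

TEXT_MODELS = [
    "text-davinci-003",
    "davinci:ft-personal-2023-01-28-08-02-00",
    "text-curie-001",
]

def filter_models(partial: str):
    # Same comprehension as get_models: lowercase the input, keep prefix matches
    return [m for m in TEXT_MODELS if m.startswith(partial.lower())]

print(filter_models("text-d"))  # ['text-davinci-003']
print(filter_models("DAV"))     # ['davinci:ft-personal-2023-01-28-08-02-00']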

@@ -44,6 +44,7 @@ class Override:
class Models:
    # Text models
    DAVINCI = "text-davinci-003"
    DAVINCI_FT = "davinci:ft-personal-2023-01-28-08-02-00"
    CURIE = "text-curie-001"
    BABBAGE = "text-babbage-001"
    ADA = "text-ada-001"
@@ -60,7 +61,7 @@ class Models:
    CODE_EDIT = "code-davinci-edit-001"
    # Model collections
    TEXT_MODELS = [DAVINCI, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN]
    TEXT_MODELS = [DAVINCI, DAVINCI_FT, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN]
    EDIT_MODELS = [EDIT, CODE_EDIT]
    DEFAULT = DAVINCI
@@ -69,6 +70,7 @@ class Models:
    # Tokens Mapping
    TOKEN_MAPPING = {
        "text-davinci-003": 4024,
        "davinci:ft-personal-2023-01-28-08-02-00": 4042,
        "text-curie-001": 2024,
        "text-babbage-001": 2024,
        "text-ada-001": 2024,

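The new fine-tuned model also gets an entry in TOKEN_MAPPING so the bot knows its context size. This hunk does not show how the mapping is consumed; the lookup below is purely illustrative (max_tokens_for is a hypothetical helper, not part of the codebase):

TOKEN_MAPPING = {
    "text-davinci-003": 4024,
    "davinci:ft-personal-2023-01-28-08-02-00": 4042,
    "text-curie-001": 2024,
}

def max_tokens_for(model: str, fallback: int = 2024) -> int:
    # Unknown models fall back to a conservative limit
    return TOKEN_MAPPING.get(model, fallback)

assert max_tokens_for("davinci:ft-personal-2023-01-28-08-02-00") == 4042
assert max_tokens_for("some-unknown-model") == 2024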