Merge pull request #127 from Hikari-Haru/set-conversation-model

Add the ability to set model in threads
Kaveen Kumarasinghe authored 2 years ago · committed by GitHub
commit 3839ff2abe

@@ -301,6 +301,11 @@ class Commands(discord.Cog, name="Commands"):
@discord.option(
name="prompt", description="The prompt to send to GPT3", required=True
)
@discord.option(
name="private",
description="Will only be visible to you",
required=False
)
@discord.option(
name="temperature",
description="Higher values means the model will take more risks",
@@ -334,13 +339,14 @@ class Commands(discord.Cog, name="Commands"):
self,
ctx: discord.ApplicationContext,
prompt: str,
private: bool,
temperature: float,
top_p: float,
frequency_penalty: float,
presence_penalty: float,
):
await self.converser_cog.ask_command(
ctx, prompt, temperature, top_p, frequency_penalty, presence_penalty
ctx, prompt, private, temperature, top_p, frequency_penalty, presence_penalty
)
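For context, a minimal py-cord sketch of how a private flag like this reaches the user; the cog and command below are illustrative stand-ins, not the bot's real classes:

import discord

class ExampleCog(discord.Cog):
    @discord.slash_command(name="ask", description="Ask the model a question")
    @discord.option(name="prompt", description="The prompt to send", required=True)
    @discord.option(name="private", description="Will only be visible to you", required=False, default=False)
    async def ask(self, ctx: discord.ApplicationContext, prompt: str, private: bool = False):
        # Deferring with ephemeral=private keeps the eventual reply visible
        # only to the invoking user when private is True.
        await ctx.defer(ephemeral=private)
        await ctx.respond(f"You asked: {prompt}", ephemeral=private)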
@add_to_group("gpt")
@@ -360,6 +366,11 @@ class Commands(discord.Cog, name="Commands"):
required=False,
default="",
)
@discord.option(
name="private",
description="Will only be visible to you",
required=False
)
@discord.option(
name="temperature",
description="Higher values means the model will take more risks",
@@ -385,12 +396,13 @@ class Commands(discord.Cog, name="Commands"):
ctx: discord.ApplicationContext,
instruction: str,
text: str,
private: bool,
temperature: float,
top_p: float,
codex: bool,
):
await self.converser_cog.edit_command(
ctx, instruction, text, temperature, top_p, codex
ctx, instruction, text, private, temperature, top_p, codex
)
@add_to_group("gpt")
@@ -422,6 +434,13 @@ class Commands(discord.Cog, name="Commands"):
required=False,
default=False,
)
@discord.option(
name="model",
description="Which model to use with the bot",
required=False,
default=False,
autocomplete=Settings_autocompleter.get_models,
)
@discord.option(
name="temperature",
description="Higher values means the model will take more risks",
@@ -462,6 +481,7 @@ class Commands(discord.Cog, name="Commands"):
opener_file: str,
private: bool,
minimal: bool,
model: str,
temperature: float,
top_p: float,
frequency_penalty: float,
@@ -473,6 +493,7 @@ class Commands(discord.Cog, name="Commands"):
opener_file,
private,
minimal,
model,
temperature,
top_p,
frequency_penalty,
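The new model option on /gpt converse above is driven by an autocomplete callback. A rough sketch of how such a callback attaches to a slash-command option in py-cord, with an illustrative model list standing in for Models.TEXT_MODELS:

import discord

async def get_models(ctx: discord.AutocompleteContext) -> list[str]:
    # Illustrative stand-in for Settings_autocompleter.get_models: suggest only
    # models whose names start with what the user has typed so far.
    text_models = ["text-davinci-003", "text-curie-001", "text-babbage-001"]
    return [m for m in text_models if m.startswith(ctx.value.lower())]

class ExampleCog(discord.Cog):
    @discord.slash_command(name="converse", description="Start a conversation thread")
    @discord.option(name="model", description="Which model to use with the bot",
                    required=False, default=None, autocomplete=get_models)
    async def converse(self, ctx: discord.ApplicationContext, model: str):
        await ctx.respond(f"Thread will use: {model or 'the bot default'}")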

@@ -693,6 +693,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self,
ctx: discord.ApplicationContext,
prompt: str,
private: bool,
temperature: float,
top_p: float,
frequency_penalty: float,
@@ -720,7 +721,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if not user_api_key:
return
await ctx.defer()
await ctx.defer(ephemeral=private)
overrides = Override(temperature, top_p, frequency_penalty, presence_penalty)
@@ -741,6 +742,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
ctx: discord.ApplicationContext,
instruction: str,
text: str,
private: bool,
temperature: float,
top_p: float,
codex: bool,
@@ -766,7 +768,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if not user_api_key:
return
await ctx.defer()
await ctx.defer(ephemeral=private)
overrides = Override(temperature, top_p, 0, 0)
@@ -801,6 +803,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
opener_file: str,
private: bool,
minimal: bool,
model: str,
temperature: float,
top_p: float,
frequency_penalty: float,
@@ -814,6 +817,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
opener_file (str): A .txt or .json file which is appended before the opener
private (bool): If the thread should be private
minimal (bool): If a minimal starter should be used
model (str): The openai model that should be used
temperature (float): Sets the temperature override
top_p (float): Sets the top p override
frequency_penalty (float): Sets the frequency penalty override
@@ -866,7 +870,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
self.conversation_threads[thread.id] = Thread(thread.id)
self.conversation_threads[thread.id].model = self.model.model
self.conversation_threads[thread.id].model = self.model.model if not model else model
# Set the overrides for the conversation
self.conversation_threads[thread.id].set_overrides(
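Taken on its own, the per-thread model assignment above is a simple fallback: an empty or missing model request keeps the bot-wide default. A self-contained sketch (ThreadState is a hypothetical stand-in for the bot's Thread object):

class ThreadState:
    """Hypothetical stand-in for the bot's per-conversation Thread object."""
    def __init__(self, thread_id: int):
        self.id = thread_id
        self.model = None

def resolve_model(default_model: str, requested: str) -> str:
    # Mirrors `self.model.model if not model else model`.
    return requested if requested else default_model

thread = ThreadState(1234)
thread.model = resolve_model("text-davinci-003", "")                # bot default
thread.model = resolve_model("text-davinci-003", "text-curie-001")  # per-thread override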

@@ -30,7 +30,8 @@ from services.environment_service import EnvService
from models.openai_model import Model
__version__ = "9.0.4"
__version__ = "9.1"
PID_FILE = Path("bot.pid")

@@ -86,6 +86,17 @@ class Settings_autocompleter:
await ctx.interaction.response.defer() # defer so the autocomplete in int values doesn't error but rather just says not found
return []
async def get_models(
ctx: discord.AutocompleteContext,
):
"""Gets all models"""
return [
value
for value in Models.TEXT_MODELS
if value.startswith(ctx.value.lower())
]
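The suggestion logic is a case-normalised prefix match over the text-model list; standalone, it looks like this (the TEXT_MODELS entries are assumed for illustration):

TEXT_MODELS = ["text-davinci-003", "text-curie-001", "text-babbage-001", "text-ada-001"]

def suggest_models(typed: str) -> list[str]:
    # Keep only models whose names start with the lower-cased partial input.
    return [m for m in TEXT_MODELS if m.startswith(typed.lower())]

print(suggest_models("text-c"))  # ['text-curie-001']
print(suggest_models(""))        # every model, shown before the user types anything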
async def get_value_moderations(
ctx: discord.AutocompleteContext,
): # Behaves a bit weird if you go back and edit the parameter without typing in a new command

@@ -78,7 +78,7 @@ class Models:
@staticmethod
def get_max_tokens(model: str) -> int:
return Models.TOKEN_MAPPING.get(model, 4024)
return Models.TOKEN_MAPPING.get(model, 2024)
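For reference, this is a plain dict lookup with a default, so any model missing from TOKEN_MAPPING now falls back to the smaller 2024-token budget (the mapping values below are assumed, not copied from the repo):

TOKEN_MAPPING = {"text-davinci-003": 4097, "text-curie-001": 2049}

def get_max_tokens(model: str) -> int:
    # Unknown models get a conservative 2024-token budget.
    return TOKEN_MAPPING.get(model, 2024)

print(get_max_tokens("text-davinci-003"))   # 4097
print(get_max_tokens("some-future-model"))  # 2024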
class ImageSize:
@@ -576,11 +576,16 @@ class Model:
self._prompt_min_length = value
SETTINGS_DB["prompt_min_length"] = value
def backoff_handler(details):
def backoff_handler_http(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].status}: {details['exception'].message}"
)
def backoff_handler_request(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].args[0]}"
)
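Both handlers are consumed by the backoff decorators below through on_backoff. A minimal sketch of that pattern with the backoff library (the decorated function is hypothetical):

import backoff

def log_backoff(details):
    # backoff passes a details dict with keys such as 'wait', 'tries' and 'target'.
    print(
        f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries "
        f"calling function {details['target']}"
    )

@backoff.on_exception(backoff.expo, ValueError, factor=3, base=5, max_tries=4, on_backoff=log_backoff)
def flaky_request():
    raise ValueError("the API rejected the request")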
async def valid_text_request(self, response):
try:
@@ -598,7 +603,7 @@ class Model:
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler,
on_backoff=backoff_handler_http,
)
async def send_embedding_request(self, text, custom_api_key=None):
async with aiohttp.ClientSession(raise_for_status=True) as session:
@@ -624,11 +629,11 @@ class Model:
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
ValueError,
factor=3,
base=5,
max_tries=6,
on_backoff=backoff_handler,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_edit_request(
self,
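The retry strategy shifts in this decorator: it now backs off on ValueError rather than aiohttp.ClientResponseError, and the session below is opened with raise_for_status=False, so API failures surface through response validation instead of the HTTP client. A rough sketch of that pattern (validate_response is a hypothetical stand-in for valid_text_request):

import backoff

def validate_response(body: dict) -> dict:
    # Surface API-level errors as ValueError so the backoff decorator retries them.
    if "error" in body:
        raise ValueError(body["error"].get("message", "unknown error"))
    return body

@backoff.on_exception(backoff.expo, ValueError, factor=3, base=5, max_tries=4)
def send_edit(payload: dict) -> dict:
    body = {"error": {"message": "Rate limit reached"}}  # placeholder for the HTTP round trip
    return validate_response(body)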
@@ -651,7 +656,7 @@ class Model:
)
print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override}")
async with aiohttp.ClientSession(raise_for_status=True) as session:
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.EDIT if codex is False else Models.CODE_EDIT,
"input": "" if text is None else text,
@@ -676,7 +681,7 @@ class Model:
factor=3,
base=5,
max_tries=6,
on_backoff=backoff_handler,
on_backoff=backoff_handler_http,
)
async def send_moderations_request(self, text):
# Use aiohttp to send the above request:
@@ -695,11 +700,11 @@ class Model:
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler,
on_backoff=backoff_handler_request,
)
async def send_summary_request(self, prompt, custom_api_key=None):
"""
@@ -718,7 +723,7 @@ class Model:
tokens = self.usage_service.count_tokens(summary_request_text)
async with aiohttp.ClientSession(raise_for_status=True) as session:
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.DAVINCI,
"prompt": summary_request_text,
@@ -746,11 +751,11 @@ class Model:
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler,
on_backoff=backoff_handler_request,
)
async def send_request(
self,
@@ -774,12 +779,16 @@ class Model:
f"Prompt must be greater than {self.prompt_min_length} characters, it is currently: {len(prompt)} characters"
)
if not max_tokens_override:
if model:
max_tokens_override = Models.get_max_tokens(model) - tokens
print(f"The prompt about to be sent is {prompt}")
print(
f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
)
async with aiohttp.ClientSession(raise_for_status=True) as session:
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": self.model if model is None else model,
"prompt": prompt,
@@ -787,7 +796,7 @@ class Model:
"temperature": self.temp if temp_override is None else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"max_tokens": self.max_tokens - tokens
if not max_tokens_override
if max_tokens_override is None
else max_tokens_override,
"presence_penalty": self.presence_penalty
if presence_penalty_override is None
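Two token-budget tweaks interact in this request path: when a per-thread model is selected, the override is derived from that model's limit, and the payload now checks `is None` so an explicitly computed (possibly small) override is not discarded by a truthiness test. A compact sketch with illustrative numbers:

from typing import Optional

def resolve_max_tokens(default_limit: int, prompt_tokens: int,
                       override: Optional[int], model_limit: Optional[int]) -> int:
    # When no override was passed but a model is set, derive the budget from
    # that model's limit (mirrors Models.get_max_tokens(model) - tokens).
    if not override and model_limit is not None:
        override = model_limit - prompt_tokens
    # The payload uses `is None` rather than truthiness, so a zero override survives.
    return default_limit - prompt_tokens if override is None else override

print(resolve_max_tokens(4000, 500, None, None))  # 3500: bot-wide default budget
print(resolve_max_tokens(4000, 500, None, 2024))  # 1524: derived from the selected model
print(resolve_max_tokens(4000, 500, 0, None))     # 0: explicit override preserved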
@@ -839,7 +848,7 @@ class Model:
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler,
on_backoff=backoff_handler_http,
)
async def send_image_request(
self, ctx, prompt, vary=None, custom_api_key=None
