From 595feaf539041947c7c17667a6813f5080f16d0b Mon Sep 17 00:00:00 2001 From: Kaveen Kumarasinghe Date: Fri, 27 Jan 2023 13:55:34 -0500 Subject: [PATCH 1/7] persistent settings --- cogs/commands.py | 10 +++ cogs/text_service_cog.py | 6 ++ models/openai_model.py | 128 ++++++++++++++++++++++++++++----------- 3 files changed, 110 insertions(+), 34 deletions(-) diff --git a/cogs/commands.py b/cogs/commands.py index b70c43f..ae7904b 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -96,6 +96,16 @@ class Commands(discord.Cog, name="Commands"): ): await self.converser_cog.settings_command(ctx, parameter, value) + @add_to_group("system") + @discord.slash_command( + name="settings-reset", + description="Reset all settings for GPT3Discord", + guild_ids=ALLOWED_GUILDS, + ) + @discord.guild_only() + async def settings_reset(self, ctx: discord.ApplicationContext): + await self.converser_cog.settings_reset_command(ctx) + @add_to_group("system") @discord.slash_command( name="local-size", diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py index 684aee2..0c0af98 100644 --- a/cogs/text_service_cog.py +++ b/cogs/text_service_cog.py @@ -1043,6 +1043,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): # Otherwise, process the settings change await self.process_settings(ctx, parameter, value) + async def settings_reset_command(self, ctx: discord.ApplicationContext): + """Command handler. Resets all settings to default""" + await ctx.defer() + self.model.reset_settings() + await ctx.respond("Settings reset to default") + # # Text-based context menu commands from here # diff --git a/models/openai_model.py b/models/openai_model.py index fbd789f..bd124c9 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -15,7 +15,15 @@ import discord import requests from PIL import Image from discord import File +from sqlitedict import SqliteDict +try: + print("Attempting to retrieve the settings DB") + SETTINGS_DB = SqliteDict("main_db.sqlite", tablename="settings", autocommit=True) + print("Retrieved the settings DB") +except Exception as e: + print("Failed to retrieve the settings DB. 
The bot is terminating.") + raise e class Mode: TEMPERATURE = "temperature" @@ -106,36 +114,68 @@ class ModelLimits: MIN_BEST_OF = 1 MAX_BEST_OF = 3 - MIN_PROMPT_MIN_LENGTH = 10 - MAX_PROMPT_MIN_LENGTH = 4096 + MIN_PROMPT_MIN_LENGTH = 5 + MAX_PROMPT_MIN_LENGTH = 4000 class Model: - def __init__(self, usage_service): - self._mode = Mode.TEMPERATURE - self._temp = 0.8 # Higher value means more random, lower value means more likely to be a coherent sentence - self._top_p = 1 # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution - self._max_tokens = 4000 # The maximum number of tokens the model can generate - self._presence_penalty = ( - 0 # Penalize new tokens based on whether they appear in the text so far - ) + + def set_initial_state(self, usage_service): + self.mode = Mode.TEMPERATURE + self.temp = SETTINGS_DB['temp'] if 'temp' in SETTINGS_DB else 0.8 # Higher value means more random, lower value means more likely to be a coherent sentence + self.top_p = SETTINGS_DB['top_p'] if 'top_p' in SETTINGS_DB else 1 # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution + self.max_tokens = SETTINGS_DB['max_tokens'] if 'max_tokens' in SETTINGS_DB else 4000 # The maximum number of tokens the model can generate + self.presence_penalty = SETTINGS_DB['presence_penalty'] if 'presence_penalty' in SETTINGS_DB else 0.0 # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text # Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.) - self._frequency_penalty = 0 - self._best_of = 1 # Number of responses to compare the loglikelihoods of - self._prompt_min_length = 8 - self._max_conversation_length = 100 - self._model = Models.DEFAULT + self.frequency_penalty = SETTINGS_DB['frequency_penalty'] if 'frequency_penalty' in SETTINGS_DB else 0.0 + self.best_of = SETTINGS_DB['best_of'] if 'best_of' in SETTINGS_DB else 1 # Number of responses to compare the loglikelihoods of + self.prompt_min_length = SETTINGS_DB['prompt_min_length'] if 'prompt_min_length' in SETTINGS_DB else 6 # The minimum length of the prompt + self.max_conversation_length = SETTINGS_DB['max_conversation_length'] if 'max_conversation_length' in SETTINGS_DB else 100 # The maximum number of conversation items to keep in memory + self.model = SETTINGS_DB['model'] if 'model' in SETTINGS_DB else Models.DEFAULT # The model to use self._low_usage_mode = False self.usage_service = usage_service self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"] - self._image_size = ImageSize.MEDIUM - self._num_images = 2 - self._summarize_conversations = True - self._summarize_threshold = 3000 + self.image_size = SETTINGS_DB['image_size'] if 'image_size' in SETTINGS_DB else ImageSize.MEDIUM + self.num_images = SETTINGS_DB['num_images'] if 'num_images' in SETTINGS_DB else 2 + self.summarize_conversations = bool(SETTINGS_DB['summarize_conversations']) if 'summarize_conversations' in SETTINGS_DB else True + self.summarize_threshold = SETTINGS_DB['summarize_threshold'] if 'summarize_threshold' in SETTINGS_DB else 3000 self.model_max_tokens = 4024 - self._welcome_message_enabled = False - self._num_static_conversation_items = 10 - self._num_conversation_lookback = 5 + self.welcome_message_enabled = bool(SETTINGS_DB['welcome_message_enabled']) if 'welcome_message_enabled' in SETTINGS_DB else 
False + self.num_static_conversation_items = SETTINGS_DB['num_static_conversation_items'] if 'num_static_conversation_items' in SETTINGS_DB else 10 + self.num_conversation_lookback = SETTINGS_DB['num_conversation_lookback'] if 'num_conversation_lookback' in SETTINGS_DB else 5 + + def reset_settings(self): + keys = ['temp', 'top_p', 'max_tokens', 'presence_penalty', 'frequency_penalty', 'best_of', 'prompt_min_length', 'max_conversation_length', 'model', 'image_size', 'num_images', 'summarize_conversations', 'summarize_threshold', 'welcome_message_enabled', 'num_static_conversation_items', 'num_conversation_lookback'] + for key in keys: + try: + del SETTINGS_DB[key] + except: + pass + self.set_initial_state(self.usage_service) + + def __init__(self, usage_service): + self._num_conversation_lookback = None + self._num_static_conversation_items = None + self._welcome_message_enabled = None + self.model_max_tokens = None + self._summarize_threshold = None + self._summarize_conversations = None + self._num_images = None + self._image_size = None + self.DAVINCI_ROLES = None + self.usage_service = None + self._low_usage_mode = None + self._model = None + self._max_conversation_length = None + self._prompt_min_length = None + self._best_of = None + self._frequency_penalty = None + self._presence_penalty = None + self._max_tokens = None + self._top_p = None + self._temp = None + self._mode = None + self.set_initial_state(usage_service) try: self.IMAGE_SAVE_PATH = os.environ["IMAGE_SAVE_PATH"] @@ -177,6 +217,7 @@ class Model: f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!" ) self._num_static_conversation_items = value + SETTINGS_DB['num_static_conversation_items'] = value @property def num_conversation_lookback(self): @@ -194,6 +235,7 @@ class Model: f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!" ) self._num_conversation_lookback = value + SETTINGS_DB['num_conversation_lookback'] = value @property def welcome_message_enabled(self): @@ -201,12 +243,15 @@ class Model: @welcome_message_enabled.setter def welcome_message_enabled(self, value): - if value.lower() == "true": - self._welcome_message_enabled = True - elif value.lower() == "false": - self._welcome_message_enabled = False - else: - raise ValueError("Value must be either `true` or `false`!") + if not isinstance(value, bool): + if value.lower() == "true": + value = True + elif value.lower() == "false": + value = False + else: + raise ValueError("Value must be either `true` or `false`!") + self._welcome_message_enabled = value + SETTINGS_DB['welcome_message_enabled'] = self._welcome_message_enabled @property def summarize_threshold(self): @@ -223,6 +268,7 @@ class Model: f"Summarize threshold should be a number between {ModelLimits.MIN_SUMMARIZE_THRESHOLD} and {ModelLimits.MAX_SUMMARIZE_THRESHOLD}!" 
) self._summarize_threshold = value + SETTINGS_DB['summarize_threshold'] = value @property def summarize_conversations(self): @@ -231,13 +277,15 @@ class Model: @summarize_conversations.setter def summarize_conversations(self, value): # convert value string into boolean - if value.lower() == "true": - value = True - elif value.lower() == "false": - value = False - else: - raise ValueError("Value must be either `true` or `false`!") + if not isinstance(value, bool): + if value.lower() == "true": + value = True + elif value.lower() == "false": + value = False + else: + raise ValueError("Value must be either `true` or `false`!") self._summarize_conversations = value + SETTINGS_DB['summarize_conversations'] = value @property def image_size(self): @@ -247,6 +295,7 @@ class Model: def image_size(self, value): if value in ImageSize.ALL_SIZES: self._image_size = value + SETTINGS_DB['image_size'] = value else: raise ValueError( f"Image size must be one of the following: {ImageSize.ALL_SIZES}" @@ -264,6 +313,7 @@ class Model: f"Number of images to generate should be a number between {ModelLimits.MIN_NUM_IMAGES} and {ModelLimits.MAX_NUM_IMAGES}!" ) self._num_images = value + SETTINGS_DB['num_images'] = value @property def low_usage_mode(self): @@ -300,6 +350,7 @@ class Model: # Set the token count self._max_tokens = Models.get_max_tokens(self._model) + SETTINGS_DB['model'] = model @property def max_conversation_length(self): @@ -317,6 +368,7 @@ class Model: f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick." ) self._max_conversation_length = value + SETTINGS_DB['max_conversation_length'] = value @property def mode(self): @@ -337,6 +389,7 @@ class Model: raise ValueError(f"Unknown mode: {value}") self._mode = value + SETTINGS_DB['mode'] = value @property def temp(self): @@ -351,6 +404,7 @@ class Model: ) self._temp = value + SETTINGS_DB['temp'] = value @property def top_p(self): @@ -364,6 +418,7 @@ class Model: f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}" ) self._top_p = value + SETTINGS_DB['top_p'] = value @property def max_tokens(self): @@ -377,6 +432,7 @@ class Model: f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}" ) self._max_tokens = value + SETTINGS_DB['max_tokens'] = value @property def presence_penalty(self): @@ -393,6 +449,7 @@ class Model: f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}" ) self._presence_penalty = value + SETTINGS_DB['presence_penalty'] = value @property def frequency_penalty(self): @@ -409,6 +466,7 @@ class Model: f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}" ) self._frequency_penalty = value + SETTINGS_DB['frequency_penalty'] = value @property def best_of(self): @@ -422,6 +480,7 @@ class Model: f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!" 
) self._best_of = value + SETTINGS_DB['best_of'] = value @property def prompt_min_length(self): @@ -438,6 +497,7 @@ class Model: f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}" ) self._prompt_min_length = value + SETTINGS_DB['prompt_min_length'] = value def backoff_handler(details): print( From f473342bffbed0114f699812cf310b0c4d092422 Mon Sep 17 00:00:00 2001 From: Kaveen Kumarasinghe Date: Fri, 27 Jan 2023 14:37:47 -0500 Subject: [PATCH 2/7] move overrides into class --- cogs/commands.py | 4 ++-- cogs/text_service_cog.py | 20 ++++++++++---------- models/openai_model.py | 7 +++++++ services/text_service.py | 28 ++++++++++++---------------- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/cogs/commands.py b/cogs/commands.py index ae7904b..78b440b 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -306,7 +306,7 @@ class Commands(discord.Cog, name="Commands"): description="Higher values means the model will take more risks", required=False, min_value=0, - max_value=1, + max_value=2, ) @discord.option( name="top_p", @@ -366,7 +366,7 @@ class Commands(discord.Cog, name="Commands"): required=False, input_type=float, min_value=0, - max_value=1, + max_value=2, ) @discord.option( name="top_p", diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py index 0c0af98..75d1d3a 100644 --- a/cogs/text_service_cog.py +++ b/cogs/text_service_cog.py @@ -11,6 +11,7 @@ import json import discord from models.embed_statics_model import EmbedStatics +from models.openai_model import Override from services.environment_service import EnvService from services.message_queue_service import Message from services.moderations_service import Moderation @@ -720,15 +721,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): await ctx.defer() + overrides = Override(temperature,top_p,frequency_penalty,presence_penalty) + await TextService.encapsulated_send( self, user.id, prompt, ctx, - temp_override=temperature, - top_p_override=top_p, - frequency_penalty_override=frequency_penalty, - presence_penalty_override=presence_penalty, + overrides=overrides, from_ask_command=True, custom_api_key=user_api_key, from_action=from_action, @@ -766,13 +766,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): await ctx.defer() + overrides = Override(temperature,top_p,0,0) + await TextService.encapsulated_send( self, user.id, prompt=text, ctx=ctx, - temp_override=temperature, - top_p_override=top_p, + overrides=overrides, instruction=instruction, from_edit_command=True, codex=codex, @@ -963,6 +964,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): self.conversation_threads[thread.id].count += 1 + overrides = Override(overrides['temperature'], overrides['top_p'], overrides['frequency_penalty'], overrides['presence_penalty']) + await TextService.encapsulated_send( self, thread.id, @@ -972,10 +975,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): [item.text for item in self.conversation_threads[thread.id].history] ), thread_message, - temp_override=overrides["temperature"], - top_p_override=overrides["top_p"], - frequency_penalty_override=overrides["frequency_penalty"], - presence_penalty_override=overrides["presence_penalty"], + overrides=overrides, user=user, model=self.conversation_threads[thread.id].model, custom_api_key=user_api_key, diff --git a/models/openai_model.py b/models/openai_model.py index bd124c9..289bb9a 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -31,6 +31,13 @@ class 
Mode: ALL_MODES = [TEMPERATURE, TOP_P] +class Override: + def __init__(self, temp=None, top_p=None, frequency=None, presence=None): + self.temperature = temp + self.top_p = top_p + self.frequency_penalty = frequency + self.presence_penalty = presence + class Models: # Text models diff --git a/services/text_service.py b/services/text_service.py index ce4af21..4cd0d90 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -8,7 +8,7 @@ from discord.ext import pages from models.embed_statics_model import EmbedStatics from services.deletion_service import Deletion -from models.openai_model import Model +from models.openai_model import Model, Override from models.user_model import EmbeddedConversationItem, RedoUser from services.environment_service import EnvService @@ -26,10 +26,7 @@ class TextService: prompt, ctx, response_message=None, - temp_override=None, - top_p_override=None, - frequency_penalty_override=None, - presence_penalty_override=None, + overrides=None, instruction=None, from_ask_command=False, from_edit_command=False, @@ -268,8 +265,8 @@ class TextService: response = await converser_cog.model.send_edit_request( text=new_prompt, instruction=instruction, - temp_override=temp_override, - top_p_override=top_p_override, + temp_override=overrides.temperature, + top_p_override=overrides.top_p, codex=codex, custom_api_key=custom_api_key, ) @@ -277,10 +274,10 @@ class TextService: response = await converser_cog.model.send_request( new_prompt, tokens=tokens, - temp_override=temp_override, - top_p_override=top_p_override, - frequency_penalty_override=frequency_penalty_override, - presence_penalty_override=presence_penalty_override, + temp_override=overrides.temperature, + top_p_override=overrides.top_p, + frequency_penalty_override=overrides.frequency_penalty, + presence_penalty_override=overrides.presence_penalty, model=model, stop=stop if not from_ask_command else None, custom_api_key=custom_api_key, @@ -622,19 +619,18 @@ class TextService: ) # set conversation overrides - overrides = converser_cog.conversation_threads[ + conversation_overrides = converser_cog.conversation_threads[ message.channel.id ].get_overrides() + overrides = Override(conversation_overrides['temperature'],conversation_overrides['top_p'],conversation_overrides['frequency_penalty'],conversation_overrides['presence_penalty']) + await TextService.encapsulated_send( converser_cog, message.channel.id, primary_prompt, message, - temp_override=overrides["temperature"], - top_p_override=overrides["top_p"], - frequency_penalty_override=overrides["frequency_penalty"], - presence_penalty_override=overrides["presence_penalty"], + overrides=overrides, model=converser_cog.conversation_threads[message.channel.id].model, custom_api_key=user_api_key, ) From 57271f10519bce8bd917b986db6e7df99d67fe26 Mon Sep 17 00:00:00 2001 From: Kaveen Kumarasinghe Date: Fri, 27 Jan 2023 15:17:27 -0500 Subject: [PATCH 3/7] Summarize, fix context menu actions --- cogs/commands.py | 20 ++++++++++----- cogs/text_service_cog.py | 53 ++++++++++++++++++++++++++++++++++------ gpt3discord.py | 2 +- services/text_service.py | 9 ++++--- 4 files changed, 67 insertions(+), 17 deletions(-) diff --git a/cogs/commands.py b/cogs/commands.py index 78b440b..ef48595 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -633,21 +633,29 @@ class Commands(discord.Cog, name="Commands"): "Translations are disabled on this server.", ephemeral=True ) + # @discord.message_command( + # name="Paraphrase", + # guild_ids=ALLOWED_GUILDS, + # 
checks=[Check.check_gpt_roles()], + # ) + # async def paraphrase_action(self, ctx, message: discord.Message): + # await self.converser_cog.paraphrase_action(ctx, message) + @discord.message_command( - name="Paraphrase", + name="Elaborate", guild_ids=ALLOWED_GUILDS, checks=[Check.check_gpt_roles()], ) - async def paraphrase_action(self, ctx, message: discord.Message): - await self.converser_cog.paraphrase_action(ctx, message) + async def elaborate_action(self, ctx, message: discord.Message): + await self.converser_cog.elaborate_action(ctx, message) @discord.message_command( - name="Elaborate", + name="Summarize", guild_ids=ALLOWED_GUILDS, checks=[Check.check_gpt_roles()], ) - async def elaborate_action(self, ctx, message: discord.Message): - await self.converser_cog.elaborate_action(ctx, message) + async def summarize_action(self, ctx, message: discord.Message): + await self.converser_cog.summarize_action(ctx, message) # Search slash commands @discord.slash_command( diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py index 75d1d3a..b8df9ec 100644 --- a/cogs/text_service_cog.py +++ b/cogs/text_service_cog.py @@ -697,7 +697,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): top_p: float, frequency_penalty: float, presence_penalty: float, - from_action=None, + from_ask_action=None, + from_other_action=None, ): """Command handler. Requests and returns a generation with no extras to the completion endpoint @@ -731,7 +732,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): overrides=overrides, from_ask_command=True, custom_api_key=user_api_key, - from_action=from_action, + from_ask_action=from_ask_action, + from_other_action=from_other_action, ) async def edit_command( @@ -1056,24 +1058,61 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"): async def ask_gpt_action(self, ctx, message: discord.Message): """Message command. Return the message""" prompt = await self.mention_to_username(ctx, message.content) - await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt) + await self.ask_command(ctx, prompt, None, None, None, None, from_ask_action=prompt) async def paraphrase_action(self, ctx, message: discord.Message): """Message command. paraphrase the current message content""" user = ctx.user prompt = await self.mention_to_username(ctx, message.content) + from_other_action = prompt+"\nParaphrased:" # Construct the paraphrase prompt - prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \n\nParaphrased:" + prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \nParaphrased:" - await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt) + tokens = self.model.usage_service.count_tokens(prompt) + if tokens > self.model.max_tokens-1000: + await ctx.respond( + f"This message is too long to paraphrase.", + ephemeral=True, delete_after=10, + ) + return + + await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action) async def elaborate_action(self, ctx, message: discord.Message): """Message command. 
elaborate on the subject of the current message content"""
        user = ctx.user
        prompt = await self.mention_to_username(ctx, message.content)
+        from_other_action = prompt+"\nElaboration:"

        # Construct the elaboration prompt
-        prompt = f"Elaborate upon the subject of the following message: {prompt} \n\nElaboration:"
+        prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"
+
+        tokens = self.model.usage_service.count_tokens(prompt)
+        if tokens > self.model.max_tokens-1000:
+            await ctx.respond(
+                f"This message is too long to elaborate on.",
+                ephemeral=True, delete_after=10,
+            )
+            return
+
+        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
+
+    async def summarize_action(self, ctx, message: discord.Message):
+        """Message command. Summarize the current message content"""
+        user = ctx.user
+        prompt = await self.mention_to_username(ctx, message.content)
+        from_other_action = "Message at message link: " + message.jump_url + "\nSummarized:"
+
+        # Construct the summarize prompt
+        prompt = f"Summarize the following message, be as short and concise as possible: {prompt} \n\nSummary:"
+
+        tokens = self.model.usage_service.count_tokens(prompt)
+        if tokens > self.model.max_tokens-300:
+            await ctx.respond(
+                f"Your prompt is too long. It has {tokens} tokens, but the maximum is {self.model.max_tokens-300}.",
+                ephemeral=True, delete_after=10,
+            )
+            return
-        await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
+        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
diff --git a/gpt3discord.py b/gpt3discord.py
index e57b460..abae2d2 100644
--- a/gpt3discord.py
+++ b/gpt3discord.py
@@ -237,7 +237,7 @@ def init():
     signal.signal(signal.SIGTERM, cleanup_pid_file)

     if check_process_file(PID_FILE):
-        print("Process ID file already exists")
+        print("Process ID file already exists. Remove the file if you're sure another instance isn't running with the command: rm bot.pid")
         sys.exit(1)
     else:
         with PID_FILE.open("w") as f:
diff --git a/services/text_service.py b/services/text_service.py
index 4cd0d90..bfdbbb8 100644
--- a/services/text_service.py
+++ b/services/text_service.py
@@ -36,7 +36,8 @@ class TextService:
         custom_api_key=None,
         edited_request=False,
         redo_request=False,
-        from_action=False,
+        from_ask_action=False,
+        from_other_action=None,
     ):
         """General service function for sending and receiving gpt generations
@@ -288,7 +289,9 @@ class TextService:
                 str(response["choices"][0]["text"])
             )

-            if from_ask_command or from_action:
+            if from_other_action:
+                response_text = f"***{from_other_action}*** {response_text}"
+            elif from_ask_command or from_ask_action:
                 response_text = f"***{prompt}***{response_text}"
             elif from_edit_command:
                 if codex:
@@ -483,7 +486,7 @@ class TextService:
         # Error catching for OpenAI model value errors
         except ValueError as e:
             embed = EmbedStatics.get_invalid_value_embed(e)
-            if from_action:
+            if from_ask_action:
                 await ctx.respond(embed=embed, ephemeral=True)
             elif from_context:
                 await ctx.send_followup(embed=embed, ephemeral=True)
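Patch 3 introduces the same guard in each message command: count the prompt's tokens and bail out unless enough headroom remains for the completion. That check is self-contained enough to factor out; the sketch below shows the idea (the helper name and signature are illustrative and not part of the series; the patches keep the check inline):

    def prompt_fits(count_tokens, max_tokens: int, prompt: str, headroom: int) -> bool:
        # count_tokens: any callable returning the token count of a string,
        # e.g. the bot's usage_service.count_tokens.
        # headroom: tokens reserved for the model's completion (1000, 500,
        # and 300 in the paraphrase, elaborate (after patch 4), and
        # summarize actions respectively).
        return count_tokens(prompt) <= max_tokens - headroom

Each action would then reduce to a single call such as `if not prompt_fits(self.model.usage_service.count_tokens, self.model.max_tokens, prompt, 1000): return`, keeping the per-command error responses where they are.
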
From d8c3b2bd322b8714e2ee5ed1ed8b6ba225ae8b87 Mon Sep 17 00:00:00 2001
From: Kaveen Kumarasinghe
Date: Fri, 27 Jan 2023 15:32:27 -0500
Subject: [PATCH 4/7] bump version

---
 cogs/text_service_cog.py | 2 +-
 gpt3discord.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py
index b8df9ec..b0c075a 100644
--- a/cogs/text_service_cog.py
+++ b/cogs/text_service_cog.py
@@ -1089,7 +1089,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"

         tokens = self.model.usage_service.count_tokens(prompt)
-        if tokens > self.model.max_tokens-1000:
+        if tokens > self.model.max_tokens-500:
             await ctx.respond(
                 f"This message is too long to elaborate on.",
                 ephemeral=True, delete_after=10,
             )
diff --git a/gpt3discord.py b/gpt3discord.py
index abae2d2..5226d5e 100644
--- a/gpt3discord.py
+++ b/gpt3discord.py
@@ -30,7 +30,7 @@ from services.environment_service import EnvService
 from models.openai_model import Model


-__version__ = "8.7.5"
+__version__ = "9.0"


 PID_FILE = Path("bot.pid")
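Patch 5 below is a mechanical psf/black reformat, but it makes patch 1's settings bootstrap much easier to read: every field is initialized as `SETTINGS_DB[key] if key in SETTINGS_DB else default`. Since SqliteDict implements the standard mapping interface, the same load-or-default step can be written with `.get()`; a minimal sketch (the file and table names follow patch 1, the `load` helper is illustrative only):

    from sqlitedict import SqliteDict

    SETTINGS_DB = SqliteDict("main_db.sqlite", tablename="settings", autocommit=True)

    def load(key, default):
        # Equivalent to the patch's `SETTINGS_DB[key] if key in SETTINGS_DB else default`
        return SETTINGS_DB.get(key, default)

    temp = load("temp", 0.8)            # defaults mirror set_initial_state
    max_tokens = load("max_tokens", 4000)
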
From f8d797f3a86df6c20948eb4028a5dae729a4b0eb Mon Sep 17 00:00:00 2001
From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Date: Fri, 27 Jan 2023 20:33:16 +0000
Subject: [PATCH 5/7] Format Python code with psf/black push

---
 cogs/text_service_cog.py | 50 +++++++++-----
 gpt3discord.py | 4 +-
 models/openai_model.py | 140 +++++++++++++++++++++++++++----------
 services/text_service.py | 8 ++-
 4 files changed, 148 insertions(+), 54 deletions(-)

diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py
index b0c075a..88fb3bb 100644
--- a/cogs/text_service_cog.py
+++ b/cogs/text_service_cog.py
@@ -722,7 +722,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):

         await ctx.defer()

-        overrides = Override(temperature,top_p,frequency_penalty,presence_penalty)
+        overrides = Override(temperature, top_p, frequency_penalty, presence_penalty)

         await TextService.encapsulated_send(
             self,
@@ -768,7 +768,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):

         await ctx.defer()

-        overrides = Override(temperature,top_p,0,0)
+        overrides = Override(temperature, top_p, 0, 0)

         await TextService.encapsulated_send(
             self,
@@ -966,7 +966,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):

         self.conversation_threads[thread.id].count += 1

-        overrides = Override(overrides['temperature'], overrides['top_p'], overrides['frequency_penalty'], overrides['presence_penalty'])
+        overrides = Override(
+            overrides["temperature"],
+            overrides["top_p"],
+            overrides["frequency_penalty"],
+            overrides["presence_penalty"],
+        )

         await TextService.encapsulated_send(
             self,
@@ -1058,61 +1063,74 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     async def ask_gpt_action(self, ctx, message: discord.Message):
         """Message command. Return the message"""
         prompt = await self.mention_to_username(ctx, message.content)
-        await self.ask_command(ctx, prompt, None, None, None, None, from_ask_action=prompt)
+        await self.ask_command(
+            ctx, prompt, None, None, None, None, from_ask_action=prompt
+        )

     async def paraphrase_action(self, ctx, message: discord.Message):
         """Message command. paraphrase the current message content"""
         user = ctx.user
         prompt = await self.mention_to_username(ctx, message.content)
         from_other_action = prompt + "\nParaphrased:"

         # Construct the paraphrase prompt
         prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \nParaphrased:"

         tokens = self.model.usage_service.count_tokens(prompt)
-        if tokens > self.model.max_tokens-1000:
+        if tokens > self.model.max_tokens - 1000:
             await ctx.respond(
                 f"This message is too long to paraphrase.",
-                ephemeral=True, delete_after=10,
+                ephemeral=True,
+                delete_after=10,
             )
             return

-        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
+        await self.ask_command(
+            ctx, prompt, None, None, None, None, from_other_action=from_other_action
+        )

     async def elaborate_action(self, ctx, message: discord.Message):
         """Message command. elaborate on the subject of the current message content"""
         user = ctx.user
         prompt = await self.mention_to_username(ctx, message.content)
         from_other_action = prompt + "\nElaboration:"

         # Construct the elaboration prompt
         prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"

         tokens = self.model.usage_service.count_tokens(prompt)
-        if tokens > self.model.max_tokens-500:
+        if tokens > self.model.max_tokens - 500:
             await ctx.respond(
                 f"This message is too long to elaborate on.",
-                ephemeral=True, delete_after=10,
+                ephemeral=True,
+                delete_after=10,
             )
             return

-        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
+        await self.ask_command(
+            ctx, prompt, None, None, None, None, from_other_action=from_other_action
+        )

     async def summarize_action(self, ctx, message: discord.Message):
         """Message command. Summarize the current message content"""
         user = ctx.user
         prompt = await self.mention_to_username(ctx, message.content)
         from_other_action = (
             "Message at message link: " + message.jump_url + "\nSummarized:"
         )

         # Construct the summarize prompt
         prompt = f"Summarize the following message, be as short and concise as possible: {prompt} \n\nSummary:"

         tokens = self.model.usage_service.count_tokens(prompt)
-        if tokens > self.model.max_tokens-300:
+        if tokens > self.model.max_tokens - 300:
             await ctx.respond(
                 f"Your prompt is too long. It has {tokens} tokens, but the maximum is {self.model.max_tokens-300}.",
-                ephemeral=True, delete_after=10,
+                ephemeral=True,
+                delete_after=10,
             )
             return

-        await self.ask_command(ctx, prompt, None, None, None, None, from_other_action=from_other_action)
+        await self.ask_command(
+            ctx, prompt, None, None, None, None, from_other_action=from_other_action
+        )
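The Override construction being reformatted above is the parameter object patch 2 introduced to replace the four separate *_override keyword arguments on encapsulated_send. Note that encapsulated_send defaults `overrides` to None yet dereferences `overrides.temperature` unconditionally, which is why patch 6 below ends up passing `Override(None, None, None, None)` from the redo button. A defensive variant (a sketch of an alternative, not what the series does) would normalize None at the entry point:

    class Override:
        def __init__(self, temp=None, top_p=None, frequency=None, presence=None):
            self.temperature = temp
            self.top_p = top_p
            self.frequency_penalty = frequency
            self.presence_penalty = presence

    def normalize(overrides):
        # Treat a missing parameter object as "no overrides at all",
        # so callers may simply pass None.
        return overrides if overrides is not None else Override()

With that, `normalize(overrides).temperature` is always safe and the Override(None, None, None, None) call site in patch 6 becomes unnecessary.
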
diff --git a/gpt3discord.py b/gpt3discord.py
index 5226d5e..ca86ab0 100644
--- a/gpt3discord.py
+++ b/gpt3discord.py
@@ -237,7 +237,9 @@ def init():
     signal.signal(signal.SIGTERM, cleanup_pid_file)

     if check_process_file(PID_FILE):
-        print("Process ID file already exists. Remove the file if you're sure another instance isn't running with the command: rm bot.pid")
+        print(
+            "Process ID file already exists. Remove the file if you're sure another instance isn't running with the command: rm bot.pid"
+        )
         sys.exit(1)
     else:
         with PID_FILE.open("w") as f:
diff --git a/models/openai_model.py b/models/openai_model.py
index 289bb9a..03c5d28 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -25,12 +25,14 @@ except Exception as e:
     print("Failed to retrieve the settings DB. 
The bot is terminating.") raise e + class Mode: TEMPERATURE = "temperature" TOP_P = "top_p" ALL_MODES = [TEMPERATURE, TOP_P] + class Override: def __init__(self, temp=None, top_p=None, frequency=None, presence=None): self.temperature = temp @@ -126,33 +128,101 @@ class ModelLimits: class Model: - def set_initial_state(self, usage_service): self.mode = Mode.TEMPERATURE - self.temp = SETTINGS_DB['temp'] if 'temp' in SETTINGS_DB else 0.8 # Higher value means more random, lower value means more likely to be a coherent sentence - self.top_p = SETTINGS_DB['top_p'] if 'top_p' in SETTINGS_DB else 1 # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution - self.max_tokens = SETTINGS_DB['max_tokens'] if 'max_tokens' in SETTINGS_DB else 4000 # The maximum number of tokens the model can generate - self.presence_penalty = SETTINGS_DB['presence_penalty'] if 'presence_penalty' in SETTINGS_DB else 0.0 # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text + self.temp = ( + SETTINGS_DB["temp"] if "temp" in SETTINGS_DB else 0.8 + ) # Higher value means more random, lower value means more likely to be a coherent sentence + self.top_p = ( + SETTINGS_DB["top_p"] if "top_p" in SETTINGS_DB else 1 + ) # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution + self.max_tokens = ( + SETTINGS_DB["max_tokens"] if "max_tokens" in SETTINGS_DB else 4000 + ) # The maximum number of tokens the model can generate + self.presence_penalty = ( + SETTINGS_DB["presence_penalty"] + if "presence_penalty" in SETTINGS_DB + else 0.0 + ) # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text # Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.) 
- self.frequency_penalty = SETTINGS_DB['frequency_penalty'] if 'frequency_penalty' in SETTINGS_DB else 0.0 - self.best_of = SETTINGS_DB['best_of'] if 'best_of' in SETTINGS_DB else 1 # Number of responses to compare the loglikelihoods of - self.prompt_min_length = SETTINGS_DB['prompt_min_length'] if 'prompt_min_length' in SETTINGS_DB else 6 # The minimum length of the prompt - self.max_conversation_length = SETTINGS_DB['max_conversation_length'] if 'max_conversation_length' in SETTINGS_DB else 100 # The maximum number of conversation items to keep in memory - self.model = SETTINGS_DB['model'] if 'model' in SETTINGS_DB else Models.DEFAULT # The model to use + self.frequency_penalty = ( + SETTINGS_DB["frequency_penalty"] + if "frequency_penalty" in SETTINGS_DB + else 0.0 + ) + self.best_of = ( + SETTINGS_DB["best_of"] if "best_of" in SETTINGS_DB else 1 + ) # Number of responses to compare the loglikelihoods of + self.prompt_min_length = ( + SETTINGS_DB["prompt_min_length"] + if "prompt_min_length" in SETTINGS_DB + else 6 + ) # The minimum length of the prompt + self.max_conversation_length = ( + SETTINGS_DB["max_conversation_length"] + if "max_conversation_length" in SETTINGS_DB + else 100 + ) # The maximum number of conversation items to keep in memory + self.model = ( + SETTINGS_DB["model"] if "model" in SETTINGS_DB else Models.DEFAULT + ) # The model to use self._low_usage_mode = False self.usage_service = usage_service self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"] - self.image_size = SETTINGS_DB['image_size'] if 'image_size' in SETTINGS_DB else ImageSize.MEDIUM - self.num_images = SETTINGS_DB['num_images'] if 'num_images' in SETTINGS_DB else 2 - self.summarize_conversations = bool(SETTINGS_DB['summarize_conversations']) if 'summarize_conversations' in SETTINGS_DB else True - self.summarize_threshold = SETTINGS_DB['summarize_threshold'] if 'summarize_threshold' in SETTINGS_DB else 3000 + self.image_size = ( + SETTINGS_DB["image_size"] + if "image_size" in SETTINGS_DB + else ImageSize.MEDIUM + ) + self.num_images = ( + SETTINGS_DB["num_images"] if "num_images" in SETTINGS_DB else 2 + ) + self.summarize_conversations = ( + bool(SETTINGS_DB["summarize_conversations"]) + if "summarize_conversations" in SETTINGS_DB + else True + ) + self.summarize_threshold = ( + SETTINGS_DB["summarize_threshold"] + if "summarize_threshold" in SETTINGS_DB + else 3000 + ) self.model_max_tokens = 4024 - self.welcome_message_enabled = bool(SETTINGS_DB['welcome_message_enabled']) if 'welcome_message_enabled' in SETTINGS_DB else False - self.num_static_conversation_items = SETTINGS_DB['num_static_conversation_items'] if 'num_static_conversation_items' in SETTINGS_DB else 10 - self.num_conversation_lookback = SETTINGS_DB['num_conversation_lookback'] if 'num_conversation_lookback' in SETTINGS_DB else 5 + self.welcome_message_enabled = ( + bool(SETTINGS_DB["welcome_message_enabled"]) + if "welcome_message_enabled" in SETTINGS_DB + else False + ) + self.num_static_conversation_items = ( + SETTINGS_DB["num_static_conversation_items"] + if "num_static_conversation_items" in SETTINGS_DB + else 10 + ) + self.num_conversation_lookback = ( + SETTINGS_DB["num_conversation_lookback"] + if "num_conversation_lookback" in SETTINGS_DB + else 5 + ) def reset_settings(self): - keys = ['temp', 'top_p', 'max_tokens', 'presence_penalty', 'frequency_penalty', 'best_of', 'prompt_min_length', 'max_conversation_length', 'model', 'image_size', 'num_images', 'summarize_conversations', 'summarize_threshold', 'welcome_message_enabled', 
'num_static_conversation_items', 'num_conversation_lookback'] + keys = [ + "temp", + "top_p", + "max_tokens", + "presence_penalty", + "frequency_penalty", + "best_of", + "prompt_min_length", + "max_conversation_length", + "model", + "image_size", + "num_images", + "summarize_conversations", + "summarize_threshold", + "welcome_message_enabled", + "num_static_conversation_items", + "num_conversation_lookback", + ] for key in keys: try: del SETTINGS_DB[key] @@ -224,7 +294,7 @@ class Model: f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!" ) self._num_static_conversation_items = value - SETTINGS_DB['num_static_conversation_items'] = value + SETTINGS_DB["num_static_conversation_items"] = value @property def num_conversation_lookback(self): @@ -242,7 +312,7 @@ class Model: f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!" ) self._num_conversation_lookback = value - SETTINGS_DB['num_conversation_lookback'] = value + SETTINGS_DB["num_conversation_lookback"] = value @property def welcome_message_enabled(self): @@ -258,7 +328,7 @@ class Model: else: raise ValueError("Value must be either `true` or `false`!") self._welcome_message_enabled = value - SETTINGS_DB['welcome_message_enabled'] = self._welcome_message_enabled + SETTINGS_DB["welcome_message_enabled"] = self._welcome_message_enabled @property def summarize_threshold(self): @@ -275,7 +345,7 @@ class Model: f"Summarize threshold should be a number between {ModelLimits.MIN_SUMMARIZE_THRESHOLD} and {ModelLimits.MAX_SUMMARIZE_THRESHOLD}!" ) self._summarize_threshold = value - SETTINGS_DB['summarize_threshold'] = value + SETTINGS_DB["summarize_threshold"] = value @property def summarize_conversations(self): @@ -292,7 +362,7 @@ class Model: else: raise ValueError("Value must be either `true` or `false`!") self._summarize_conversations = value - SETTINGS_DB['summarize_conversations'] = value + SETTINGS_DB["summarize_conversations"] = value @property def image_size(self): @@ -302,7 +372,7 @@ class Model: def image_size(self, value): if value in ImageSize.ALL_SIZES: self._image_size = value - SETTINGS_DB['image_size'] = value + SETTINGS_DB["image_size"] = value else: raise ValueError( f"Image size must be one of the following: {ImageSize.ALL_SIZES}" @@ -320,7 +390,7 @@ class Model: f"Number of images to generate should be a number between {ModelLimits.MIN_NUM_IMAGES} and {ModelLimits.MAX_NUM_IMAGES}!" ) self._num_images = value - SETTINGS_DB['num_images'] = value + SETTINGS_DB["num_images"] = value @property def low_usage_mode(self): @@ -357,7 +427,7 @@ class Model: # Set the token count self._max_tokens = Models.get_max_tokens(self._model) - SETTINGS_DB['model'] = model + SETTINGS_DB["model"] = model @property def max_conversation_length(self): @@ -375,7 +445,7 @@ class Model: f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick." 
) self._max_conversation_length = value - SETTINGS_DB['max_conversation_length'] = value + SETTINGS_DB["max_conversation_length"] = value @property def mode(self): @@ -396,7 +466,7 @@ class Model: raise ValueError(f"Unknown mode: {value}") self._mode = value - SETTINGS_DB['mode'] = value + SETTINGS_DB["mode"] = value @property def temp(self): @@ -411,7 +481,7 @@ class Model: ) self._temp = value - SETTINGS_DB['temp'] = value + SETTINGS_DB["temp"] = value @property def top_p(self): @@ -425,7 +495,7 @@ class Model: f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}" ) self._top_p = value - SETTINGS_DB['top_p'] = value + SETTINGS_DB["top_p"] = value @property def max_tokens(self): @@ -439,7 +509,7 @@ class Model: f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}" ) self._max_tokens = value - SETTINGS_DB['max_tokens'] = value + SETTINGS_DB["max_tokens"] = value @property def presence_penalty(self): @@ -456,7 +526,7 @@ class Model: f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}" ) self._presence_penalty = value - SETTINGS_DB['presence_penalty'] = value + SETTINGS_DB["presence_penalty"] = value @property def frequency_penalty(self): @@ -473,7 +543,7 @@ class Model: f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}" ) self._frequency_penalty = value - SETTINGS_DB['frequency_penalty'] = value + SETTINGS_DB["frequency_penalty"] = value @property def best_of(self): @@ -487,7 +557,7 @@ class Model: f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!" 
) self._best_of = value - SETTINGS_DB['best_of'] = value + SETTINGS_DB["best_of"] = value @property def prompt_min_length(self): @@ -504,7 +574,7 @@ class Model: f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}" ) self._prompt_min_length = value - SETTINGS_DB['prompt_min_length'] = value + SETTINGS_DB["prompt_min_length"] = value def backoff_handler(details): print( diff --git a/services/text_service.py b/services/text_service.py index bfdbbb8..c2e960a 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -625,8 +625,12 @@ class TextService: conversation_overrides = converser_cog.conversation_threads[ message.channel.id ].get_overrides() - overrides = Override(conversation_overrides['temperature'],conversation_overrides['top_p'],conversation_overrides['frequency_penalty'],conversation_overrides['presence_penalty']) - + overrides = Override( + conversation_overrides["temperature"], + conversation_overrides["top_p"], + conversation_overrides["frequency_penalty"], + conversation_overrides["presence_penalty"], + ) await TextService.encapsulated_send( converser_cog, From 87cd1f01f63bd43ac1b10ba0e72b577cb6f9a12e Mon Sep 17 00:00:00 2001 From: Kaveen Kumarasinghe Date: Fri, 27 Jan 2023 18:14:19 -0500 Subject: [PATCH 6/7] fix convo redo --- gpt3discord.py | 2 +- services/text_service.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/gpt3discord.py b/gpt3discord.py index ca86ab0..77356d9 100644 --- a/gpt3discord.py +++ b/gpt3discord.py @@ -30,7 +30,7 @@ from services.environment_service import EnvService from models.openai_model import Model -__version__ = "9.0" +__version__ = "9.0.1" PID_FILE = Path("bot.pid") diff --git a/services/text_service.py b/services/text_service.py index c2e960a..e4372e7 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -839,6 +839,7 @@ class RedoButton(discord.ui.Button["ConversationView"]): await TextService.encapsulated_send( self.converser_cog, + overrides=Override(None,None,None,None), id=user_id, prompt=prompt, instruction=instruction, From a3ce3577a7d243357733a70989f412f16345b6e9 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Fri, 27 Jan 2023 23:14:38 +0000 Subject: [PATCH 7/7] Format Python code with psf/black push --- services/text_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/text_service.py b/services/text_service.py index e4372e7..535f158 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -839,7 +839,7 @@ class RedoButton(discord.ui.Button["ConversationView"]): await TextService.encapsulated_send( self.converser_cog, - overrides=Override(None,None,None,None), + overrides=Override(None, None, None, None), id=user_id, prompt=prompt, instruction=instruction,
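

Taken as a whole, the series gives every mutable Model field the same write-through shape: the property setter validates, assigns the private attribute, and mirrors the accepted value into the SqliteDict, while set_initial_state reads each stored value back (falling back to a default) and /system settings-reset deletes the stored keys and re-runs the bootstrap. A condensed, self-contained sketch of that pattern (the class and the 0 to 2 temperature limits are illustrative; the SqliteDict usage mirrors the patches):

    from sqlitedict import SqliteDict

    SETTINGS_DB = SqliteDict("main_db.sqlite", tablename="settings", autocommit=True)

    class PersistentSettings:
        def __init__(self):
            # Bootstrap: read the persisted value back, or fall back to the default.
            self._temp = SETTINGS_DB.get("temp", 0.8)

        @property
        def temp(self):
            return self._temp

        @temp.setter
        def temp(self, value):
            value = float(value)
            if not 0 <= value <= 2:
                raise ValueError(f"Temperature must be between 0 and 2, it is currently: {value}")
            self._temp = value
            SETTINGS_DB["temp"] = value  # write-through: every accepted change is persisted

        def reset(self):
            SETTINGS_DB.pop("temp", None)  # drop the stored value...
            self.__init__()                # ...and re-run the bootstrap for the default

Because the store is opened with autocommit=True, each assignment is flushed to main_db.sqlite immediately, so settings survive a restart with no explicit save step. One loose end visible in the series: the mode setter writes SETTINGS_DB["mode"], but set_initial_state always resets self.mode to Mode.TEMPERATURE and "mode" is not in reset_settings' key list, so the persisted mode is never read back as of patch 7.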