diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index 1842ef6..e810829 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -29,14 +29,14 @@ original_message = {}
 
 class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
     def __init__(
-            self,
-            bot,
-            usage_service,
-            model,
-            message_queue,
-            deletion_queue,
-            DEBUG_GUILD,
-            DEBUG_CHANNEL,
+        self,
+        bot,
+        usage_service,
+        model,
+        message_queue,
+        deletion_queue,
+        DEBUG_GUILD,
+        DEBUG_CHANNEL,
     ):
         self.debug_channel = None
         self.bot = bot
@@ -134,13 +134,13 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
     def check_conversing(self, message):
         cond1 = (
-                message.author.id in self.conversating_users
-                and message.channel.name in ["gpt3", "general-bot", "bot"]
+            message.author.id in self.conversating_users
+            and message.channel.name in ["gpt3", "general-bot", "bot"]
         )
         cond2 = (
-                message.author.id in self.conversating_users
-                and message.author.id in self.conversation_threads
-                and message.channel.id == self.conversation_threads[message.author.id]
+            message.author.id in self.conversating_users
+            and message.author.id in self.conversation_threads
+            and message.channel.id == self.conversation_threads[message.author.id]
         )
 
         # If the trimmed message starts with a Tilde, then we want to not contribute this to the conversation
@@ -286,7 +286,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
     async def paginate_and_send(self, response_text, message):
         response_text = [
-            response_text[i: i + self.TEXT_CUTOFF]
+            response_text[i : i + self.TEXT_CUTOFF]
             for i in range(0, len(response_text), self.TEXT_CUTOFF)
         ]
         # Send each chunk as a message
@@ -303,7 +303,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
     async def queue_debug_chunks(self, debug_message, message, debug_channel):
         debug_message_chunks = [
-            debug_message[i: i + self.TEXT_CUTOFF]
+            debug_message[i : i + self.TEXT_CUTOFF]
             for i in range(0, len(debug_message), self.TEXT_CUTOFF)
         ]
 
@@ -346,8 +346,8 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
         if message.author.id in self.conversating_users:
             # If the user has reached the max conversation length, end the conversation
             if (
-                    self.conversating_users[message.author.id].count
-                    >= self.model.max_conversation_length
+                self.conversating_users[message.author.id].count
+                >= self.model.max_conversation_length
             ):
                 await message.reply(
                     "You have reached the maximum conversation length. You have ended the conversation with GPT3, and it has ended."
@@ -360,14 +360,19 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
         new_conversation_history = []
         new_conversation_history.append(self.CONVERSATION_STARTER_TEXT)
-        new_conversation_history.append("\nThis conversation has some context from earlier, which has been summarized as follows: ")
+        new_conversation_history.append(
+            "\nThis conversation has some context from earlier, which has been summarized as follows: "
+        )
         new_conversation_history.append(summarized_text)
-        new_conversation_history.append("\nContinue the conversation, paying very close attention to things Human told you, such as their name, and personal details.\n")
+        new_conversation_history.append(
+            "\nContinue the conversation, paying very close attention to things Human told you, such as their name, and personal details.\n"
+        )
         # Get the last entry from the user's conversation history
-        new_conversation_history.append(self.conversating_users[message.author.id].history[-1]+"\n")
+        new_conversation_history.append(
+            self.conversating_users[message.author.id].history[-1] + "\n"
+        )
         self.conversating_users[message.author.id].history = new_conversation_history
 
-
     async def encapsulated_send(self, message, prompt, response_message=None):
         # Append a newline, and GPTie: to the prompt
 
@@ -381,33 +386,42 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
             tokens = self.usage_service.count_tokens(new_prompt)
 
             if tokens > self.model.summarize_threshold:  # 250 is a buffer
-                    if self.model.summarize_conversations:
-                        await message.reply(
-                            "I'm currently summarizing our current conversation so we can keep chatting, "
-                            "give me one moment!")
+                if self.model.summarize_conversations:
+                    await message.reply(
+                        "I'm currently summarizing our current conversation so we can keep chatting, "
+                        "give me one moment!"
+                    )
 
-                        self.summarize_conversation(message, new_prompt)
+                    self.summarize_conversation(message, new_prompt)
 
-                        # Check again if the prompt is about to go past the token limit
-                        new_prompt = "".join(self.conversating_users[message.author.id].history) + "\nGPTie: "
+                    # Check again if the prompt is about to go past the token limit
+                    new_prompt = (
+                        "".join(self.conversating_users[message.author.id].history)
+                        + "\nGPTie: "
+                    )
 
-                        tokens = self.usage_service.count_tokens(new_prompt)
+                    tokens = self.usage_service.count_tokens(new_prompt)
 
-                        if tokens > self.model.summarize_threshold - 150:  # 150 is a buffer for the second stage
-                            await message.reply("I tried to summarize our current conversation so we could keep chatting, "
-                                                "but it still went over the token "
-                                                "limit. Please try again later.")
+                    if (
+                        tokens > self.model.summarize_threshold - 150
+                    ):  # 150 is a buffer for the second stage
+                        await message.reply(
+                            "I tried to summarize our current conversation so we could keep chatting, "
+                            "but it still went over the token "
+                            "limit. Please try again later."
+                        )
 
-                            await self.end_conversation(message)
-                            return
-                    else:
-                        await message.reply("The conversation context limit has been reached.")
                         await self.end_conversation(message)
                         return
+                else:
+                    await message.reply(
+                        "The conversation context limit has been reached."
+                    )
+                    await self.end_conversation(message)
+                    return
 
         response = self.model.send_request(new_prompt, message)
-
         response_text = response["choices"][0]["text"]
 
         if re.search(r"<@!?\d+>|<@&\d+>|<#\d+>", response_text):
@@ -444,8 +458,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
                 original_message[message.author.id] = message.id
         else:
             # We have response_text available, this is the original message that we want to edit
-            await response_message.edit(content=response_text.replace("<|endofstatement|>", ""))
-
+            await response_message.edit(
+                content=response_text.replace("<|endofstatement|>", "")
+            )
 
         # After each response, check if the user has reached the conversation limit in terms of messages or time.
         await self.check_conversation_limit(message)
@@ -484,17 +499,18 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
         if after.author.id in self.conversating_users:
             # Remove the last two elements from the history array and add the new Human: prompt
-            self.conversating_users[after.author.id].history = self.conversating_users[after.author.id].history[
-                                                               :-2]
+            self.conversating_users[
+                after.author.id
+            ].history = self.conversating_users[after.author.id].history[:-2]
             self.conversating_users[after.author.id].history.append(
-                f"\nHuman: {after.content}<|endofstatement|>\n")
-            edited_content = "".join(self.conversating_users[after.author.id].history)
+                f"\nHuman: {after.content}<|endofstatement|>\n"
+            )
+            edited_content = "".join(
+                self.conversating_users[after.author.id].history
+            )
             self.conversating_users[after.author.id].count += 1
 
-            await self.encapsulated_send(
-                message,
-                edited_content, response_message
-            )
+            await self.encapsulated_send(message, edited_content, response_message)
 
             redo_users[after.author.id].prompt = after.content
 
@@ -532,7 +548,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
 
         # A global GLOBAL_COOLDOWN_TIME timer for all users
         if (message.author.id in self.last_used) and (
-                time.time() - self.last_used[message.author.id] < self.GLOBAL_COOLDOWN_TIME
+            time.time() - self.last_used[message.author.id] < self.GLOBAL_COOLDOWN_TIME
         ):
             await message.reply(
                 "You must wait "
@@ -577,9 +593,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             # If the user is not already conversating, start a conversation with GPT3
             self.conversating_users[message.author.id] = User(message.author.id)
             # Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
-            self.conversating_users[
-                message.author.id
-            ].history.append(self.CONVERSATION_STARTER_TEXT)
+            self.conversating_users[message.author.id].history.append(
+                self.CONVERSATION_STARTER_TEXT
+            )
 
             # Create a new discord thread, and then send the conversation starting message inside of that thread
             if not ("nothread" in prompt):
@@ -621,16 +637,16 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
         # we can append their history to the prompt.
         if message.author.id in self.conversating_users:
             self.conversating_users[message.author.id].history.append(
-                "\nHuman: "
-                + prompt
-                + "<|endofstatement|>\n"
+                "\nHuman: " + prompt + "<|endofstatement|>\n"
             )
 
             # increment the conversation counter for the user
             self.conversating_users[message.author.id].count += 1
 
         # Send the request to the model
-        await self.encapsulated_send(message, "".join(self.conversating_users[message.author.id].history))
+        await self.encapsulated_send(
+            message, "".join(self.conversating_users[message.author.id].history)
+        )
 
 
 class RedoView(discord.ui.View):
diff --git a/models/openai_model.py b/models/openai_model.py
index d2a9865..f5f2cce 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -82,7 +82,9 @@ class Model:
     def summarize_threshold(self, value):
         value = int(value)
         if value < 800 or value > 4000:
-            raise ValueError("Summarize threshold cannot be greater than 4000 or less than 800!")
+            raise ValueError(
+                "Summarize threshold cannot be greater than 4000 or less than 800!"
+            )
         self._summarize_threshold = value
 
     @property
@@ -292,9 +294,11 @@ class Model:
         Sends a summary request to the OpenAI API
         """
         summary_request_text = []
-        summary_request_text.append("The following is a conversation instruction set and a conversation"
-                                    " between two people named Human, and GPTie. Do not summarize the instructions for GPTie, only the conversation. Summarize the conversation in a detailed fashion. If Human mentioned their name, be sure to mention it in the summary. Pay close attention to things the Human has told you, such as personal details.")
-        summary_request_text.append(prompt+"\nDetailed summary of conversation: \n")
+        summary_request_text.append(
+            "The following is a conversation instruction set and a conversation"
+            " between two people named Human, and GPTie. Do not summarize the instructions for GPTie, only the conversation. Summarize the conversation in a detailed fashion. If Human mentioned their name, be sure to mention it in the summary. Pay close attention to things the Human has told you, such as personal details."
+        )
+        summary_request_text.append(prompt + "\nDetailed summary of conversation: \n")
 
         summary_request_text = "".join(summary_request_text)
 
@@ -327,7 +331,10 @@ class Model:
         frequency_penalty_override=None,
         presence_penalty_override=None,
         max_tokens_override=None,
-    ) -> (dict, bool):  # The response, and a boolean indicating whether or not the context limit was reached.
+    ) -> (
+        dict,
+        bool,
+    ):  # The response, and a boolean indicating whether or not the context limit was reached.
 
         # Validate that all the parameters are in a good state before we send the request
         if len(prompt) < self.prompt_min_length:
@@ -361,7 +368,7 @@ class Model:
                 else frequency_penalty_override,
                 best_of=self.best_of if not best_of_override else best_of_override,
             )
-            #print(response.__dict__)
+            # print(response.__dict__)
 
             # Parse the total tokens used for this request and response pair from the response
             tokens_used = int(response["usage"]["total_tokens"])
@@ -378,7 +385,7 @@ class Model:
                 + str(words)
             )
 
-        #print("The prompt about to be sent is " + prompt)
+        # print("The prompt about to be sent is " + prompt)
 
         self.usage_service.update_usage_image(self.image_size)
         if not vary: