Kaveen Kumarasinghe 2 years ago
commit 4f8d4afd6b

@@ -360,14 +360,19 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
         new_conversation_history = []
         new_conversation_history.append(self.CONVERSATION_STARTER_TEXT)
-        new_conversation_history.append("\nThis conversation has some context from earlier, which has been summarized as follows: ")
+        new_conversation_history.append(
+            "\nThis conversation has some context from earlier, which has been summarized as follows: "
+        )
         new_conversation_history.append(summarized_text)
-        new_conversation_history.append("\nContinue the conversation, paying very close attention to things Human told you, such as their name, and personal details.\n")
+        new_conversation_history.append(
+            "\nContinue the conversation, paying very close attention to things Human told you, such as their name, and personal details.\n"
+        )
         # Get the last entry from the user's conversation history
-        new_conversation_history.append(self.conversating_users[message.author.id].history[-1]+"\n")
+        new_conversation_history.append(
+            self.conversating_users[message.author.id].history[-1] + "\n"
+        )
         self.conversating_users[message.author.id].history = new_conversation_history

     async def encapsulated_send(self, message, prompt, response_message=None):
         # Append a newline, and GPTie: to the prompt
@@ -384,30 +389,39 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             if self.model.summarize_conversations:
                 await message.reply(
                     "I'm currently summarizing our current conversation so we can keep chatting, "
-                    "give me one moment!")
+                    "give me one moment!"
+                )
                 self.summarize_conversation(message, new_prompt)
                 # Check again if the prompt is about to go past the token limit
-                new_prompt = "".join(self.conversating_users[message.author.id].history) + "\nGPTie: "
+                new_prompt = (
+                    "".join(self.conversating_users[message.author.id].history)
+                    + "\nGPTie: "
+                )
                 tokens = self.usage_service.count_tokens(new_prompt)
-                if tokens > self.model.summarize_threshold - 150: # 150 is a buffer for the second stage
-                    await message.reply("I tried to summarize our current conversation so we could keep chatting, "
-                        "but it still went over the token "
-                        "limit. Please try again later.")
+                if (
+                    tokens > self.model.summarize_threshold - 150
+                ):  # 150 is a buffer for the second stage
+                    await message.reply(
+                        "I tried to summarize our current conversation so we could keep chatting, "
+                        "but it still went over the token "
+                        "limit. Please try again later."
+                    )
                     await self.end_conversation(message)
                     return
             else:
-                await message.reply("The conversation context limit has been reached.")
+                await message.reply(
+                    "The conversation context limit has been reached."
+                )
                 await self.end_conversation(message)
                 return

         response = self.model.send_request(new_prompt, message)
         response_text = response["choices"][0]["text"]

         if re.search(r"<@!?\d+>|<@&\d+>|<#\d+>", response_text):
@@ -444,8 +458,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             original_message[message.author.id] = message.id
         else:
             # We have response_text available, this is the original message that we want to edit
-            await response_message.edit(content=response_text.replace("<|endofstatement|>", ""))
+            await response_message.edit(
+                content=response_text.replace("<|endofstatement|>", "")
+            )

         # After each response, check if the user has reached the conversation limit in terms of messages or time.
         await self.check_conversation_limit(message)
@@ -484,17 +499,18 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             if after.author.id in self.conversating_users:
                 # Remove the last two elements from the history array and add the new Human: prompt
-                self.conversating_users[after.author.id].history = self.conversating_users[after.author.id].history[
-                    :-2]
+                self.conversating_users[
+                    after.author.id
+                ].history = self.conversating_users[after.author.id].history[:-2]
                 self.conversating_users[after.author.id].history.append(
-                    f"\nHuman: {after.content}<|endofstatement|>\n")
-                edited_content = "".join(self.conversating_users[after.author.id].history)
+                    f"\nHuman: {after.content}<|endofstatement|>\n"
+                )
+                edited_content = "".join(
+                    self.conversating_users[after.author.id].history
+                )
                 self.conversating_users[after.author.id].count += 1

-                await self.encapsulated_send(
-                    message,
-                    edited_content, response_message
-                )
+                await self.encapsulated_send(message, edited_content, response_message)

                 redo_users[after.author.id].prompt = after.content
@@ -581,9 +597,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             # If the user is not already conversating, start a conversation with GPT3
             self.conversating_users[message.author.id] = User(message.author.id)
             # Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
-            self.conversating_users[
-                message.author.id
-            ].history.append(self.CONVERSATION_STARTER_TEXT)
+            self.conversating_users[message.author.id].history.append(
+                self.CONVERSATION_STARTER_TEXT
+            )

             # Create a new discord thread, and then send the conversation starting message inside of that thread
             if not ("nothread" in prompt):
@@ -625,16 +641,16 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
         # we can append their history to the prompt.
         if message.author.id in self.conversating_users:
             self.conversating_users[message.author.id].history.append(
-                "\nHuman: "
-                + prompt
-                + "<|endofstatement|>\n"
+                "\nHuman: " + prompt + "<|endofstatement|>\n"
             )

             # increment the conversation counter for the user
             self.conversating_users[message.author.id].count += 1

         # Send the request to the model
-        await self.encapsulated_send(message, "".join(self.conversating_users[message.author.id].history))
+        await self.encapsulated_send(
+            message, "".join(self.conversating_users[message.author.id].history)
+        )


 class RedoView(discord.ui.View):
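Note on the hunk above: beyond the reflow, the underlying pattern is a per-user history buffer whose turns are joined into a single completion prompt. A stripped-down sketch of that pattern follows; only the Human:/GPTie: labels and the <|endofstatement|> delimiter come from the diff, the class and function names are illustrative placeholders, not the bot's real API:

class ConversationUser:
    """Illustrative stand-in for the bot's per-user state object."""

    def __init__(self, user_id):
        self.user_id = user_id
        self.history = []   # ordered list of prompt fragments
        self.count = 0      # number of Human turns so far

def build_prompt(user, new_message):
    # Append the new Human turn with the same delimiter the bot uses,
    # then join the whole history into one completion prompt.
    user.history.append(f"\nHuman: {new_message}<|endofstatement|>\n")
    user.count += 1
    return "".join(user.history) + "\nGPTie: "

user = ConversationUser(1234)
user.history.append("Instructions: you are GPTie, a helpful assistant.\n")
print(build_prompt(user, "Hi, my name is Sam."))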

@@ -82,7 +82,9 @@ class Model:
     def summarize_threshold(self, value):
         value = int(value)
         if value < 800 or value > 4000:
-            raise ValueError("Summarize threshold cannot be greater than 4000 or less than 800!")
+            raise ValueError(
+                "Summarize threshold cannot be greater than 4000 or less than 800!"
+            )
         self._summarize_threshold = value

     @property
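Note on the hunk above: summarize_threshold is a property setter that coerces and range-checks its input before storing it. A minimal sketch of that validated-setter pattern follows; the class name and default value are placeholders, only the 800-4000 bounds and the error message come from the diff:

class ModelSettings:
    """Illustrative stand-in for the Model class's validated settings."""

    def __init__(self, summarize_threshold=2500):
        self.summarize_threshold = summarize_threshold  # routed through the setter

    @property
    def summarize_threshold(self):
        return self._summarize_threshold

    @summarize_threshold.setter
    def summarize_threshold(self, value):
        value = int(value)
        if value < 800 or value > 4000:
            raise ValueError(
                "Summarize threshold cannot be greater than 4000 or less than 800!"
            )
        self._summarize_threshold = value

settings = ModelSettings()
settings.summarize_threshold = "3000"   # coerced to int and accepted
# settings.summarize_threshold = 100    # would raise ValueError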
@@ -292,8 +294,10 @@ class Model:
         Sends a summary request to the OpenAI API
         """
         summary_request_text = []
-        summary_request_text.append("The following is a conversation instruction set and a conversation"
-            " between two people named Human, and GPTie. Do not summarize the instructions for GPTie, only the conversation. Summarize the conversation in a detailed fashion. If Human mentioned their name, be sure to mention it in the summary. Pay close attention to things the Human has told you, such as personal details.")
+        summary_request_text.append(
+            "The following is a conversation instruction set and a conversation"
+            " between two people named Human, and GPTie. Do not summarize the instructions for GPTie, only the conversation. Summarize the conversation in a detailed fashion. If Human mentioned their name, be sure to mention it in the summary. Pay close attention to things the Human has told you, such as personal details."
+        )
         summary_request_text.append(prompt + "\nDetailed summary of conversation: \n")

         summary_request_text = "".join(summary_request_text)
@@ -327,7 +331,10 @@ class Model:
         frequency_penalty_override=None,
         presence_penalty_override=None,
         max_tokens_override=None,
-    ) -> (dict, bool):  # The response, and a boolean indicating whether or not the context limit was reached.
+    ) -> (
+        dict,
+        bool,
+    ):  # The response, and a boolean indicating whether or not the context limit was reached.

         # Validate that all the parameters are in a good state before we send the request
         if len(prompt) < self.prompt_min_length:
