From 96c69c417f5bf77b9e4e8412e8cac2ed6248fd21 Mon Sep 17 00:00:00 2001
From: Kaveen Kumarasinghe
Date: Tue, 20 Dec 2022 07:01:14 -0500
Subject: [PATCH] fix bugs

---
 cogs/gpt_3_commands_and_converser.py | 8 +++++---
 models/openai_model.py               | 7 ++-----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index bbc7238..e7f0c40 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -377,13 +377,13 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):

         # Append a newline, and GPTie: to the prompt
         new_prompt = prompt + "\nGPTie: "
+        tokens = self.usage_service.count_tokens(new_prompt)

         # Send the request to the model
         try:
             # Pre-conversation token check.
             if message.author.id in self.conversating_users:
                 # Check if the prompt is about to go past the token limit
-                tokens = self.usage_service.count_tokens(new_prompt)
                 if tokens > self.model.summarize_threshold:  # 250 is a buffer

                     if self.model.summarize_conversations:
@@ -420,7 +420,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
                     await self.end_conversation(message)
                     return

-            response = self.model.send_request(new_prompt, message)
+            response = self.model.send_request(new_prompt, message, tokens=tokens)

             response_text = response["choices"][0]["text"]

@@ -648,8 +648,10 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
             self.conversating_users[message.author.id].count += 1

         # Send the request to the model
+        # If conversing, the prompt to send is the history, otherwise, it's just the prompt
+
         await self.encapsulated_send(
-            message, "".join(self.conversating_users[message.author.id].history)
+            message, prompt if message.author.id not in self.conversating_users else "".join(self.conversating_users[message.author.id].history)
         )


diff --git a/models/openai_model.py b/models/openai_model.py
index f5f2cce..fa9898d 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -325,6 +325,7 @@ class Model:
         self,
         prompt,
         message,
+        tokens,
         temp_override=None,
         top_p_override=None,
         best_of_override=None,
@@ -345,10 +346,6 @@

         print("The prompt about to be sent is " + prompt)

-        # TODO TO REMOVE
-        prompt_tokens = self.usage_service.count_tokens(prompt)
-        print(f"The prompt tokens will be {prompt_tokens}")
-        print(f"The total max tokens will then be {self.max_tokens - prompt_tokens}")

         response = openai.Completion.create(
             model=Models.DAVINCI
@@ -357,7 +354,7 @@
             prompt=prompt,
             temperature=self.temp if not temp_override else temp_override,
             top_p=self.top_p if not top_p_override else top_p_override,
-            max_tokens=self.max_tokens - prompt_tokens
+            max_tokens=self.max_tokens - tokens
             if not max_tokens_override
             else max_tokens_override,
             presence_penalty=self.presence_penalty