diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index e7e9957..e029f17 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -693,14 +693,20 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
         # General catch case for everything
         except Exception:
+            message = "Something went wrong, please try again later. This may be due to upstream issues on the API, or rate limiting."
+            await ctx.send_followup(message) if from_context else await ctx.reply(
+                message
+            )
             if user_id in self.awaiting_responses:
                 self.awaiting_responses.remove(user_id)
             traceback.print_exc()
-            await self.end_conversation(ctx)
+
+            try:
+                await self.end_conversation(ctx)
+            except:
+                pass
             return
 
     @add_to_group("gpt")
diff --git a/models/openai_model.py b/models/openai_model.py
index 6a6d850..29c57bb 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -42,7 +42,7 @@ class Model:
         )
         self._frequency_penalty = 0  # Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
         self._best_of = 1  # Number of responses to compare the loglikelihoods of
-        self._prompt_min_length = 12
+        self._prompt_min_length = 8
         self._max_conversation_length = 100
         self._model = Models.DAVINCI
         self._low_usage_mode = False
@@ -307,6 +307,13 @@ class Model:
             )
         self._prompt_min_length = value
 
+    async def valid_text_request(self, response):
+        try:
+            tokens_used = int(response["usage"]["total_tokens"])
+            self.usage_service.update_usage(tokens_used)
+        except:
+            raise ValueError("The API returned an invalid response: " + str(response['error']['message']))
+
     async def send_summary_request(self, prompt):
         """
         Sends a summary request to the OpenAI API
@@ -322,9 +329,6 @@ class Model:
 
         tokens = self.usage_service.count_tokens(summary_request_text)
 
-        print("The summary request will use " + str(tokens) + " tokens.")
-        print(f"{self.max_tokens - tokens} is the remaining that we will use.")
-
         async with aiohttp.ClientSession() as session:
             payload = {
                 "model": Models.DAVINCI,
@@ -345,10 +349,10 @@ class Model:
             ) as resp:
                 response = await resp.json()
 
+                await self.valid_text_request(response)
+
                 print(response["choices"][0]["text"])
 
-                tokens_used = int(response["usage"]["total_tokens"])
-                self.usage_service.update_usage(tokens_used)
                 return response
 
     async def send_request(
@@ -369,7 +373,7 @@ class Model:
         # Validate that all the parameters are in a good state before we send the request
         if len(prompt) < self.prompt_min_length:
             raise ValueError(
-                "Prompt must be greater than 12 characters, it is currently "
+                "Prompt must be greater than 8 characters, it is currently "
                 + str(len(prompt))
             )
@@ -399,8 +403,7 @@ class Model:
                 response = await resp.json()
                 print(response)
                 # Parse the total tokens used for this request and response pair from the response
-                tokens_used = int(response["usage"]["total_tokens"])
-                self.usage_service.update_usage(tokens_used)
+                await self.valid_text_request(response)
 
                 return response
 
@@ -451,7 +454,6 @@ class Model:
                 response = await resp.json()
 
                 print(response)
-                print("JUST PRINTED THE RESPONSE")
 
                 image_urls = []
                 for result in response["data"]:
diff --git a/models/usage_service_model.py b/models/usage_service_model.py
index e34d733..51cf64a 100644
--- a/models/usage_service_model.py
+++ b/models/usage_service_model.py
@@ -17,7 +17,6 @@ class UsageService:
     def update_usage(self, tokens_used):
         tokens_used = int(tokens_used)
         price = (tokens_used / 1000) * 0.02
-        print("This request cost " + str(price) + " credits")
         usage = self.get_usage()
         print("The current usage is " + str(usage) + " credits")
         with self.usage_file_path.open("w") as f: