diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index e029f17..e0a25ca 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -4,6 +4,7 @@ import re
 import traceback
 from pathlib import Path
 
+import aiofiles
 import discord
 from pycord.multicog import add_to_group
 
@@ -14,22 +15,21 @@ from models.user_model import User, RedoUser
 from models.check_model import Check
 from collections import defaultdict
 
-
 original_message = {}
 ALLOWED_GUILDS = EnvService.get_allowed_guilds()
 
 
 class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     def __init__(
-        self,
-        bot,
-        usage_service,
-        model,
-        message_queue,
-        deletion_queue,
-        DEBUG_GUILD,
-        DEBUG_CHANNEL,
-        data_path: Path,
+            self,
+            bot,
+            usage_service,
+            model,
+            message_queue,
+            deletion_queue,
+            DEBUG_GUILD,
+            DEBUG_CHANNEL,
+            data_path: Path,
     ):
         super().__init__()
         self.data_path = data_path
@@ -66,12 +66,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         assert self.CONVERSATION_STARTER_TEXT is not None
 
         conversation_file_path_minimal = (
-            data_path / "conversation_starter_pretext_minimal.txt"
+                data_path / "conversation_starter_pretext_minimal.txt"
         )
         with conversation_file_path_minimal.open("r") as f:
             self.CONVERSATION_STARTER_TEXT_MINIMAL = f.read()
         print(
-            f"Conversation starter text loaded from {conversation_file_path_minimal }."
+            f"Conversation starter text loaded from {conversation_file_path_minimal}."
         )
         assert self.CONVERSATION_STARTER_TEXT_MINIMAL is not None
 
@@ -113,6 +113,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             checks=[Check.check_admin_roles()],
         )
 
+    async def load_file(self, file, ctx):
+        try:
+            async with aiofiles.open(file, "r") as f:
+                return await f.read()
+        except Exception as e:
+            traceback.print_exc()
+            await ctx.respond(
+                "Error loading file. Please check that it is correctly placed in the bot's root file directory.")
+            raise e
+
     @discord.Cog.listener()
     async def on_member_join(self, member):
         if self.model.welcome_message_enabled:
@@ -175,7 +185,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         # Attempt to convert the input usage value into a float
         try:
             usage = float(usage_amount)
-            self.usage_service.set_usage(usage)
+            await self.usage_service.set_usage(usage)
             await ctx.respond(f"Set the usage to {usage}")
         except:
             await ctx.respond("The usage value must be a valid float.")
@@ -199,9 +209,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
     def check_conversing(self, user_id, channel_id, message_content):
         cond1 = (
-            user_id in self.conversating_users
-            and user_id in self.conversation_threads
-            and channel_id == self.conversation_threads[user_id]
+                user_id in self.conversating_users
+                and user_id in self.conversation_threads
+                and channel_id == self.conversation_threads[user_id]
         )
         # If the trimmed message starts with a Tilde, then we want to not contribute this to the conversation
         try:
@@ -241,35 +251,35 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             title="GPT3Bot Help", description="The current commands", color=0xC730C7
         )
         embed.add_field(
-            name="/ask",
+            name="/gpt ask",
            value="Ask GPT3 something. Be clear, long, and concise in your prompt. Don't waste tokens.",
             inline=False,
         )
         embed.add_field(
-            name="/converse", value="Start a conversation with GPT3", inline=False
+            name="/gpt converse", value="Start a conversation with GPT3", inline=False
         )
         embed.add_field(
-            name="/end-chat",
+            name="/gpt end-chat",
             value="End a conversation with GPT3. You can also type `end` in the conversation.",
             inline=False,
         )
         embed.add_field(
-            name="/settings",
+            name="/system settings",
             value="Print the current settings of the model",
             inline=False,
         )
         embed.add_field(
-            name="/settings <parameter> <value>",
+            name="/system settings <parameter> <value>",
             value="Change the parameter of the model named by <parameter> to new value <value>",
             inline=False,
         )
         embed.add_field(
-            name="/draw <prompt>",
+            name="/dalle draw <prompt>",
             value="Use DALL-E2 to draw an image based on a text prompt",
             inline=False,
         )
         embed.add_field(
-            name="/optimize <prompt>",
+            name="/dalle optimize <prompt>",
             value="Optimize an image prompt for use with DALL-E2, Midjourney, SD, etc.",
             inline=False,
         )
@@ -284,12 +294,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         # 1000 tokens costs 0.02 USD, so we can calculate the total tokens used from the price that we have stored
         embed.add_field(
             name="Total tokens used",
-            value=str(int((self.usage_service.get_usage() / 0.02)) * 1000),
+            value=str(int((await self.usage_service.get_usage() / 0.02)) * 1000),
             inline=False,
         )
         embed.add_field(
             name="Total price",
-            value="$" + str(round(self.usage_service.get_usage(), 2)),
+            value="$" + str(round(await self.usage_service.get_usage(), 2)),
             inline=False,
         )
         await ctx.respond(embed=embed)
@@ -361,7 +371,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         from_context = isinstance(ctx, discord.ApplicationContext)
 
         response_text = [
-            response_text[i : i + self.TEXT_CUTOFF]
+            response_text[i: i + self.TEXT_CUTOFF]
             for i in range(0, len(response_text), self.TEXT_CUTOFF)
         ]
         # Send each chunk as a message
@@ -384,7 +394,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
     async def queue_debug_chunks(self, debug_message, debug_channel):
         debug_message_chunks = [
-            debug_message[i : i + self.TEXT_CUTOFF]
+            debug_message[i: i + self.TEXT_CUTOFF]
             for i in range(0, len(debug_message), self.TEXT_CUTOFF)
         ]
 
@@ -426,8 +436,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         if message.author.id in self.conversating_users:
             # If the user has reached the max conversation length, end the conversation
             if (
-                self.conversating_users[message.author.id].count
-                >= self.model.max_conversation_length
+                    self.conversating_users[message.author.id].count
+                    >= self.model.max_conversation_length
             ):
                 await message.reply(
                     "You have reached the maximum conversation length. You have ended the conversation with GPT3, and it has ended."
@@ -558,7 +568,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
     # ctx can be of type AppContext(interaction) or Message
     async def encapsulated_send(
-        self, user_id, prompt, ctx, response_message=None, from_g_command=False
+            self, user_id, prompt, ctx, response_message=None, from_g_command=False
     ):
         new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
 
@@ -580,9 +590,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
         # Check if the prompt is about to go past the token limit
         if (
-            user_id in self.conversating_users
-            and tokens > self.model.summarize_threshold
-            and not from_g_command
+                user_id in self.conversating_users
+                and tokens > self.model.summarize_threshold
+                and not from_g_command
         ):
 
             # We don't need to worry about the differences between interactions and messages in this block,
@@ -597,13 +607,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
             # Check again if the prompt is about to go past the token limit
             new_prompt = (
-                "".join(self.conversating_users[user_id].history) + "\nGPTie: "
+                    "".join(self.conversating_users[user_id].history) + "\nGPTie: "
             )
 
             tokens = self.usage_service.count_tokens(new_prompt)
 
             if (
-                tokens > self.model.summarize_threshold - 150
+                    tokens > self.model.summarize_threshold - 150
             ):  # 150 is a buffer for the second stage
                 await ctx.reply(
                     "I tried to summarize our current conversation so we could keep chatting, "
@@ -754,7 +764,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     )
     @discord.guild_only()
     async def converse(
-        self, ctx: discord.ApplicationContext, opener: str, private, minimal
+            self, ctx: discord.ApplicationContext, opener: str, private, minimal
     ):
         if private:
             await ctx.defer(ephemeral=True)
@@ -774,6 +784,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             user_id_normalized = user.id
         else:
             user_id_normalized = ctx.author.id
+        # Pre-check the opener: if it refers to a file, make sure that file is valid.
+        # If the opener ends in .txt, it's a file and we want to load it
+        if opener.endswith(".txt"):
+            # Load the file and read it into opener
+            opener = await self.load_file(opener, ctx)
+            if not opener: return
 
         self.conversating_users[user_id_normalized] = User(user_id_normalized)
 
@@ -812,7 +828,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         if opener:
             thread_message = await thread.send(
                 "***Opening prompt*** \n"
-                "<@" + str(user_id_normalized) + ">: " + opener
+                + opener
             )
             if user_id_normalized in self.conversating_users:
                 self.awaiting_responses.append(user_id_normalized)
@@ -900,7 +916,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
     )
     @discord.guild_only()
     async def settings(
-        self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None
+            self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None
     ):
         await ctx.defer()
         if parameter is None and value is None:
@@ -909,10 +925,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 
         # If only one of the options are set, then this is invalid.
         if (
-            parameter is None
-            and value is not None
-            or parameter is not None
-            and value is None
+                parameter is None
+                and value is not None
+                or parameter is not None
+                and value is None
         ):
             await ctx.respond(
                 "Invalid settings command. Please use `/settings <parameter> <value>` to change a setting"
diff --git a/gpt3discord.py b/gpt3discord.py
index afac153..aa0a21e 100644
--- a/gpt3discord.py
+++ b/gpt3discord.py
@@ -49,8 +49,6 @@ model = Model(usage_service)
 """
 An encapsulating wrapper for the discord.py client. This uses the old re-write without cogs, but it gets the job done!
""" - - @bot.event # Using self gives u async def on_ready(): # I can make self optional by print("We have logged in as {0.user}".format(bot)) diff --git a/models/openai_model.py b/models/openai_model.py index 70d4f5f..71ffa5a 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -310,7 +310,7 @@ class Model: async def valid_text_request(self, response): try: tokens_used = int(response["usage"]["total_tokens"]) - self.usage_service.update_usage(tokens_used) + await self.usage_service.update_usage(tokens_used) except: raise ValueError( "The API returned an invalid response: " @@ -420,7 +420,7 @@ class Model: ) # print("The prompt about to be sent is " + prompt) - self.usage_service.update_usage_image(self.image_size) + await self.usage_service.update_usage_image(self.image_size) response = None diff --git a/models/usage_service_model.py b/models/usage_service_model.py index 51cf64a..4bf7e4e 100644 --- a/models/usage_service_model.py +++ b/models/usage_service_model.py @@ -1,6 +1,7 @@ import os from pathlib import Path +import aiofiles from transformers import GPT2TokenizerFast @@ -14,31 +15,32 @@ class UsageService: f.close() self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") - def update_usage(self, tokens_used): + async def update_usage(self, tokens_used): tokens_used = int(tokens_used) price = (tokens_used / 1000) * 0.02 - usage = self.get_usage() + usage = await self.get_usage() print("The current usage is " + str(usage) + " credits") - with self.usage_file_path.open("w") as f: - f.write(str(usage + float(price))) - f.close() - - def set_usage(self, usage): - with self.usage_file_path.open("w") as f: - f.write(str(usage)) - f.close() - - def get_usage(self): - with self.usage_file_path.open("r") as f: - usage = float(f.read().strip()) - f.close() + # Do the same as above but with aiofiles + async with aiofiles.open(self.usage_file_path, "w") as f: + await f.write(str(usage + float(price))) + await f.close() + + async def set_usage(self, usage): + async with aiofiles.open(self.usage_file_path, "w") as f: + await f.write(str(usage)) + await f.close() + + async def get_usage(self): + async with aiofiles.open(self.usage_file_path, "r") as f: + usage = float((await f.read()).strip()) + await f.close() return usage def count_tokens(self, input): res = self.tokenizer(input)["input_ids"] return len(res) - def update_usage_image(self, image_size): + async def update_usage_image(self, image_size): # 1024×1024 $0.020 / image # 512×512 $0.018 / image # 256×256 $0.016 / image @@ -52,8 +54,8 @@ class UsageService: else: raise ValueError("Invalid image size") - usage = self.get_usage() + usage = await self.get_usage() - with self.usage_file_path.open("w") as f: - f.write(str(usage + float(price))) - f.close() + async with aiofiles.open(self.usage_file_path, "w") as f: + await f.write(str(usage + float(price))) + await f.close() diff --git a/pyproject.toml b/pyproject.toml index 6aa5cb1..80bf75b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,8 @@ dependencies = [ "python-dotenv", "requests", "transformers", - "pycord-multicog" + "pycord-multicog", + "aiofiles" ] dynamic = ["version"] [project.scripts] diff --git a/requirements.txt b/requirements.txt index 808b97a..19a6eb8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ python-dotenv==0.21.0 requests==2.28.1 transformers==4.25.1 pycord-multicog==1.0.2 +aiofiles==22.1.0 \ No newline at end of file