From 4621cd89bb775db0fbce8cd15e3ff2b76af32c69 Mon Sep 17 00:00:00 2001
From: Cooper Ry Lees
Date: Fri, 30 Dec 2022 17:07:25 -0800
Subject: [PATCH 1/2] Add PyPI Uploading

- Add a GitHub action to publish a source distribution (sdist) + wheel (binary package) to PyPI
- PyPI will create the package on upload, unlike Docker Hub

This will need a PYPI_TOKEN secret added. Please register at pypi.org, create a token, and add it to the GitHub secrets.
- Later on you could swap to a token locked to just this one module if you want to be security conscious
- (Please do this before merging)
---
 .github/workflows/pypi_upload.yml | 34 +++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 .github/workflows/pypi_upload.yml

diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml
new file mode 100644
index 0000000..ed434bd
--- /dev/null
+++ b/.github/workflows/pypi_upload.yml
@@ -0,0 +1,34 @@
+name: Publish to PyPI
+
+on:
+  release:
+    types: [published]
+
+permissions:
+  contents: read
+
+jobs:
+  main:
+    name: sdist + pure wheel
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up latest Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "*"
+
+      - name: Install latest pip, build, twine
+        run: |
+          python -m pip install --upgrade --disable-pip-version-check pip
+          python -m pip install --upgrade build twine
+
+      - name: Build wheel and source distributions
+        run: python -m build
+
+      - name: Upload to PyPI via Twine
+        env:
+          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+        run: twine upload --verbose -u '__token__' dist/*
\ No newline at end of file

From 2fc67fc13b5d85067e674ed0a88eecde095053eb Mon Sep 17 00:00:00 2001
From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Date: Sat, 31 Dec 2022 10:48:38 +0000
Subject: [PATCH 2/2] Format Python code with psf/black push

---
 cogs/draw_image_generation.py | 34 +++--
 cogs/gpt_3_commands_and_converser.py | 186 +++++++++++++++++----------
 cogs/image_prompt_optimizer.py | 28 ++--
 models/env_service_model.py | 26 ++--
 models/openai_model.py | 2 +-
 5 files changed, 179 insertions(+), 97 deletions(-)

diff --git a/cogs/draw_image_generation.py b/cogs/draw_image_generation.py
index f15d6f3..7d39c62 100644
--- a/cogs/draw_image_generation.py
+++ b/cogs/draw_image_generation.py
@@ -65,19 +65,21 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"):
         if not response_message:
             # Original generation case
             # Start an interaction with the user, we also want to send data embed=embed, file=file, view=SaveView(image_urls, self, self.converser_cog)
-            result_message = await ctx.channel.send(
-                embed=embed,
-                file=file,
-            ) if not from_context else await ctx.respond(embed=embed, file=file)
+            result_message = (
+                await ctx.channel.send(
+                    embed=embed,
+                    file=file,
+                )
+                if not from_context
+                else await ctx.respond(embed=embed, file=file)
+            )
 
             await result_message.edit(
                 view=SaveView(image_urls, self, self.converser_cog, result_message)
             )
 
             self.converser_cog.users_to_interactions[user_id] = []
-            self.converser_cog.users_to_interactions[user_id].append(
-                result_message.id
-            )
+            self.converser_cog.users_to_interactions[user_id].append(result_message.id)
 
         # Get the actual result message object
         if from_context:
@@ -128,8 +130,10 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"):
                 result_message.id
             )
 
-    @discord.slash_command(name="draw", description="Draw an image from a prompt", guild_ids=ALLOWED_GUILDS)
-    @discord.option(name = "prompt", description = "The prompt to draw from", 
required = True) + @discord.slash_command( + name="draw", description="Draw an image from a prompt", guild_ids=ALLOWED_GUILDS + ) + @discord.option(name="prompt", description="The prompt to draw from", required=True) async def draw(self, ctx: discord.ApplicationContext, prompt: str): await ctx.defer() @@ -151,7 +155,11 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"): await ctx.respond("Something went wrong. Please try again later.") await ctx.send_followup(e) - @discord.slash_command(name="local-size", description="Get the size of the dall-e images folder that we have on the current system", guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="local-size", + description="Get the size of the dall-e images folder that we have on the current system", + guild_ids=ALLOWED_GUILDS, + ) @discord.guild_only() async def local_size(self, ctx: discord.ApplicationContext): await ctx.defer() @@ -171,7 +179,11 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"): total_size = total_size / 1000000 await ctx.respond(f"The size of the local images folder is {total_size} MB.") - @discord.slash_command(name="clear-local", description="Clear the local dalleimages folder on system.", guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="clear-local", + description="Clear the local dalleimages folder on system.", + guild_ids=ALLOWED_GUILDS, + ) @discord.guild_only() async def clear_local(self, ctx): await ctx.defer() diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py index cc5bfda..b36ea18 100644 --- a/cogs/gpt_3_commands_and_converser.py +++ b/cogs/gpt_3_commands_and_converser.py @@ -89,7 +89,6 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): return False return True - @commands.Cog.listener() async def on_member_remove(self, member): pass @@ -101,9 +100,14 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): ) print(f"The debug channel was acquired") - - @discord.slash_command(name="set-usage", description="Set the current OpenAI usage (in dollars)") - @discord.option(name="usage_amount", description="The current usage amount in dollars and cents (e.g 10.24)", type=float) + @discord.slash_command( + name="set-usage", description="Set the current OpenAI usage (in dollars)" + ) + @discord.option( + name="usage_amount", + description="The current usage amount in dollars and cents (e.g 10.24)", + type=float, + ) async def set_usage(self, ctx, usage_amount: float): await ctx.defer() @@ -119,7 +123,10 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): await ctx.respond("The usage value must be a valid float.") return - @discord.slash_command(name="delete-conversation-threads", description="Delete all conversation threads across the bot servers.") + @discord.slash_command( + name="delete-conversation-threads", + description="Delete all conversation threads across the bot servers.", + ) async def delete_all_conversation_threads(self, ctx): await ctx.defer() # If the user has ADMIN_ROLES @@ -184,10 +191,14 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): name="/chat-gpt", value="Start a conversation with GPT3", inline=False ) embed.add_field( - name="/end-chat", value="End a conversation with GPT3. You can also type `end` in the conversation.", inline=False + name="/end-chat", + value="End a conversation with GPT3. 
You can also type `end` in the conversation.", + inline=False, ) embed.add_field( - name="/settings", value="Print the current settings of the model", inline=False + name="/settings", + value="Print the current settings of the model", + inline=False, ) embed.add_field( name="/settings ", @@ -410,7 +421,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): ) self.conversating_users[after.author.id].count += 1 - await self.encapsulated_send(after.author.id, edited_content,ctx, response_message) + await self.encapsulated_send( + after.author.id, edited_content, ctx, response_message + ) self.redo_users[after.author.id].prompt = after.content @@ -423,7 +436,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): content = message.content.strip() - conversing = self.check_conversing(message.author.id, message.channel.id, content) + conversing = self.check_conversing( + message.author.id, message.channel.id, content + ) # If the user is conversing and they want to end it, end it immediately before we continue any further. if conversing and message.content.lower() in self.END_PROMPTS: @@ -433,7 +448,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # GPT3 command if conversing: # Extract all the text after the !g and use it as the prompt. - prompt = content # dead store but its okay :3 + prompt = content # dead store but its okay :3 await self.check_conversation_limit(message) @@ -480,7 +495,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): prompt if message.author.id not in self.conversating_users else "".join(self.conversating_users[message.author.id].history), - message + message, ) # ctx can be of type AppContext(interaction) or Message @@ -492,7 +507,10 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): tokens = self.usage_service.count_tokens(new_prompt) # Check if the prompt is about to go past the token limit - if user_id in self.conversating_users and tokens > self.model.summarize_threshold: + if ( + user_id in self.conversating_users + and tokens > self.model.summarize_threshold + ): # We don't need to worry about the differences between interactions and messages in this block, # because if we are in this block, we can only be using a message object for ctx @@ -506,14 +524,13 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # Check again if the prompt is about to go past the token limit new_prompt = ( - "".join(self.conversating_users[user_id].history) - + "\nGPTie: " + "".join(self.conversating_users[user_id].history) + "\nGPTie: " ) tokens = self.usage_service.count_tokens(new_prompt) if ( - tokens > self.model.summarize_threshold - 150 + tokens > self.model.summarize_threshold - 150 ): # 150 is a buffer for the second stage await ctx.reply( "I tried to summarize our current conversation so we could keep chatting, " @@ -524,9 +541,7 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): await self.end_conversation(ctx) return else: - await ctx.reply( - "The conversation context limit has been reached." - ) + await ctx.reply("The conversation context limit has been reached.") await self.end_conversation(ctx) return @@ -541,8 +556,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # If GPT3 tries to ping somebody, don't let it happen if re.search(r"<@!?\d+>|<@&\d+>|<#\d+>", str(response_text)): message = "I'm sorry, I can't mention users, roles, or channels." 
- await ctx.send_followup(message) if from_context else await ctx.reply(message) - + await ctx.send_followup(message) if from_context else await ctx.reply( + message + ) # If the user is conversing, add the GPT response to their conversation history. if user_id in self.conversating_users: @@ -553,44 +569,43 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # If we don't have a response message, we are not doing a redo, send as a new message(s) if not response_message: if len(response_text) > self.TEXT_CUTOFF: - await self.paginate_and_send( - response_text, ctx - ) + await self.paginate_and_send(response_text, ctx) else: - response_message = \ + response_message = ( await ctx.respond( - response_text, - view=RedoView(self, user_id), - ) if from_context \ - else \ - await ctx.reply( - response_text, - view=RedoView(self, user_id), + response_text, + view=RedoView(self, user_id), + ) + if from_context + else await ctx.reply( + response_text, + view=RedoView(self, user_id), + ) ) - # Get the actual message object of response_message in case it's an WebhookMessage - actual_response_message = response_message if not from_context else await ctx.fetch_message(response_message.id) + actual_response_message = ( + response_message + if not from_context + else await ctx.fetch_message(response_message.id) + ) self.redo_users[user_id] = RedoUser( prompt, ctx, ctx, actual_response_message ) - self.redo_users[user_id].add_interaction( - actual_response_message.id - ) + self.redo_users[user_id].add_interaction(actual_response_message.id) # We are doing a redo, edit the message. else: - await response_message.edit( - content=response_text - ) + await response_message.edit(content=response_text) - await self.send_debug_message(self.generate_debug_message(prompt, response), self.debug_channel) + await self.send_debug_message( + self.generate_debug_message(prompt, response), self.debug_channel + ) if user_id in self.awaiting_responses: self.awaiting_responses.remove(user_id) - # Error catching for OpenAI model value errors except ValueError as e: if from_context: @@ -601,15 +616,19 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # General catch case for everything except Exception: message = "Something went wrong, please try again later" - await ctx.send_followup(message) if from_context else await ctx.reply(message) + await ctx.send_followup(message) if from_context else await ctx.reply( + message + ) traceback.print_exc() await self.end_conversation(ctx) return - - - @discord.slash_command(name="g", description="Ask GPT3 something!", guild_ids=ALLOWED_GUILDS) - @discord.option(name="prompt", description="The prompt to send to GPT3", required=True) + @discord.slash_command( + name="g", description="Ask GPT3 something!", guild_ids=ALLOWED_GUILDS + ) + @discord.option( + name="prompt", description="The prompt to send to GPT3", required=True + ) @discord.guild_only() async def g(self, ctx: discord.ApplicationContext, prompt: str): await ctx.defer() @@ -625,14 +644,18 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): # If conversing, the prompt to send is the history, otherwise, it's just the prompt await self.encapsulated_send( - user.id, - prompt - if user.id not in self.conversating_users - else "".join(self.conversating_users[user.id].history), - ctx, - ) + user.id, + prompt + if user.id not in self.conversating_users + else "".join(self.conversating_users[user.id].history), + ctx, + ) - @discord.slash_command(name="chat-gpt", description="Have a conversation with GPT3", 
guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="chat-gpt", + description="Have a conversation with GPT3", + guild_ids=ALLOWED_GUILDS, + ) @discord.guild_only() async def chat_gpt(self, ctx: discord.ApplicationContext): await ctx.defer() @@ -652,13 +675,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): self.conversating_users[user.id] = User(user.id) # Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later - self.conversating_users[user.id].history.append( - self.CONVERSATION_STARTER_TEXT - ) + self.conversating_users[user.id].history.append(self.CONVERSATION_STARTER_TEXT) - message_thread = await ctx.respond( - user.name + "'s conversation with GPT3" - ) + message_thread = await ctx.respond(user.name + "'s conversation with GPT3") # Get the actual message object for the message_thread message_thread_real = await ctx.fetch_message(message_thread.id) thread = await message_thread_real.create_thread( @@ -673,37 +692,66 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"): ) self.conversation_threads[user.id] = thread.id - @discord.slash_command(name="end-chat", description="End a conversation with GPT3", guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="end-chat", + description="End a conversation with GPT3", + guild_ids=ALLOWED_GUILDS, + ) @discord.guild_only() async def end_chat(self, ctx: discord.ApplicationContext): await ctx.defer() - await ctx.respond("This has not been implemented yet. Please type `end` in your conversation thread to end the chat.") + await ctx.respond( + "This has not been implemented yet. Please type `end` in your conversation thread to end the chat." + ) - @discord.slash_command(name="help", description="Get help for GPT3Discord", guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="help", description="Get help for GPT3Discord", guild_ids=ALLOWED_GUILDS + ) @discord.guild_only() async def help(self, ctx: discord.ApplicationContext): await ctx.defer() await self.send_help_text(ctx) - @discord.slash_command(name="usage", description="Get usage statistics for GPT3Discord", guild_ids=ALLOWED_GUILDS) + @discord.slash_command( + name="usage", + description="Get usage statistics for GPT3Discord", + guild_ids=ALLOWED_GUILDS, + ) @discord.guild_only() async def usage(self, ctx: discord.ApplicationContext): await ctx.defer() await self.send_usage_text(ctx) - @discord.slash_command(name="settings", description="Get settings for GPT3Discord", guild_ids=ALLOWED_GUILDS) - @discord.option(name="parameter", description="The setting to change", required=False) - @discord.option(name="value", description="The value to set the setting to", required=False) + @discord.slash_command( + name="settings", + description="Get settings for GPT3Discord", + guild_ids=ALLOWED_GUILDS, + ) + @discord.option( + name="parameter", description="The setting to change", required=False + ) + @discord.option( + name="value", description="The value to set the setting to", required=False + ) @discord.guild_only() - async def settings(self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None): + async def settings( + self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None + ): await ctx.defer() if parameter is None and value is None: await self.send_settings_text(ctx) return # If only one of the options are set, then this is invalid. - if parameter is None and value is not None or parameter is not None and value is None: - await ctx.respond("Invalid settings command. 
Please use `/settings ` to change a setting") + if ( + parameter is None + and value is not None + or parameter is not None + and value is None + ): + await ctx.respond( + "Invalid settings command. Please use `/settings ` to change a setting" + ) return # Otherwise, process the settings change diff --git a/cogs/image_prompt_optimizer.py b/cogs/image_prompt_optimizer.py index 677da8c..144b417 100644 --- a/cogs/image_prompt_optimizer.py +++ b/cogs/image_prompt_optimizer.py @@ -9,6 +9,7 @@ from models.user_model import RedoUser ALLOWED_GUILDS = EnvService.get_allowed_guilds() + class ImgPromptOptimizer(commands.Cog, name="ImgPromptOptimizer"): _OPTIMIZER_PRETEXT = "Optimize the following text for DALL-E image generation to have the most detailed and realistic image possible. Prompt:" @@ -45,8 +46,14 @@ class ImgPromptOptimizer(commands.Cog, name="ImgPromptOptimizer"): traceback.print_exc() self.OPTIMIZER_PRETEXT = self._OPTIMIZER_PRETEXT - @discord.slash_command(name="imgoptimize", description="Optimize a text prompt for DALL-E/MJ/SD image generation.", guild_ids=ALLOWED_GUILDS) - @discord.option(name="prompt", description="The text prompt to optimize.", required=True) + @discord.slash_command( + name="imgoptimize", + description="Optimize a text prompt for DALL-E/MJ/SD image generation.", + guild_ids=ALLOWED_GUILDS, + ) + @discord.option( + name="prompt", description="The text prompt to optimize.", required=True + ) @discord.guild_only() async def imgoptimize(self, ctx: discord.ApplicationContext, prompt: str): await ctx.defer() @@ -74,7 +81,7 @@ class ImgPromptOptimizer(commands.Cog, name="ImgPromptOptimizer"): temp_override=0.9, presence_penalty_override=0.5, best_of_override=1, - max_tokens_override=80 + max_tokens_override=80, ) # THIS USES MORE TOKENS THAN A NORMAL REQUEST! This will use roughly 4000 tokens, and will repeat the query @@ -84,7 +91,9 @@ class ImgPromptOptimizer(commands.Cog, name="ImgPromptOptimizer"): response_text = response["choices"][0]["text"] if re.search(r"<@!?\d+>|<@&\d+>|<#\d+>", response_text): - await ctx.respond("I'm sorry, I can't mention users, roles, or channels.") + await ctx.respond( + "I'm sorry, I can't mention users, roles, or channels." + ) return response_message = await ctx.respond( @@ -101,9 +110,7 @@ class ImgPromptOptimizer(commands.Cog, name="ImgPromptOptimizer"): self.converser_cog.redo_users[user.id] = RedoUser( final_prompt, ctx, ctx, response_message ) - self.converser_cog.redo_users[user.id].add_interaction( - response_message.id - ) + self.converser_cog.redo_users[user.id].add_interaction(response_message.id) await response_message.edit( view=OptimizeView( self.converser_cog, self.image_service_cog, self.deletion_queue @@ -176,7 +183,12 @@ class DrawButton(discord.ui.Button["OptimizeView"]): # Call the image service cog to draw the image await self.image_service_cog.encapsulated_send( - user_id, prompt, None, msg, True, True, + user_id, + prompt, + None, + msg, + True, + True, ) diff --git a/models/env_service_model.py b/models/env_service_model.py index 55ccde8..3536aa4 100644 --- a/models/env_service_model.py +++ b/models/env_service_model.py @@ -1,7 +1,9 @@ from dotenv import load_dotenv + load_dotenv() import os + class EnvService: # To be expanded upon later! def __init__(self): @@ -18,11 +20,15 @@ class EnvService: allowed_guilds = None if allowed_guilds is None: - raise ValueError("ALLOWED_GUILDS is not defined properly in the environment file!" - "Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file." 
- "For example a line should look like: `ALLOWED_GUILDS=\"971268468148166697\"`") + raise ValueError( + "ALLOWED_GUILDS is not defined properly in the environment file!" + "Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file." + 'For example a line should look like: `ALLOWED_GUILDS="971268468148166697"`' + ) - allowed_guilds = allowed_guilds.split(",") if "," in allowed_guilds else [allowed_guilds] + allowed_guilds = ( + allowed_guilds.split(",") if "," in allowed_guilds else [allowed_guilds] + ) allowed_guilds = [int(guild) for guild in allowed_guilds] return allowed_guilds @@ -37,9 +43,13 @@ class EnvService: allowed_roles = None if allowed_roles is None: - raise ValueError("ALLOWED_ROLES is not defined properly in the environment file!" - "Please copy your server's role and put it into ALLOWED_ROLES in the .env file." - "For example a line should look like: `ALLOWED_ROLES=\"Admin\"`") + raise ValueError( + "ALLOWED_ROLES is not defined properly in the environment file!" + "Please copy your server's role and put it into ALLOWED_ROLES in the .env file." + 'For example a line should look like: `ALLOWED_ROLES="Admin"`' + ) - allowed_roles = allowed_roles.split(",") if "," in allowed_roles else [allowed_roles] + allowed_roles = ( + allowed_roles.split(",") if "," in allowed_roles else [allowed_roles] + ) return allowed_roles diff --git a/models/openai_model.py b/models/openai_model.py index 1565cc2..99ac0f6 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -69,7 +69,7 @@ class Model: "custom_web_root", "_hidden_attributes", "model_max_tokens", - "openai_key" + "openai_key", ] self.openai_key = os.getenv("OPENAI_TOKEN")