diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index 3a1f914..627161e 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -1219,7 +1219,10 @@ class RedoButton(discord.ui.Button["RedoView"]):
             )
 
             await self.converser_cog.encapsulated_send(
-                user_id=user_id, prompt=prompt, ctx=ctx, response_message=response_message
+                user_id=user_id,
+                prompt=prompt,
+                ctx=ctx,
+                response_message=response_message,
             )
         else:
             await interaction.response.send_message(
diff --git a/cogs/image_prompt_optimizer.py b/cogs/image_prompt_optimizer.py
index b972c82..a12db52 100644
--- a/cogs/image_prompt_optimizer.py
+++ b/cogs/image_prompt_optimizer.py
@@ -215,7 +215,10 @@ class RedoButton(discord.ui.Button["OptimizeView"]):
                 "Redoing your original request...", ephemeral=True, delete_after=20
             )
             await self.converser_cog.encapsulated_send(
-                user_id=user_id, prompt=prompt, ctx=ctx, response_message=response_message
+                user_id=user_id,
+                prompt=prompt,
+                ctx=ctx,
+                response_message=response_message,
             )
         else:
             await interaction.response.send_message(
diff --git a/models/moderations_service_model.py b/models/moderations_service_model.py
index d5b5335..2565168 100644
--- a/models/moderations_service_model.py
+++ b/models/moderations_service_model.py
@@ -51,7 +51,7 @@ class Moderation:
         return embed
 
     @staticmethod
-    def determine_moderation_result(text,response):
+    def determine_moderation_result(text, response):
         HATE_THRESHOLD = 0.005
         HATE_VIOLENCE_THRESHOLD = 0.05
         SELF_HARM_THRESHOLD = 0.05
@@ -60,7 +60,11 @@ class Moderation:
         VIOLENCE_THRESHOLD = 0.08
         VIOLENCE_GRAPHIC_THRESHOLD = 0.1
 
-        extreme_hatred_qualifiers = ["i fucking hate", "fucking hate", "i fucking despise"]
+        extreme_hatred_qualifiers = [
+            "i fucking hate",
+            "fucking hate",
+            "i fucking despise",
+        ]
 
         thresholds = [
             HATE_THRESHOLD,
@@ -88,7 +92,9 @@ class Moderation:
         # Iterate the category scores using the threshold_iterator and compare the values to thresholds
         for category, threshold in zip(threshold_iterator, thresholds):
             if category == "hate":
-                if "hate" in text.lower(): # The word "hate" makes the model oversensitive. This is a (bad) workaround.
+                if (
+                    "hate" in text.lower()
+                ):  # The word "hate" makes the model oversensitive. This is a (bad) workaround.
                     threshold = 0.1
                 if any(word in text.lower() for word in extreme_hatred_qualifiers):
                     threshold = 0.6
@@ -118,7 +124,9 @@ class Moderation:
         response = await model.send_moderations_request(
             to_moderate.message.content
         )
-        moderation_result = Moderation.determine_moderation_result(to_moderate.message.content,response)
+        moderation_result = Moderation.determine_moderation_result(
+            to_moderate.message.content, response
+        )
 
         if moderation_result:
             # Take care of the flagged message