Format Python code with psf/black

github-actions 2 years ago
parent 9896051a7b
commit 903c91497d

@@ -1219,7 +1219,10 @@ class RedoButton(discord.ui.Button["RedoView"]):
             )
             await self.converser_cog.encapsulated_send(
-                user_id=user_id, prompt=prompt, ctx=ctx, response_message=response_message
+                user_id=user_id,
+                prompt=prompt,
+                ctx=ctx,
+                response_message=response_message,
             )
         else:
             await interaction.response.send_message(

@@ -215,7 +215,10 @@ class RedoButton(discord.ui.Button["OptimizeView"]):
                 "Redoing your original request...", ephemeral=True, delete_after=20
             )
             await self.converser_cog.encapsulated_send(
-                user_id=user_id, prompt=prompt, ctx=ctx, response_message=response_message
+                user_id=user_id,
+                prompt=prompt,
+                ctx=ctx,
+                response_message=response_message,
             )
         else:
             await interaction.response.send_message(

@@ -51,7 +51,7 @@ class Moderation:
         return embed

     @staticmethod
-    def determine_moderation_result(text,response):
+    def determine_moderation_result(text, response):
         HATE_THRESHOLD = 0.005
         HATE_VIOLENCE_THRESHOLD = 0.05
         SELF_HARM_THRESHOLD = 0.05
@@ -60,7 +60,11 @@ class Moderation:
         VIOLENCE_THRESHOLD = 0.08
         VIOLENCE_GRAPHIC_THRESHOLD = 0.1
-        extreme_hatred_qualifiers = ["i fucking hate", "fucking hate", "i fucking despise"]
+        extreme_hatred_qualifiers = [
+            "i fucking hate",
+            "fucking hate",
+            "i fucking despise",
+        ]
         thresholds = [
             HATE_THRESHOLD,
@@ -88,7 +92,9 @@ class Moderation:
         # Iterate the category scores using the threshold_iterator and compare the values to thresholds
         for category, threshold in zip(threshold_iterator, thresholds):
             if category == "hate":
-                if "hate" in text.lower(): # The word "hate" makes the model oversensitive. This is a (bad) workaround.
+                if (
+                    "hate" in text.lower()
+                ):  # The word "hate" makes the model oversensitive. This is a (bad) workaround.
                     threshold = 0.1
                 if any(word in text.lower() for word in extreme_hatred_qualifiers):
                     threshold = 0.6
@@ -118,7 +124,9 @@ class Moderation:
                     response = await model.send_moderations_request(
                         to_moderate.message.content
                     )
-                    moderation_result = Moderation.determine_moderation_result(to_moderate.message.content,response)
+                    moderation_result = Moderation.determine_moderation_result(
+                        to_moderate.message.content, response
+                    )

                     if moderation_result:
                         # Take care of the flagged message