Hot fix for Docker

Kaveen Kumarasinghe 1 year ago
commit 0b71478e56

@@ -96,6 +96,16 @@ class Commands(discord.Cog, name="Commands"):
):
await self.converser_cog.settings_command(ctx, parameter, value)
@add_to_group("system")
@discord.slash_command(
name="settings-reset",
description="Reset all settings for GPT3Discord",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def settings_reset(self, ctx: discord.ApplicationContext):
await self.converser_cog.settings_reset_command(ctx)
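# Registered via @add_to_group("system") above, so this surfaces as the
# /system settings-reset slash command (guild-only per the decorator).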
@add_to_group("system")
@discord.slash_command(
name="local-size",
@@ -296,7 +306,7 @@ class Commands(discord.Cog, name="Commands"):
description="Higher values means the model will take more risks",
required=False,
min_value=0,
max_value=1,
max_value=2,
)
@discord.option(
name="top_p",
@@ -356,7 +366,7 @@ class Commands(discord.Cog, name="Commands"):
required=False,
input_type=float,
min_value=0,
max_value=1,
max_value=2,
)
@discord.option(
name="top_p",
@@ -623,21 +633,29 @@ class Commands(discord.Cog, name="Commands"):
"Translations are disabled on this server.", ephemeral=True
)
# @discord.message_command(
# name="Paraphrase",
# guild_ids=ALLOWED_GUILDS,
# checks=[Check.check_gpt_roles()],
# )
# async def paraphrase_action(self, ctx, message: discord.Message):
# await self.converser_cog.paraphrase_action(ctx, message)
@discord.message_command(
name="Paraphrase",
name="Elaborate",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_gpt_roles()],
)
async def paraphrase_action(self, ctx, message: discord.Message):
await self.converser_cog.paraphrase_action(ctx, message)
async def elaborate_action(self, ctx, message: discord.Message):
await self.converser_cog.elaborate_action(ctx, message)
@discord.message_command(
name="Elaborate",
name="Summarize",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_gpt_roles()],
)
async def elaborate_action(self, ctx, message: discord.Message):
await self.converser_cog.elaborate_action(ctx, message)
async def summarize_action(self, ctx, message: discord.Message):
await self.converser_cog.summarize_action(ctx, message)
# Search slash commands
@discord.slash_command(

@@ -11,6 +11,7 @@ import json
import discord
from models.embed_statics_model import EmbedStatics
from models.openai_model import Override
from services.environment_service import EnvService
from services.message_queue_service import Message
from services.moderations_service import Moderation
@@ -696,7 +697,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
top_p: float,
frequency_penalty: float,
presence_penalty: float,
from_action=None,
from_ask_action=None,
from_other_action=None,
):
"""Command handler. Requests and returns a generation with no extras to the completion endpoint
@@ -720,18 +722,18 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx.defer()
overrides = Override(temperature, top_p, frequency_penalty, presence_penalty)
await TextService.encapsulated_send(
self,
user.id,
prompt,
ctx,
temp_override=temperature,
top_p_override=top_p,
frequency_penalty_override=frequency_penalty,
presence_penalty_override=presence_penalty,
overrides=overrides,
from_ask_command=True,
custom_api_key=user_api_key,
from_action=from_action,
from_ask_action=from_ask_action,
from_other_action=from_other_action,
)
async def edit_command(
@@ -766,13 +768,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx.defer()
overrides = Override(temperature, top_p, 0, 0)
await TextService.encapsulated_send(
self,
user.id,
prompt=text,
ctx=ctx,
temp_override=temperature,
top_p_override=top_p,
overrides=overrides,
instruction=instruction,
from_edit_command=True,
codex=codex,
@@ -963,6 +966,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self.conversation_threads[thread.id].count += 1
overrides = Override(
overrides["temperature"],
overrides["top_p"],
overrides["frequency_penalty"],
overrides["presence_penalty"],
)
await TextService.encapsulated_send(
self,
thread.id,
@@ -972,10 +982,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
[item.text for item in self.conversation_threads[thread.id].history]
),
thread_message,
temp_override=overrides["temperature"],
top_p_override=overrides["top_p"],
frequency_penalty_override=overrides["frequency_penalty"],
presence_penalty_override=overrides["presence_penalty"],
overrides=overrides,
user=user,
model=self.conversation_threads[thread.id].model,
custom_api_key=user_api_key,
@@ -1043,6 +1050,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Otherwise, process the settings change
await self.process_settings(ctx, parameter, value)
async def settings_reset_command(self, ctx: discord.ApplicationContext):
"""Command handler. Resets all settings to default"""
await ctx.defer()
self.model.reset_settings()
await ctx.respond("Settings reset to default")
#
# Text-based context menu commands from here
#
@@ -1050,24 +1063,74 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
async def ask_gpt_action(self, ctx, message: discord.Message):
"""Message command. Return the message"""
prompt = await self.mention_to_username(ctx, message.content)
await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
await self.ask_command(
ctx, prompt, None, None, None, None, from_ask_action=prompt
)
async def paraphrase_action(self, ctx, message: discord.Message):
"""Message command. paraphrase the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = prompt + "\nParaphrased:"
# Construct the paraphrase prompt
prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \n\nParaphrased:"
prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \nParaphrased:"
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 1000:
await ctx.respond(
f"This message is too long to paraphrase.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
await self.ask_command(
ctx, prompt, None, None, None, None, from_other_action=from_other_action
)
async def elaborate_action(self, ctx, message: discord.Message):
"""Message command. elaborate on the subject of the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = prompt + "\nElaboration:"
# Construct the elaboration prompt
prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 500:
await ctx.respond(
f"This message is too long to elaborate on.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(
ctx, prompt, None, None, None, None, from_other_action=from_other_action
)
async def summarize_action(self, ctx, message: discord.Message):
"""Message command. elaborate on the subject of the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = (
"Message at message link: " + message.jump_url + "\nSummarized:"
)
# Construct the summarization prompt
prompt = f"Elaborate upon the subject of the following message: {prompt} \n\nElaboration:"
prompt = f"Summarize the following message, be as short and concise as possible: {prompt} \n\nSummary:"
await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 300:
await ctx.respond(
f"Your prompt is too long. It has {tokens} tokens, but the maximum is {self.model.max_tokens-300}.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(
ctx, prompt, None, None, None, None, from_other_action=from_other_action
)
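# Note the differing headroom in the three guards above (1000, 500 and
# 300 tokens below self.model.max_tokens): each reserves room for the
# model's completion before the prompt is forwarded to ask_command.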

@@ -30,7 +30,7 @@ from services.environment_service import EnvService
from models.openai_model import Model
__version__ = "8.7.6"
__version__ = "9.0.2"
PID_FILE = Path("bot.pid")
@@ -237,7 +237,9 @@ def init():
signal.signal(signal.SIGTERM, cleanup_pid_file)
if check_process_file(PID_FILE):
print("Process ID file already exists")
print(
"Process ID file already exists. Remove the file if you're sure another instance isn't running with the command: rm bot.pid"
)
sys.exit(1)
else:
with PID_FILE.open("w") as f:

@@ -15,6 +15,15 @@ import discord
import requests
from PIL import Image
from discord import File
from sqlitedict import SqliteDict
try:
print("Attempting to retrieve the settings DB")
SETTINGS_DB = SqliteDict("main_db.sqlite", tablename="settings", autocommit=True)
print("Retrieved the settings DB")
except Exception as e:
print("Failed to retrieve the settings DB. The bot is terminating.")
raise e
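# Hedged sketch of the pattern this DB enables (key and values are
# illustrative): SqliteDict behaves like a dict backed by one table of
# the SQLite file, and autocommit=True flushes each assignment to disk,
# so settings persist across bot restarts without explicit commits.
example_temp = SETTINGS_DB["temp"] if "temp" in SETTINGS_DB else 0.8
SETTINGS_DB["temp"] = example_temp  # written through to main_db.sqlite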
class Mode:
@@ -24,6 +33,14 @@ class Mode:
ALL_MODES = [TEMPERATURE, TOP_P]
class Override:
def __init__(self, temp=None, top_p=None, frequency=None, presence=None):
self.temperature = temp
self.top_p = top_p
self.frequency_penalty = frequency
self.presence_penalty = presence
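# Hedged usage sketch (values illustrative): call sites now build one
# Override instead of threading four *_override kwargs through
# encapsulated_send; downstream code reads overrides.temperature,
# overrides.top_p, overrides.frequency_penalty and
# overrides.presence_penalty.
example_overrides = Override(temp=0.9, top_p=1, frequency=0, presence=0)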
class Models:
# Text models
DAVINCI = "text-davinci-003"
@@ -106,36 +123,136 @@ class ModelLimits:
MIN_BEST_OF = 1
MAX_BEST_OF = 3
MIN_PROMPT_MIN_LENGTH = 10
MAX_PROMPT_MIN_LENGTH = 4096
MIN_PROMPT_MIN_LENGTH = 5
MAX_PROMPT_MIN_LENGTH = 4000
class Model:
def __init__(self, usage_service):
self._mode = Mode.TEMPERATURE
self._temp = 0.8 # Higher value means more random, lower value means more likely to be a coherent sentence
self._top_p = 1 # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution
self._max_tokens = 4000 # The maximum number of tokens the model can generate
self._presence_penalty = (
0 # Penalize new tokens based on whether they appear in the text so far
)
def set_initial_state(self, usage_service):
self.mode = Mode.TEMPERATURE
self.temp = (
SETTINGS_DB["temp"] if "temp" in SETTINGS_DB else 0.8
) # Higher value means more random, lower value means more likely to be a coherent sentence
self.top_p = (
SETTINGS_DB["top_p"] if "top_p" in SETTINGS_DB else 1
) # 1 considers the full probability distribution; 0.1 means the model will only sample from the top 10% of the probability mass
self.max_tokens = (
SETTINGS_DB["max_tokens"] if "max_tokens" in SETTINGS_DB else 4000
) # The maximum number of tokens the model can generate
self.presence_penalty = (
SETTINGS_DB["presence_penalty"]
if "presence_penalty" in SETTINGS_DB
else 0.0
) # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text
# Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
self._frequency_penalty = 0
self._best_of = 1 # Number of responses to compare the loglikelihoods of
self._prompt_min_length = 8
self._max_conversation_length = 100
self._model = Models.DEFAULT
self.frequency_penalty = (
SETTINGS_DB["frequency_penalty"]
if "frequency_penalty" in SETTINGS_DB
else 0.0
)
self.best_of = (
SETTINGS_DB["best_of"] if "best_of" in SETTINGS_DB else 1
) # Number of responses to compare the loglikelihoods of
self.prompt_min_length = (
SETTINGS_DB["prompt_min_length"]
if "prompt_min_length" in SETTINGS_DB
else 6
) # The minimum length of the prompt
self.max_conversation_length = (
SETTINGS_DB["max_conversation_length"]
if "max_conversation_length" in SETTINGS_DB
else 100
) # The maximum number of conversation items to keep in memory
self.model = (
SETTINGS_DB["model"] if "model" in SETTINGS_DB else Models.DEFAULT
) # The model to use
self._low_usage_mode = False
self.usage_service = usage_service
self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"]
self._image_size = ImageSize.MEDIUM
self._num_images = 2
self._summarize_conversations = True
self._summarize_threshold = 3000
self.image_size = (
SETTINGS_DB["image_size"]
if "image_size" in SETTINGS_DB
else ImageSize.MEDIUM
)
self.num_images = (
SETTINGS_DB["num_images"] if "num_images" in SETTINGS_DB else 2
)
self.summarize_conversations = (
bool(SETTINGS_DB["summarize_conversations"])
if "summarize_conversations" in SETTINGS_DB
else True
)
self.summarize_threshold = (
SETTINGS_DB["summarize_threshold"]
if "summarize_threshold" in SETTINGS_DB
else 3000
)
self.model_max_tokens = 4024
self._welcome_message_enabled = False
self._num_static_conversation_items = 10
self._num_conversation_lookback = 5
self.welcome_message_enabled = (
bool(SETTINGS_DB["welcome_message_enabled"])
if "welcome_message_enabled" in SETTINGS_DB
else False
)
self.num_static_conversation_items = (
SETTINGS_DB["num_static_conversation_items"]
if "num_static_conversation_items" in SETTINGS_DB
else 10
)
self.num_conversation_lookback = (
SETTINGS_DB["num_conversation_lookback"]
if "num_conversation_lookback" in SETTINGS_DB
else 5
)
def reset_settings(self):
keys = [
"temp",
"top_p",
"max_tokens",
"presence_penalty",
"frequency_penalty",
"best_of",
"prompt_min_length",
"max_conversation_length",
"model",
"image_size",
"num_images",
"summarize_conversations",
"summarize_threshold",
"welcome_message_enabled",
"num_static_conversation_items",
"num_conversation_lookback",
]
for key in keys:
try:
del SETTINGS_DB[key]
except:
pass
self.set_initial_state(self.usage_service)
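# Reset works by deletion: with the keys gone from SETTINGS_DB, the
# set_initial_state call above finds nothing persisted, so every
# `SETTINGS_DB[key] if key in SETTINGS_DB else default` expression
# falls back to its hard-coded default value.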
def __init__(self, usage_service):
self._num_conversation_lookback = None
self._num_static_conversation_items = None
self._welcome_message_enabled = None
self.model_max_tokens = None
self._summarize_threshold = None
self._summarize_conversations = None
self._num_images = None
self._image_size = None
self.DAVINCI_ROLES = None
self.usage_service = None
self._low_usage_mode = None
self._model = None
self._max_conversation_length = None
self._prompt_min_length = None
self._best_of = None
self._frequency_penalty = None
self._presence_penalty = None
self._max_tokens = None
self._top_p = None
self._temp = None
self._mode = None
self.set_initial_state(usage_service)
try:
self.IMAGE_SAVE_PATH = os.environ["IMAGE_SAVE_PATH"]
@@ -177,6 +294,7 @@ class Model:
f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
)
self._num_static_conversation_items = value
SETTINGS_DB["num_static_conversation_items"] = value
@property
def num_conversation_lookback(self):
@@ -194,6 +312,7 @@ class Model:
f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
)
self._num_conversation_lookback = value
SETTINGS_DB["num_conversation_lookback"] = value
@property
def welcome_message_enabled(self):
@@ -201,12 +320,15 @@ class Model:
@welcome_message_enabled.setter
def welcome_message_enabled(self, value):
if value.lower() == "true":
self._welcome_message_enabled = True
elif value.lower() == "false":
self._welcome_message_enabled = False
else:
raise ValueError("Value must be either `true` or `false`!")
if not isinstance(value, bool):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
self._welcome_message_enabled = value
SETTINGS_DB["welcome_message_enabled"] = self._welcome_message_enabled
@property
def summarize_threshold(self):
@@ -223,6 +345,7 @@ class Model:
f"Summarize threshold should be a number between {ModelLimits.MIN_SUMMARIZE_THRESHOLD} and {ModelLimits.MAX_SUMMARIZE_THRESHOLD}!"
)
self._summarize_threshold = value
SETTINGS_DB["summarize_threshold"] = value
@property
def summarize_conversations(self):
@@ -231,13 +354,15 @@ class Model:
@summarize_conversations.setter
def summarize_conversations(self, value):
# convert value string into boolean
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
if not isinstance(value, bool):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
self._summarize_conversations = value
SETTINGS_DB["summarize_conversations"] = value
@property
def image_size(self):
@@ -247,6 +372,7 @@ class Model:
def image_size(self, value):
if value in ImageSize.ALL_SIZES:
self._image_size = value
SETTINGS_DB["image_size"] = value
else:
raise ValueError(
f"Image size must be one of the following: {ImageSize.ALL_SIZES}"
@@ -264,6 +390,7 @@ class Model:
f"Number of images to generate should be a number between {ModelLimits.MIN_NUM_IMAGES} and {ModelLimits.MAX_NUM_IMAGES}!"
)
self._num_images = value
SETTINGS_DB["num_images"] = value
@property
def low_usage_mode(self):
@@ -300,6 +427,7 @@ class Model:
# Set the token count
self._max_tokens = Models.get_max_tokens(self._model)
SETTINGS_DB["model"] = model
@property
def max_conversation_length(self):
@@ -317,6 +445,7 @@ class Model:
f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick."
)
self._max_conversation_length = value
SETTINGS_DB["max_conversation_length"] = value
@property
def mode(self):
@@ -337,6 +466,7 @@ class Model:
raise ValueError(f"Unknown mode: {value}")
self._mode = value
SETTINGS_DB["mode"] = value
@property
def temp(self):
@@ -351,6 +481,7 @@ class Model:
)
self._temp = value
SETTINGS_DB["temp"] = value
@property
def top_p(self):
@@ -364,6 +495,7 @@ class Model:
f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
)
self._top_p = value
SETTINGS_DB["top_p"] = value
@property
def max_tokens(self):
@@ -377,6 +509,7 @@ class Model:
f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
)
self._max_tokens = value
SETTINGS_DB["max_tokens"] = value
@property
def presence_penalty(self):
@@ -393,6 +526,7 @@ class Model:
f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
)
self._presence_penalty = value
SETTINGS_DB["presence_penalty"] = value
@property
def frequency_penalty(self):
@@ -409,6 +543,7 @@ class Model:
f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
)
self._frequency_penalty = value
SETTINGS_DB["frequency_penalty"] = value
@property
def best_of(self):
@@ -422,6 +557,7 @@ class Model:
f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
)
self._best_of = value
SETTINGS_DB["best_of"] = value
@property
def prompt_min_length(self):
@@ -438,6 +574,7 @@ class Model:
f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"
)
self._prompt_min_length = value
SETTINGS_DB["prompt_min_length"] = value
def backoff_handler(details):
print(

@@ -8,7 +8,7 @@ from discord.ext import pages
from models.embed_statics_model import EmbedStatics
from services.deletion_service import Deletion
from models.openai_model import Model
from models.openai_model import Model, Override
from models.user_model import EmbeddedConversationItem, RedoUser
from services.environment_service import EnvService
@@ -26,10 +26,7 @@ class TextService:
prompt,
ctx,
response_message=None,
temp_override=None,
top_p_override=None,
frequency_penalty_override=None,
presence_penalty_override=None,
overrides=None,
instruction=None,
from_ask_command=False,
from_edit_command=False,
@@ -39,7 +36,8 @@ class TextService:
custom_api_key=None,
edited_request=False,
redo_request=False,
from_action=False,
from_ask_action=False,
from_other_action=None,
):
"""General service function for sending and receiving gpt generations
@@ -268,8 +266,8 @@ class TextService:
response = await converser_cog.model.send_edit_request(
text=new_prompt,
instruction=instruction,
temp_override=temp_override,
top_p_override=top_p_override,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
codex=codex,
custom_api_key=custom_api_key,
)
@@ -277,10 +275,10 @@ class TextService:
response = await converser_cog.model.send_request(
new_prompt,
tokens=tokens,
temp_override=temp_override,
top_p_override=top_p_override,
frequency_penalty_override=frequency_penalty_override,
presence_penalty_override=presence_penalty_override,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
frequency_penalty_override=overrides.frequency_penalty,
presence_penalty_override=overrides.presence_penalty,
model=model,
stop=stop if not from_ask_command else None,
custom_api_key=custom_api_key,
@@ -291,7 +289,9 @@ class TextService:
str(response["choices"][0]["text"])
)
if from_ask_command or from_action:
if from_other_action:
response_text = f"***{from_other_action}*** {response_text}"
elif from_ask_command or from_ask_action:
response_text = f"***{prompt}***{response_text}"
elif from_edit_command:
if codex:
@@ -486,7 +486,7 @@ class TextService:
# Error catching for OpenAI model value errors
except ValueError as e:
embed = EmbedStatics.get_invalid_value_embed(e)
if from_action:
if from_ask_action:
await ctx.respond(embed=embed, ephemeral=True)
elif from_context:
await ctx.send_followup(embed=embed, ephemeral=True)
@@ -622,19 +622,22 @@ class TextService:
)
# set conversation overrides
overrides = converser_cog.conversation_threads[
conversation_overrides = converser_cog.conversation_threads[
message.channel.id
].get_overrides()
overrides = Override(
conversation_overrides["temperature"],
conversation_overrides["top_p"],
conversation_overrides["frequency_penalty"],
conversation_overrides["presence_penalty"],
)
await TextService.encapsulated_send(
converser_cog,
message.channel.id,
primary_prompt,
message,
temp_override=overrides["temperature"],
top_p_override=overrides["top_p"],
frequency_penalty_override=overrides["frequency_penalty"],
presence_penalty_override=overrides["presence_penalty"],
overrides=overrides,
model=converser_cog.conversation_threads[message.channel.id].model,
custom_api_key=user_api_key,
)
@@ -836,6 +839,7 @@ class RedoButton(discord.ui.Button["ConversationView"]):
await TextService.encapsulated_send(
self.converser_cog,
overrides=Override(None, None, None, None),
id=user_id,
prompt=prompt,
instruction=instruction,
