persistent settings

Kaveen Kumarasinghe 2 years ago
parent b4ddee81f7
commit 595feaf539

@@ -96,6 +96,16 @@ class Commands(discord.Cog, name="Commands"):
     ):
         await self.converser_cog.settings_command(ctx, parameter, value)
 
+    @add_to_group("system")
+    @discord.slash_command(
+        name="settings-reset",
+        description="Reset all settings for GPT3Discord",
+        guild_ids=ALLOWED_GUILDS,
+    )
+    @discord.guild_only()
+    async def settings_reset(self, ctx: discord.ApplicationContext):
+        await self.converser_cog.settings_reset_command(ctx)
+
     @add_to_group("system")
     @discord.slash_command(
         name="local-size",

@@ -1043,6 +1043,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         # Otherwise, process the settings change
         await self.process_settings(ctx, parameter, value)
 
+    async def settings_reset_command(self, ctx: discord.ApplicationContext):
+        """Command handler. Resets all settings to default"""
+        await ctx.defer()
+        self.model.reset_settings()
+        await ctx.respond("Settings reset to default")
+
     #
     # Text-based context menu commands from here
     #

@@ -15,7 +15,15 @@ import discord
 import requests
 from PIL import Image
 from discord import File
+from sqlitedict import SqliteDict
 
+try:
+    print("Attempting to retrieve the settings DB")
+    SETTINGS_DB = SqliteDict("main_db.sqlite", tablename="settings", autocommit=True)
+    print("Retrieved the settings DB")
+except Exception as e:
+    print("Failed to retrieve the settings DB. The bot is terminating.")
+    raise e
 
 class Mode:
     TEMPERATURE = "temperature"
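For context, sqlitedict exposes the "settings" table as an ordinary dict backed by a SQLite file, so anything written to SETTINGS_DB survives bot restarts. A minimal sketch of that behaviour, reusing the same file and table names as this commit (the key and value here are purely illustrative):

from sqlitedict import SqliteDict

# Write a value; autocommit=True flushes it to the SQLite file immediately.
with SqliteDict("main_db.sqlite", tablename="settings", autocommit=True) as db:
    db["temp"] = 0.9

# A later process (e.g. the bot after a restart) reads the same value back.
with SqliteDict("main_db.sqlite", tablename="settings", autocommit=True) as db:
    print(db.get("temp", 0.8))  # -> 0.9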
@@ -106,36 +114,68 @@ class ModelLimits:
     MIN_BEST_OF = 1
     MAX_BEST_OF = 3
 
-    MIN_PROMPT_MIN_LENGTH = 10
-    MAX_PROMPT_MIN_LENGTH = 4096
+    MIN_PROMPT_MIN_LENGTH = 5
+    MAX_PROMPT_MIN_LENGTH = 4000
 
 
 class Model:
-    def __init__(self, usage_service):
-        self._mode = Mode.TEMPERATURE
-        self._temp = 0.8  # Higher value means more random, lower value means more likely to be a coherent sentence
-        self._top_p = 1  # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution
-        self._max_tokens = 4000  # The maximum number of tokens the model can generate
-        self._presence_penalty = (
-            0  # Penalize new tokens based on whether they appear in the text so far
-        )
+    def set_initial_state(self, usage_service):
+        self.mode = Mode.TEMPERATURE
+        self.temp = SETTINGS_DB['temp'] if 'temp' in SETTINGS_DB else 0.8  # Higher value means more random, lower value means more likely to be a coherent sentence
+        self.top_p = SETTINGS_DB['top_p'] if 'top_p' in SETTINGS_DB else 1  # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution
+        self.max_tokens = SETTINGS_DB['max_tokens'] if 'max_tokens' in SETTINGS_DB else 4000  # The maximum number of tokens the model can generate
+        self.presence_penalty = SETTINGS_DB['presence_penalty'] if 'presence_penalty' in SETTINGS_DB else 0.0  # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text
         # Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
-        self._frequency_penalty = 0
-        self._best_of = 1  # Number of responses to compare the loglikelihoods of
-        self._prompt_min_length = 8
-        self._max_conversation_length = 100
-        self._model = Models.DEFAULT
+        self.frequency_penalty = SETTINGS_DB['frequency_penalty'] if 'frequency_penalty' in SETTINGS_DB else 0.0
+        self.best_of = SETTINGS_DB['best_of'] if 'best_of' in SETTINGS_DB else 1  # Number of responses to compare the loglikelihoods of
+        self.prompt_min_length = SETTINGS_DB['prompt_min_length'] if 'prompt_min_length' in SETTINGS_DB else 6  # The minimum length of the prompt
+        self.max_conversation_length = SETTINGS_DB['max_conversation_length'] if 'max_conversation_length' in SETTINGS_DB else 100  # The maximum number of conversation items to keep in memory
+        self.model = SETTINGS_DB['model'] if 'model' in SETTINGS_DB else Models.DEFAULT  # The model to use
         self._low_usage_mode = False
         self.usage_service = usage_service
         self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"]
-        self._image_size = ImageSize.MEDIUM
-        self._num_images = 2
-        self._summarize_conversations = True
-        self._summarize_threshold = 3000
+        self.image_size = SETTINGS_DB['image_size'] if 'image_size' in SETTINGS_DB else ImageSize.MEDIUM
+        self.num_images = SETTINGS_DB['num_images'] if 'num_images' in SETTINGS_DB else 2
+        self.summarize_conversations = bool(SETTINGS_DB['summarize_conversations']) if 'summarize_conversations' in SETTINGS_DB else True
+        self.summarize_threshold = SETTINGS_DB['summarize_threshold'] if 'summarize_threshold' in SETTINGS_DB else 3000
         self.model_max_tokens = 4024
-        self._welcome_message_enabled = False
-        self._num_static_conversation_items = 10
-        self._num_conversation_lookback = 5
+        self.welcome_message_enabled = bool(SETTINGS_DB['welcome_message_enabled']) if 'welcome_message_enabled' in SETTINGS_DB else False
+        self.num_static_conversation_items = SETTINGS_DB['num_static_conversation_items'] if 'num_static_conversation_items' in SETTINGS_DB else 10
+        self.num_conversation_lookback = SETTINGS_DB['num_conversation_lookback'] if 'num_conversation_lookback' in SETTINGS_DB else 5
+
+    def reset_settings(self):
+        keys = ['temp', 'top_p', 'max_tokens', 'presence_penalty', 'frequency_penalty', 'best_of', 'prompt_min_length', 'max_conversation_length', 'model', 'image_size', 'num_images', 'summarize_conversations', 'summarize_threshold', 'welcome_message_enabled', 'num_static_conversation_items', 'num_conversation_lookback']
+        for key in keys:
+            try:
+                del SETTINGS_DB[key]
+            except:
+                pass
+        self.set_initial_state(self.usage_service)
+
+    def __init__(self, usage_service):
+        self._num_conversation_lookback = None
+        self._num_static_conversation_items = None
+        self._welcome_message_enabled = None
+        self.model_max_tokens = None
+        self._summarize_threshold = None
+        self._summarize_conversations = None
+        self._num_images = None
+        self._image_size = None
+        self.DAVINCI_ROLES = None
+        self.usage_service = None
+        self._low_usage_mode = None
+        self._model = None
+        self._max_conversation_length = None
+        self._prompt_min_length = None
+        self._best_of = None
+        self._frequency_penalty = None
+        self._presence_penalty = None
+        self._max_tokens = None
+        self._top_p = None
+        self._temp = None
+        self._mode = None
+        self.set_initial_state(usage_service)
 
         try:
             self.IMAGE_SAVE_PATH = os.environ["IMAGE_SAVE_PATH"]
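Each `SETTINGS_DB[key] if key in SETTINGS_DB else default` expression in set_initial_state falls back to the hard-coded default when nothing has been persisted yet. A sketch of the same lookup written as a helper, for clarity only (the `_load_setting` name is hypothetical and not part of this commit):

def _load_setting(key, default):
    # Return the persisted value if one exists, otherwise the built-in default.
    return SETTINGS_DB[key] if key in SETTINGS_DB else default

# Equivalent to the inline expressions above, e.g.:
# self.temp = _load_setting('temp', 0.8)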
@@ -177,6 +217,7 @@ class Model:
                 f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
             )
         self._num_static_conversation_items = value
+        SETTINGS_DB['num_static_conversation_items'] = value
 
     @property
     def num_conversation_lookback(self):
@@ -194,6 +235,7 @@ class Model:
                 f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
             )
         self._num_conversation_lookback = value
+        SETTINGS_DB['num_conversation_lookback'] = value
 
     @property
     def welcome_message_enabled(self):
@@ -201,12 +243,15 @@ class Model:
 
     @welcome_message_enabled.setter
     def welcome_message_enabled(self, value):
-        if value.lower() == "true":
-            self._welcome_message_enabled = True
-        elif value.lower() == "false":
-            self._welcome_message_enabled = False
-        else:
-            raise ValueError("Value must be either `true` or `false`!")
+        if not isinstance(value, bool):
+            if value.lower() == "true":
+                value = True
+            elif value.lower() == "false":
+                value = False
+            else:
+                raise ValueError("Value must be either `true` or `false`!")
+        self._welcome_message_enabled = value
+        SETTINGS_DB['welcome_message_enabled'] = self._welcome_message_enabled
 
     @property
     def summarize_threshold(self):
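The new `isinstance` guard is needed because a value can now arrive two ways: as a string typed into the /settings slash command, or as a real boolean restored from SETTINGS_DB at startup. A small sketch of that coercion pulled out into a standalone function, for illustration only (the `to_bool` name is hypothetical):

def to_bool(value):
    # Values restored from the settings DB are already booleans; pass them through.
    if isinstance(value, bool):
        return value
    # Values entered through the /settings command arrive as strings.
    if value.lower() == "true":
        return True
    if value.lower() == "false":
        return False
    raise ValueError("Value must be either `true` or `false`!")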
@@ -223,6 +268,7 @@ class Model:
                 f"Summarize threshold should be a number between {ModelLimits.MIN_SUMMARIZE_THRESHOLD} and {ModelLimits.MAX_SUMMARIZE_THRESHOLD}!"
             )
         self._summarize_threshold = value
+        SETTINGS_DB['summarize_threshold'] = value
 
     @property
     def summarize_conversations(self):
@@ -231,13 +277,15 @@ class Model:
 
     @summarize_conversations.setter
     def summarize_conversations(self, value):
         # convert value string into boolean
-        if value.lower() == "true":
-            value = True
-        elif value.lower() == "false":
-            value = False
-        else:
-            raise ValueError("Value must be either `true` or `false`!")
+        if not isinstance(value, bool):
+            if value.lower() == "true":
+                value = True
+            elif value.lower() == "false":
+                value = False
+            else:
+                raise ValueError("Value must be either `true` or `false`!")
         self._summarize_conversations = value
+        SETTINGS_DB['summarize_conversations'] = value
 
     @property
     def image_size(self):
@@ -247,6 +295,7 @@ class Model:
     def image_size(self, value):
         if value in ImageSize.ALL_SIZES:
             self._image_size = value
+            SETTINGS_DB['image_size'] = value
         else:
             raise ValueError(
                 f"Image size must be one of the following: {ImageSize.ALL_SIZES}"
@@ -264,6 +313,7 @@ class Model:
                 f"Number of images to generate should be a number between {ModelLimits.MIN_NUM_IMAGES} and {ModelLimits.MAX_NUM_IMAGES}!"
             )
         self._num_images = value
+        SETTINGS_DB['num_images'] = value
 
     @property
     def low_usage_mode(self):
@@ -300,6 +350,7 @@ class Model:
 
         # Set the token count
         self._max_tokens = Models.get_max_tokens(self._model)
+        SETTINGS_DB['model'] = model
 
     @property
     def max_conversation_length(self):
@@ -317,6 +368,7 @@ class Model:
                 f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick."
             )
         self._max_conversation_length = value
+        SETTINGS_DB['max_conversation_length'] = value
 
     @property
     def mode(self):
@@ -337,6 +389,7 @@ class Model:
             raise ValueError(f"Unknown mode: {value}")
 
         self._mode = value
+        SETTINGS_DB['mode'] = value
 
     @property
     def temp(self):
@@ -351,6 +404,7 @@ class Model:
             )
         self._temp = value
+        SETTINGS_DB['temp'] = value
 
     @property
     def top_p(self):
@@ -364,6 +418,7 @@ class Model:
                 f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
             )
         self._top_p = value
+        SETTINGS_DB['top_p'] = value
 
     @property
     def max_tokens(self):
@@ -377,6 +432,7 @@ class Model:
                 f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
             )
         self._max_tokens = value
+        SETTINGS_DB['max_tokens'] = value
 
     @property
     def presence_penalty(self):
@@ -393,6 +449,7 @@ class Model:
                 f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
             )
         self._presence_penalty = value
+        SETTINGS_DB['presence_penalty'] = value
 
     @property
     def frequency_penalty(self):
@@ -409,6 +466,7 @@ class Model:
                 f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
             )
         self._frequency_penalty = value
+        SETTINGS_DB['frequency_penalty'] = value
 
     @property
     def best_of(self):
@@ -422,6 +480,7 @@ class Model:
                 f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
             )
         self._best_of = value
+        SETTINGS_DB['best_of'] = value
 
     @property
     def prompt_min_length(self):
@@ -438,6 +497,7 @@ class Model:
                 f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"
             )
         self._prompt_min_length = value
+        SETTINGS_DB['prompt_min_length'] = value
 
 
 def backoff_handler(details):
     print(
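Every setter in this series of hunks follows the same write-through pattern: validate the incoming value, update the private attribute, then mirror it into SETTINGS_DB so set_initial_state picks it up on the next start. A condensed sketch of that pattern, assuming the SETTINGS_DB from this commit (the Example class and its bounds check are illustrative only):

class Example:
    def __init__(self):
        # Restore the persisted value if one exists, otherwise use the default.
        self._temp = SETTINGS_DB['temp'] if 'temp' in SETTINGS_DB else 0.8

    @property
    def temp(self):
        return self._temp

    @temp.setter
    def temp(self, value):
        value = float(value)
        if value < 0:  # illustrative bounds check; the real setters use ModelLimits
            raise ValueError("Temperature must be non-negative!")
        self._temp = value
        SETTINGS_DB['temp'] = value  # write-through: read back by set_initial_state() on restart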