# NOTE(review): this fragment reads as the tail of an `except` handler — a bare
# `raise` is only valid while an exception is being handled; confirm context.
print("Failed to retrieve the settings DB. The bot is terminating.")
# Fixed: original said `raisee`, which would be a NameError instead of
# re-raising the active exception.
raise
class Mode:
    """Names of the sampling modes the completion model can run in."""

    TEMPERATURE = "temperature"
@ -106,36 +114,68 @@ class ModelLimits:
# Bounds enforced by the corresponding Model property setters below.
MIN_BEST_OF = 1
MAX_BEST_OF = 3
# Deduplicated: the mangled text carried both the old values (10 / 4096) and
# the new ones (5 / 4000) for these two names; the later bindings win in
# Python, so only the new values are kept.
MIN_PROMPT_MIN_LENGTH = 5
MAX_PROMPT_MIN_LENGTH = 4000
class Model:
    """Tunable GPT completion settings, persisted through SETTINGS_DB.

    NOTE(review): reconstructed from a mangled diff that interleaved the old
    ``__init__`` body with the new ``set_initial_state`` — confirm against the
    upstream file that ``__init__`` did nothing else.
    """

    def __init__(self, usage_service):
        # All attribute setup lives in set_initial_state so the settings can
        # be re-initialized at runtime without constructing a new Model.
        self.set_initial_state(usage_service)

    def set_initial_state(self, usage_service):
        """Load every tunable setting from SETTINGS_DB, falling back to defaults.

        Assigning through the public attribute names routes each value through
        the corresponding property setter (validation + persistence).
        SETTINGS_DB is assumed dict-like (supports ``get``) — e.g. a SqliteDict.
        """
        self.mode = Mode.TEMPERATURE
        # Higher value means more random output; lower means more coherent.
        self.temp = SETTINGS_DB.get('temp', 0.8)
        # 1 is equivalent to greedy sampling; 0.1 means the model only
        # considers the top 10% of the probability distribution.
        self.top_p = SETTINGS_DB.get('top_p', 1)
        # The maximum number of tokens the model can generate.
        self.max_tokens = SETTINGS_DB.get('max_tokens', 4000)
        # Between -2 and 2: penalize new tokens based on whether they already
        # appear in the text so far.
        self.presence_penalty = SETTINGS_DB.get('presence_penalty', 0.0)
        # Penalize new tokens based on their existing frequency in the text so
        # far (higher frequency = lower probability of being chosen).
        self.frequency_penalty = SETTINGS_DB.get('frequency_penalty', 0)
        # Number of candidate responses to compare the log-likelihoods of.
        self.best_of = SETTINGS_DB.get('best_of', 1)
        # The minimum length of the prompt.
        self.prompt_min_length = SETTINGS_DB.get('prompt_min_length', 6)
        # The maximum number of conversation items to keep in memory.
        self.max_conversation_length = SETTINGS_DB.get('max_conversation_length', 100)
        # The model to use for completions.
        self.model = SETTINGS_DB.get('model', Models.DEFAULT)
f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
)
self._num_conversation_lookback=value
SETTINGS_DB['num_conversation_lookback']=value
@property
defwelcome_message_enabled(self):
@ -201,12 +243,15 @@ class Model:
@welcome_message_enabled.setter
def welcome_message_enabled(self, value):
    """Accept a bool directly, or the strings "true"/"false" (any case).

    Raises:
        ValueError: if a non-bool value is not "true" or "false".
    """
    # The mangled text carried both the old strict-string version and this
    # newer bool-tolerant one; the bool-tolerant version is kept.
    if not isinstance(value, bool):
        if value.lower() == "true":
            value = True
        elif value.lower() == "false":
            value = False
        else:
            raise ValueError("Value must be either `true` or `false`!")
    self._welcome_message_enabled = value
    # NOTE(review): persistence tail reconstructed to match every sibling
    # setter in this class — confirm the key name against the upstream file.
    SETTINGS_DB['welcome_message_enabled'] = value
f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick."
)
self._max_conversation_length=value
SETTINGS_DB['max_conversation_length']=value
@property
defmode(self):
@ -337,6 +389,7 @@ class Model:
raiseValueError(f"Unknown mode: {value}")
self._mode=value
SETTINGS_DB['mode']=value
@property
deftemp(self):
@ -351,6 +404,7 @@ class Model:
)
self._temp=value
SETTINGS_DB['temp']=value
@property
deftop_p(self):
@ -364,6 +418,7 @@ class Model:
f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
)
self._top_p=value
SETTINGS_DB['top_p']=value
@property
defmax_tokens(self):
@ -377,6 +432,7 @@ class Model:
f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
)
self._max_tokens=value
SETTINGS_DB['max_tokens']=value
@property
defpresence_penalty(self):
@ -393,6 +449,7 @@ class Model:
f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
)
self._presence_penalty=value
SETTINGS_DB['presence_penalty']=value
@property
deffrequency_penalty(self):
@ -409,6 +466,7 @@ class Model:
f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
)
self._frequency_penalty=value
SETTINGS_DB['frequency_penalty']=value
@property
defbest_of(self):
@ -422,6 +480,7 @@ class Model:
f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
)
self._best_of=value
SETTINGS_DB['best_of']=value
@property
defprompt_min_length(self):
@ -438,6 +497,7 @@ class Model:
f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"