prompt=f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt}\n\nElaboration:"
-        self.temp = SETTINGS_DB['temp'] if 'temp' in SETTINGS_DB else 0.8  # Higher values make output more random; lower values make it more deterministic and coherent
-        self.top_p = SETTINGS_DB['top_p'] if 'top_p' in SETTINGS_DB else 1  # 1 considers the full probability distribution; 0.1 means the model only samples from the top 10% of the probability mass
-        self.max_tokens = SETTINGS_DB['max_tokens'] if 'max_tokens' in SETTINGS_DB else 4000  # The maximum number of tokens the model can generate
-        self.presence_penalty = SETTINGS_DB['presence_penalty'] if 'presence_penalty' in SETTINGS_DB else 0.0  # A number between -2 and 2 that determines how strongly the model avoids repeating text that has already appeared
+        self.temp = (
+            SETTINGS_DB["temp"] if "temp" in SETTINGS_DB else 0.8
+        )  # Higher values make output more random; lower values make it more deterministic and coherent
+        self.top_p = (
+            SETTINGS_DB["top_p"] if "top_p" in SETTINGS_DB else 1
+        )  # 1 considers the full probability distribution; 0.1 means the model only samples from the top 10% of the probability mass
+        self.max_tokens = (
+            SETTINGS_DB["max_tokens"] if "max_tokens" in SETTINGS_DB else 4000
+        )  # The maximum number of tokens the model can generate
+        self.presence_penalty = (
+            SETTINGS_DB["presence_penalty"] if "presence_penalty" in SETTINGS_DB else 0.0
+        )  # A number between -2 and 2 that determines how strongly the model avoids repeating text that has already appeared
         self.best_of = SETTINGS_DB['best_of'] if 'best_of' in SETTINGS_DB else 1  # Number of candidate completions to generate and compare the log-likelihoods of
         self.prompt_min_length = SETTINGS_DB['prompt_min_length'] if 'prompt_min_length' in SETTINGS_DB else 6  # The minimum allowed prompt length
         self.max_conversation_length = SETTINGS_DB['max_conversation_length'] if 'max_conversation_length' in SETTINGS_DB else 100  # The maximum number of conversation items to keep in memory
         self.model = SETTINGS_DB['model'] if 'model' in SETTINGS_DB else Models.DEFAULT  # The model to use
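
# The repeated `x if "x" in SETTINGS_DB else default` pattern in the hunk
# above is a keyed lookup with a fallback. A minimal sketch of the
# equivalence, assuming SETTINGS_DB behaves like a plain dict (in the real
# project it may be a persistent mapping rather than an in-memory dict):
SETTINGS_DB = {"temp": 0.6}  # hypothetical contents, for illustration only

def setting(key, default):
    """Return the stored setting, or the default when the key is absent."""
    return SETTINGS_DB[key] if key in SETTINGS_DB else default

assert setting("temp", 0.8) == 0.6  # a stored value wins over the default
assert setting("top_p", 1) == 1  # a missing key falls back to the default
assert setting("top_p", 1) == SETTINGS_DB.get("top_p", 1)  # same as dict.get
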
f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
)
self._num_conversation_lookback=value
SETTINGS_DB['num_conversation_lookback']=value
SETTINGS_DB["num_conversation_lookback"]=value
@property
defwelcome_message_enabled(self):
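
# Every setter hunk below follows the same validate-then-persist shape: coerce
# the incoming value, range-check it against ModelLimits, store it on the
# instance, then mirror it into SETTINGS_DB so it survives restarts. A
# self-contained sketch of that pattern; the Limits bounds and the plain-dict
# SETTINGS_DB here are hypothetical stand-ins, not the project's real values:
SETTINGS_DB = {}

class Limits:
    MIN_NUM_CONVERSATION_LOOKBACK = 1
    MAX_NUM_CONVERSATION_LOOKBACK = 10

class Model:
    @property
    def num_conversation_lookback(self):
        return self._num_conversation_lookback

    @num_conversation_lookback.setter
    def num_conversation_lookback(self, value):
        value = int(value)  # command input typically arrives as a string
        if not (
            Limits.MIN_NUM_CONVERSATION_LOOKBACK
            <= value
            <= Limits.MAX_NUM_CONVERSATION_LOOKBACK
        ):
            raise ValueError(
                f"Number of conversations to look back on must be <= "
                f"{Limits.MAX_NUM_CONVERSATION_LOOKBACK}!"
            )
        self._num_conversation_lookback = value
        SETTINGS_DB["num_conversation_lookback"] = value  # persist the change
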
@@ -258,7 +328,7 @@ class Model:
         else:
             raise ValueError("Value must be either `true` or `false`!")
                 f"Max conversation length must be less than {ModelLimits.MAX_CONVERSATION_LENGTH}, this will start using credits quick."
             )
         self._max_conversation_length = value
-        SETTINGS_DB['max_conversation_length'] = value
+        SETTINGS_DB["max_conversation_length"] = value

     @property
     def mode(self):
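
# The `true`/`false` ValueError above implies boolean settings are parsed from
# strings (e.g. typed into a bot command). A hedged sketch of that step,
# assuming case-insensitive input; `parse_bool_setting` is a hypothetical
# helper, not a function from the project:
def parse_bool_setting(value: str) -> bool:
    lowered = value.lower().strip()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise ValueError("Value must be either `true` or `false`!")
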
@@ -396,7 +466,7 @@ class Model:
             raise ValueError(f"Unknown mode: {value}")
         self._mode = value
-        SETTINGS_DB['mode'] = value
+        SETTINGS_DB["mode"] = value

     @property
     def temp(self):
@@ -411,7 +481,7 @@ class Model:
             )
         self._temp = value
-        SETTINGS_DB['temp'] = value
+        SETTINGS_DB["temp"] = value

     @property
     def top_p(self):
@@ -425,7 +495,7 @@ class Model:
                 f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
             )
         self._top_p = value
-        SETTINGS_DB['top_p'] = value
+        SETTINGS_DB["top_p"] = value

     @property
     def max_tokens(self):
@@ -439,7 +509,7 @@ class Model:
                 f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
             )
         self._max_tokens = value
-        SETTINGS_DB['max_tokens'] = value
+        SETTINGS_DB["max_tokens"] = value

     @property
     def presence_penalty(self):
@@ -456,7 +526,7 @@ class Model:
                 f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
             )
         self._presence_penalty = value
-        SETTINGS_DB['presence_penalty'] = value
+        SETTINGS_DB["presence_penalty"] = value

     @property
     def frequency_penalty(self):
@@ -473,7 +543,7 @@ class Model:
                 f"Frequency penalty must be between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
             )
         self._frequency_penalty = value
-        SETTINGS_DB['frequency_penalty'] = value
+        SETTINGS_DB["frequency_penalty"] = value

     @property
     def best_of(self):
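
# These knobs correspond one-to-one to parameters of the legacy OpenAI
# Completion endpoint (openai-python < 1.0). A sketch of how a configured
# model's settings might be forwarded; the SimpleNamespace stands in for a
# real Model instance and the model name is an illustrative choice, not
# necessarily Models.DEFAULT:
from types import SimpleNamespace

import openai  # assumes the pre-1.0 openai package and an API key in the env

model = SimpleNamespace(
    temp=0.8, top_p=1, max_tokens=4000,
    presence_penalty=0.0, frequency_penalty=0.0, best_of=1,
)

response = openai.Completion.create(
    model="text-davinci-003",  # illustrative model name
    prompt="Hello there!",
    temperature=model.temp,
    top_p=model.top_p,
    max_tokens=model.max_tokens,
    presence_penalty=model.presence_penalty,
    frequency_penalty=model.frequency_penalty,
    best_of=model.best_of,
)
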
@@ -487,7 +557,7 @@ class Model:
                 f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
             )
         self._best_of = value
-        SETTINGS_DB['best_of'] = value
+        SETTINGS_DB["best_of"] = value

     @property
     def prompt_min_length(self):
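
# Why the best_of message warns about a token multiplier: the API generates
# best_of candidate completions server-side, and completion tokens are billed
# for all of them, so the worst case grows linearly with best_of:
max_tokens = 4000
best_of = 3
worst_case_completion_tokens = best_of * max_tokens  # up to 12000 tokens billed
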
@@ -504,7 +574,7 @@ class Model:
                 f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"