prompt=f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt}\n\nElaboration:"
f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
)
self._num_conversation_lookback=value
SETTINGS_DB["num_conversation_lookback"]=value
@property
defwelcome_message_enabled(self):
@ -201,12 +320,15 @@ class Model:
@welcome_message_enabled.setter
def welcome_message_enabled(self, value):
    """Set whether the welcome message is enabled.

    Accepts a real bool, or the strings "true"/"false"
    (case-insensitive) as supplied by user-facing commands.

    Raises:
        ValueError: if a string other than "true"/"false" is given.
    """
    # Guard the string coercion so a genuine bool passes straight
    # through (the unguarded version crashed on bool.lower()).
    if not isinstance(value, bool):
        if value.lower() == "true":
            value = True
        elif value.lower() == "false":
            value = False
        else:
            raise ValueError("Value must be either `true` or `false`!")
    # Persist like every other setter in this class: cache on the
    # instance and mirror into the settings database.
    self._welcome_message_enabled = value
    SETTINGS_DB["welcome_message_enabled"] = value
f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick."
)
self._max_conversation_length=value
SETTINGS_DB["max_conversation_length"]=value
@property
defmode(self):
@ -337,6 +466,7 @@ class Model:
raiseValueError(f"Unknown mode: {value}")
self._mode=value
SETTINGS_DB["mode"]=value
@property
deftemp(self):
@ -351,6 +481,7 @@ class Model:
)
self._temp=value
SETTINGS_DB["temp"]=value
@property
deftop_p(self):
@ -364,6 +495,7 @@ class Model:
f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
)
self._top_p=value
SETTINGS_DB["top_p"]=value
@property
defmax_tokens(self):
@ -377,6 +509,7 @@ class Model:
f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
)
self._max_tokens=value
SETTINGS_DB["max_tokens"]=value
@property
defpresence_penalty(self):
@ -393,6 +526,7 @@ class Model:
f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
)
self._presence_penalty=value
SETTINGS_DB["presence_penalty"]=value
@property
deffrequency_penalty(self):
@ -409,6 +543,7 @@ class Model:
f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
)
self._frequency_penalty=value
SETTINGS_DB["frequency_penalty"]=value
@property
defbest_of(self):
@ -422,6 +557,7 @@ class Model:
f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
)
self._best_of=value
SETTINGS_DB["best_of"]=value
@property
defprompt_min_length(self):
@ -438,6 +574,7 @@ class Model:
f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"