self._frequency_penalty=0# Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
self._best_of=1# Number of responses to compare the loglikelihoods of
self._prompt_min_length=25
self._max_conversation_length=5
openai.api_key=os.getenv('OPENAI_TOKEN')
# Use @property getters with matching @<name>.setter methods for each instance field to provide value checking
@property
def max_conversation_length(self):
    """Maximum number of exchanges kept in a conversation with the bot."""
    return self._max_conversation_length

@max_conversation_length.setter
def max_conversation_length(self, value):
    # Bound the history length before storing it: long histories are
    # re-sent as prompt context on every request, which burns credits.
    if value < 1:
        raise ValueError("Max conversation length must be greater than 1")
    if value > 20:
        raise ValueError("Max conversation length must be less than 20, this will start using credits quick.")
    self._max_conversation_length = value
@property
def mode(self):
    """Sampling mode currently in effect (Mode.TOP_P or Mode.TEMPERATURE)."""
    return self._mode

@mode.setter
def mode(self, value):
    # Only the two known sampling modes are accepted.
    if value not in [Mode.TOP_P, Mode.TEMPERATURE]:
        raise ValueError("mode must be either 'top_p' or 'temperature'")
    # Switching mode pins BOTH sampling knobs to that mode's default pair.
    # NOTE(review): the original span contained both the old and new side
    # of a merged diff (a duplicate, unreachable `elif` branch); these are
    # the newer values — confirm against the full file history.
    if value == Mode.TOP_P:
        self._top_p = 0.1
        self._temp = 0.7
    elif value == Mode.TEMPERATURE:
        self._top_p = 0.9
        self._temp = 0.6
    self._mode = value
@temp.setter
def temp(self, value):
    """Set the sampling temperature; accepts anything float() can coerce.

    NOTE(review): the decorator/def line was destroyed by diff residue;
    the name `temp` is inferred from `self._temp` — confirm against the
    full file. The old cross-mode guard ("Cannot set temperature when in
    top_p mode") was on the removed side of the hunk and is dropped here,
    consistent with the mode setter now managing both knobs.
    """
    value = float(value)
    if value < 0 or value > 1:
        raise ValueError("temperature must be greater than 0 and less than 1, it is currently " + str(value))
    self._temp = value
@top_p.setter
def top_p(self, value):
    """Set nucleus-sampling top_p; accepts anything float() can coerce.

    NOTE(review): the decorator/def line was destroyed by diff residue;
    the name `top_p` is inferred from `self._top_p` — confirm against the
    full file. The old cross-mode guard ("Cannot set top_p when in
    temperature mode") was on the removed side of the hunk and is dropped,
    matching the companion `temp` setter.
    """
    value = float(value)
    if value < 0 or value > 1:
        raise ValueError("top_p must be greater than 0 and less than 1, it is currently " + str(value))
    self._top_p = value
@property
def best_of(self):
    # NOTE(review): the getter body was lost to a diff hunk header;
    # `return self._best_of` matches every sibling property — confirm.
    return self._best_of

@best_of.setter
def best_of(self, value):
    """Set how many candidate completions are compared server-side."""
    value = int(value)
    # Cap at 3: each candidate completion consumes tokens.
    if value < 1 or value > 3:
        raise ValueError(
            "best_of must be greater than 0 and ideally less than 3 to save tokens, it is currently " + str(value))
    self._best_of = value
@property
def prompt_min_length(self):
    # NOTE(review): the getter body was lost to a diff hunk header;
    # reconstructed to match the sibling properties — confirm.
    return self._prompt_min_length

@prompt_min_length.setter
def prompt_min_length(self, value):
    """Set the minimum accepted prompt length (10..4096 characters)."""
    value = int(value)
    if value < 10 or value > 4096:
        raise ValueError(
            "prompt_min_length must be greater than 10 and less than 4096, it is currently " + str(value))
    # Bug fix: the residue was missing this assignment, so the setter
    # validated the value but never stored it.
    self._prompt_min_length = value
# Tell the user the remaining global cooldown time, respond to the user's original message as a "reply"
awaitmessage.reply("You must wait "+str(round(GLOBAL_COOLDOWN_TIME-(time.time()-last_used[message.author.id])))+" seconds before using the bot again")
awaitmessage.reply("You must wait "+str(round(GLOBAL_COOLDOWN_TIME-(
time.time()-last_used[message.author.id])))+" seconds before using the bot again")
return
last_used[message.author.id]=time.time()
@ -186,18 +229,29 @@ class DiscordBot:
ifmessage.content=="!g":
# create a discord embed with help text
embed=discord.Embed(title="GPT3Bot Help",description="The current commands",color=0x00ff00)
embed.add_field(name="!g <prompt>",value="Ask GPT3 something. Be clear, long, and concise in your prompt. Don't waste tokens.",inline=False)
embed.add_field(name="!g <prompt>",
value="Ask GPT3 something. Be clear, long, and concise in your prompt. Don't waste tokens.",
inline=False)
embed.add_field(name="!g converse",
value="Start a conversation with GPT3",
inline=False)
embed.add_field(name="!g end",
value="End a conversation with GPT3",
inline=False)
embed.add_field(name="!gp",value="Print the current settings of the model",inline=False)
embed.add_field(name="!gs <model parameter> <value>",value="Change the parameter of the model named by <model parameter> to new value <value>",inline=False)
# Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
conversating_users[message.author.id].history+="You are an artificial intelligence that is able to do anything, and answer any question, I want you to be my personal assisstant and help me with some tasks."
awaitmessage.reply("You are now conversing with GPT3. End the conversation with !g end")
return
# If the prompt is just "end", end the conversation with GPT3
ifprompt=="end":
# If the user is not conversating, don't let them end the conversation
ifmessage.author.idnotinconversating_users:
awaitmessage.reply("You are not conversing with GPT3. Start a conversation with !g converse")
return
# If the user is conversating, end the conversation
conversating_users.pop(message.author.id)
awaitmessage.reply("You have ended the conversation with GPT3. Start a conversation with !g converse")
return
# We want to have conversationality functionality. To have gpt3 remember context, we need to append the conversation/prompt
# history to the prompt. We can do this by checking if the user is in the conversating_users dictionary, and if they are,