@@ -60,8 +60,13 @@ class Models:
     EDIT = "text-davinci-edit-001"
     CODE_EDIT = "code-davinci-edit-001"
+    # ChatGPT Models
+    TURBO = "gpt-3.5-turbo"
+    TURBO_DEV = "gpt-3.5-turbo-0301"
     # Model collections
-    TEXT_MODELS = [DAVINCI, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN]
+    TEXT_MODELS = [DAVINCI, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN, TURBO, TURBO_DEV]
+    CHATGPT_MODELS = [TURBO, TURBO_DEV]
     EDIT_MODELS = [EDIT, CODE_EDIT]
     DEFAULT = DAVINCI
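The new CHATGPT_MODELS collection lets callers branch on model family. A minimal sketch of that kind of check, purely illustrative and not part of this diff (the endpoints are the ones used in send_request below):

model = Models.TURBO
if model in Models.CHATGPT_MODELS:
    endpoint = "https://api.openai.com/v1/chat/completions"  # chat-style request
elif model in Models.TEXT_MODELS:
    endpoint = "https://api.openai.com/v1/completions"  # plain completion request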
@@ -75,6 +80,8 @@ class Models:
         "text-ada-001": 2024,
         "code-davinci-002": 7900,
         "code-cushman-001": 2024,
+        TURBO: 4096,
+        TURBO_DEV: 4096,
     }

     @staticmethod
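The truncated @staticmethod above is presumably the lookup that send_request later calls as Models.get_max_tokens(model). A hedged sketch of that shape, assuming the dict above is named TOKEN_MAPPING and assuming a plain lookup with an illustrative fallback:

def get_max_tokens(model: str) -> int:
    # Assumption: simple dict lookup; the 2024-token fallback is illustrative only.
    return Models.TOKEN_MAPPING.get(model, 2024)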
@@ -817,7 +824,7 @@ class Model:
         max_tries=4,
         on_backoff=backoff_handler_request,
     )
-    async def send_chatgpt_request(
+    async def send_chatgpt_chat_request(
         self,
         prompt_history,
         bot_name,
@@ -830,16 +837,11 @@ class Model:
         max_tokens_override=None,
         stop=None,
         custom_api_key=None,
     ) -> (
         Tuple[dict, bool]
     ):  # The response, and a boolean indicating whether or not the context limit was reached.
         # Validate that all the parameters are in a good state before we send the request
-        print(f"The prompt about to be sent is {prompt_history}")
-        print(
-            f"Overrides -> temp: {temp_override}, top_p: {top_p_override} frequency: {frequency_penalty_override}, presence: {presence_penalty_override}"
-        )

         # Clean up the user display name
         user_displayname_clean = self.cleanse_username(user_displayname)
@@ -915,6 +917,7 @@ class Model:
         model=None,
         stop=None,
         custom_api_key=None,
+        is_chatgpt_request=False,
     ) -> (
         Tuple[dict, bool]
     ):  # The response, and a boolean indicating whether or not the context limit was reached.
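The new flag picks between two request-body shapes; a minimal side-by-side sketch (values are placeholders, only the keys differ):

completion_payload = {  # /v1/completions: a raw prompt string
    "model": "text-davinci-003",
    "prompt": "Say hi",
}
chat_payload = {  # /v1/chat/completions: role-tagged messages, no "prompt" key
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Say hi"}],
}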
@@ -924,42 +927,72 @@ class Model:
         if model:
             max_tokens_override = Models.get_max_tokens(model) - tokens

-        # print(f"The prompt about to be sent is {prompt}")
-        # print(
-        #     f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
-        # )
+        print(f"The prompt about to be sent is {prompt}")
+        print(
+            f"Overrides -> temp: {temp_override}, top_p: {top_p_override} frequency: {frequency_penalty_override}, presence: {presence_penalty_override}"
+        )

-        async with aiohttp.ClientSession(raise_for_status=False) as session:
-            payload = {
-                "model": self.model if model is None else model,
-                "prompt": prompt,
-                "stop": "" if stop is None else stop,
-                "temperature": self.temp if temp_override is None else temp_override,
-                "top_p": self.top_p if top_p_override is None else top_p_override,
-                "max_tokens": self.max_tokens - tokens
-                if max_tokens_override is None
-                else max_tokens_override,
-                "presence_penalty": self.presence_penalty
-                if presence_penalty_override is None
-                else presence_penalty_override,
-                "frequency_penalty": self.frequency_penalty
-                if frequency_penalty_override is None
-                else frequency_penalty_override,
-                "best_of": self.best_of if not best_of_override else best_of_override,
-            }
-            headers = {
-                "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
-            }
-            async with session.post(
-                "https://api.openai.com/v1/completions", json=payload, headers=headers
-            ) as resp:
-                response = await resp.json()
-                # print(f"Payload -> {payload}")
-                # Parse the total tokens used for this request and response pair from the response
-                await self.valid_text_request(response)
-                print(f"Response -> {response}")
-                return response
+        # Non-ChatGPT simple completion models.
+        if not is_chatgpt_request:
+            async with aiohttp.ClientSession(raise_for_status=False) as session:
+                payload = {
+                    "model": self.model if model is None else model,
+                    "prompt": prompt,
+                    "stop": "" if stop is None else stop,
+                    "temperature": self.temp if temp_override is None else temp_override,
+                    "top_p": self.top_p if top_p_override is None else top_p_override,
+                    "max_tokens": self.max_tokens - tokens
+                    if max_tokens_override is None
+                    else max_tokens_override,
+                    "presence_penalty": self.presence_penalty
+                    if presence_penalty_override is None
+                    else presence_penalty_override,
+                    "frequency_penalty": self.frequency_penalty
+                    if frequency_penalty_override is None
+                    else frequency_penalty_override,
+                    "best_of": self.best_of if not best_of_override else best_of_override,
+                }
+                headers = {
+                    "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
+                }
+                async with session.post(
+                    "https://api.openai.com/v1/completions", json=payload, headers=headers
+                ) as resp:
+                    response = await resp.json()
+                    # print(f"Payload -> {payload}")
+                    # Parse the total tokens used for this request and response pair from the response
+                    await self.valid_text_request(response)
+                    print(f"Response -> {response}")
+                    return response
+        else:  # ChatGPT Simple completion
+            async with aiohttp.ClientSession(raise_for_status=False) as session:
+                payload = {
+                    "model": self.model if not model else model,
+                    "messages": [{"role": "user", "content": prompt}],
+                    "stop": "" if stop is None else stop,
+                    "temperature": self.temp if temp_override is None else temp_override,
+                    "top_p": self.top_p if top_p_override is None else top_p_override,
+                    "presence_penalty": self.presence_penalty
+                    if presence_penalty_override is None
+                    else presence_penalty_override,
+                    "frequency_penalty": self.frequency_penalty
+                    if frequency_penalty_override is None
+                    else frequency_penalty_override,
+                }
+                headers = {
+                    "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
+                }
+                async with session.post(
+                    "https://api.openai.com/v1/chat/completions", json=payload, headers=headers
+                ) as resp:
+                    response = await resp.json()
+                    # print(f"Payload -> {payload}")
+                    # Parse the total tokens used for this request and response pair from the response
+                    await self.valid_text_request(response)
+                    print(f"Response -> {response}")
+                    return response
     @staticmethod
     async def send_test_request(api_key):
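A hedged sketch of a call site for the new path; model_instance and the argument values are placeholders, and only the keyword names come from the signature in the diff:

response = await model_instance.send_request(
    prompt="Hello!",
    tokens=10,
    model=Models.TURBO,
    is_chatgpt_request=True,
)
# Chat responses nest the text under message.content rather than a top-level text field.
print(response["choices"][0]["message"]["content"])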