Format Python code with psf/black push

github-actions 2 years ago
parent de463a652d
commit 0dd2083a3e

@@ -808,7 +808,6 @@ class Model:
         text = re.sub(r"[^a-zA-Z0-9]", "_", text)
         return text

     @backoff.on_exception(
         backoff.expo,
         ValueError,
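The @backoff.on_exception decorator retries the decorated coroutine with exponentially growing (jittered) waits whenever it raises the listed exception. A minimal self-contained sketch of that behavior; the max_tries value and the flaky_call function are illustrative, not this repo's actual settings:

import asyncio

import backoff

attempts = 0

# Retries flaky_call on ValueError, waiting up to ~1s, 2s, 4s, ... between
# attempts, and re-raises after four total tries.
@backoff.on_exception(backoff.expo, ValueError, max_tries=4)
async def flaky_call():
    global attempts
    attempts += 1
    if attempts < 3:
        raise ValueError("transient failure")
    return "ok"

print(asyncio.run(flaky_call()))  # fails twice, then prints "ok"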
@@ -830,7 +829,6 @@ class Model:
         max_tokens_override=None,
         stop=None,
         custom_api_key=None,
     ) -> (
         Tuple[dict, bool]
     ):  # The response, and a boolean indicating whether or not the context limit was reached.
@@ -850,20 +848,30 @@ class Model:
         # Format the request body into the messages format that the API is expecting
         # "messages": [{"role": "user", "content": "Hello!"}]
         messages = []
-        for number,message in enumerate(prompt_history):
+        for number, message in enumerate(prompt_history):
             if number == 0:
                 # If this is the first message, it is the context prompt.
-                messages.append({"role": "user", "name":"System_Instructor", "content": message.text})
+                messages.append(
+                    {
+                        "role": "user",
+                        "name": "System_Instructor",
+                        "content": message.text,
+                    }
+                )
                 continue

             if user_displayname in message.text:
-                text = message.text.replace(user_displayname+":", "")
+                text = message.text.replace(user_displayname + ":", "")
                 text = text.replace("<|endofstatement|>", "")
-                messages.append({"role": "user", "name":user_displayname_clean, "content": text})
+                messages.append(
+                    {"role": "user", "name": user_displayname_clean, "content": text}
+                )
             else:
                 text = message.text.replace(bot_name, "")
                 text = text.replace("<|endofstatement|>", "")
-                messages.append({"role": "assistant", "name":bot_name_clean, "content": text})
+                messages.append(
+                    {"role": "assistant", "name": bot_name_clean, "content": text}
+                )

         print(f"Messages -> {messages}")

         async with aiohttp.ClientSession(raise_for_status=False) as session:
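The loop above builds the messages array the Chat Completions API expects, tagging the context prompt and each speaker with a sanitized name field. Illustrative output of one pass over a short conversation (names and text are made up):

messages = [
    {"role": "user", "name": "System_Instructor", "content": "You are a helpful assistant."},
    {"role": "user", "name": "Alice", "content": "Hello!"},
    {"role": "assistant", "name": "GPTBot", "content": "Hi Alice, how can I help?"},
]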
@@ -884,7 +892,9 @@ class Model:
                 "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
             }
             async with session.post(
-                "https://api.openai.com/v1/chat/completions", json=payload, headers=headers
+                "https://api.openai.com/v1/chat/completions",
+                json=payload,
+                headers=headers,
             ) as resp:
                 response = await resp.json()
                 # print(f"Payload -> {payload}")
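Because the session is created with raise_for_status=False, HTTP-level failures do not raise; the OpenAI API reports errors inside the JSON body instead. A minimal sketch of the round trip under that assumption; chat_completion and its parameters are hypothetical names, not this repo's API:

import aiohttp


async def chat_completion(messages, api_key, model="gpt-3.5-turbo"):
    payload = {"model": model, "messages": messages}
    headers = {"Authorization": f"Bearer {api_key}"}
    async with aiohttp.ClientSession(raise_for_status=False) as session:
        async with session.post(
            "https://api.openai.com/v1/chat/completions",
            json=payload,
            headers=headers,
        ) as resp:
            response = await resp.json()
    # With raise_for_status=False, a bad key or malformed payload still
    # returns a 4xx body; surface it explicitly rather than failing silently.
    if "error" in response:
        raise ValueError(response["error"]["message"])
    return response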

@@ -113,7 +113,6 @@ class TextService:
         new_prompt = unidecode.unidecode(new_prompt)

         prompt_less_author = f"{new_prompt} <|endofstatement|>\n"

         new_prompt = f"\n{user_displayname}: {new_prompt} <|endofstatement|>\n"
         # new_prompt = new_prompt.encode("ascii", "ignore").decode()
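unidecode transliterates the user's text to plain ASCII before it is spliced into the prompt, which appears to be why the commented-out encode/decode line was retired: encoding with errors="ignore" drops accented characters entirely, while unidecode maps them to ASCII lookalikes. For example:

import unidecode

print(unidecode.unidecode("naïve café"))                # -> "naive cafe"
print("naïve café".encode("ascii", "ignore").decode())  # -> "nave caf"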
@@ -275,7 +274,9 @@ class TextService:
                 return

             if not converser_cog.pinecone_service:
-                _prompt_with_history = converser_cog.conversation_threads[ctx.channel.id].history
+                _prompt_with_history = converser_cog.conversation_threads[
+                    ctx.channel.id
+                ].history

                 print("The prompt with history is ", _prompt_with_history)

         # Send the request to the model
@@ -291,7 +292,6 @@ class TextService:
                 presence_penalty_override=overrides.presence_penalty,
                 stop=stop if not from_ask_command else None,
                 custom_api_key=custom_api_key,
             )

         elif from_edit_command:
@@ -317,9 +317,13 @@ class TextService:
             )

             # Clean the request response
-            response_text = converser_cog.cleanse_response(
-                str(response["choices"][0]["text"])
-            ) if not chatgpt else converser_cog.cleanse_response(str(response["choices"][0]["message"]["content"]))
+            response_text = (
+                converser_cog.cleanse_response(str(response["choices"][0]["text"]))
+                if not chatgpt
+                else converser_cog.cleanse_response(
+                    str(response["choices"][0]["message"]["content"])
+                )
+            )

             if from_message_context:
                 response_text = f"{response_text}"
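The conditional exists because the two endpoints nest the generated text differently: completions put it at choices[0]["text"], while chat puts it at choices[0]["message"]["content"]. A minimal sketch with stubbed responses; extract_text is a hypothetical helper, not a function from this repo:

# Stubbed responses, truncated to the fields the branch above reads.
completion_response = {"choices": [{"text": "Hello there!"}]}
chat_response = {
    "choices": [{"message": {"role": "assistant", "content": "Hello there!"}}]
}


def extract_text(response, chatgpt):
    if chatgpt:
        return str(response["choices"][0]["message"]["content"])
    return str(response["choices"][0]["text"])


assert extract_text(completion_response, chatgpt=False) == "Hello there!"
assert extract_text(chat_response, chatgpt=True) == "Hello there!"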
