tentative ChatGPT support

Kaveen Kumarasinghe 1 year ago
parent 0c20b0652d
commit 0216e393f6

@@ -6,4 +6,3 @@ The conversations are in this format, there can be an arbitrary amount of newlines
<yourname> and <username> will be given to you in an actual conversation.
...
You speak in a fun, casual, and friendly tone, not worrying about capitalization and using slang like "lol", "lmao", etc., like you're talking to a friend.

@@ -90,9 +90,11 @@ class Settings_autocompleter:
ctx: discord.AutocompleteContext,
):
"""Gets all models"""
return [
models = [
value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
]
models.append("chatgpt")
return models
async def get_value_moderations(
ctx: discord.AutocompleteContext,
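The autocompleter keeps its usual prefix filtering over Models.TEXT_MODELS and then unconditionally appends "chatgpt" as an extra choice, so the new chat path is selectable even when it doesn't match the typed prefix. A minimal sketch of the resulting behavior, with hypothetical model values:

# Hypothetical values; Models.TEXT_MODELS comes from the surrounding code.
TEXT_MODELS = ["text-davinci-003", "text-curie-001"]
partial = "te"
models = [value for value in TEXT_MODELS if value.startswith(partial.lower())]
models.append("chatgpt")
print(models)  # ['text-davinci-003', 'text-curie-001', 'chatgpt']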

@@ -2,6 +2,7 @@ import asyncio
import functools
import math
import os
import re
import tempfile
import traceback
import uuid
@@ -799,6 +800,100 @@ class Model:
return response
def cleanse_username(self, text):
text = text.strip()
text = text.replace(":", "_")
text = text.replace(" ", "")
# Replace any character that's not a letter or number with an underscore
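# The chat endpoint's optional "name" field rejects most punctuation
# (roughly ^[a-zA-Z0-9_-]{1,64}$ at the time of this commit), so anything
# else is collapsed to an underscore, e.g. "Kaveen K." -> "KaveenK_".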
text = re.sub(r"[^a-zA-Z0-9]", "_", text)
return text
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_chatgpt_request(
self,
prompt_history,
bot_name,
user_displayname,
temp_override=None,
top_p_override=None,
best_of_override=None,
frequency_penalty_override=None,
presence_penalty_override=None,
max_tokens_override=None,
stop=None,
custom_api_key=None,
) -> dict: # The raw response dict from the chat completions endpoint.
# Validate that all the parameters are in a good state before we send the request
print(f"The prompt about to be sent is {prompt_history}")
print(
f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
)
# Clean up the user display name
user_displayname_clean = self.cleanse_username(user_displayname)
# Clean up the bot name
bot_name_clean = self.cleanse_username(bot_name)
# Format the request body into the messages format that the API is expecting
# "messages": [{"role": "user", "content": "Hello!"}]
messages = []
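# NOTE: attribution below is a substring heuristic: an entry counts as the
# user's if their display name appears anywhere in its text, and as the
# bot's otherwise.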
for number, message in enumerate(prompt_history):
if number == 0:
# If this is the first message, it is the context prompt.
messages.append({"role": "user", "name": "System_Instructor", "content": message.text})
continue
if user_displayname in message.text:
text = message.text.replace(user_displayname + ":", "")
text = text.replace("<|endofstatement|>", "")
messages.append({"role": "user", "name": user_displayname_clean, "content": text})
else:
text = message.text.replace(bot_name, "")
text = text.replace("<|endofstatement|>", "")
messages.append({"role": "assistant", "name": bot_name_clean, "content": text})
print(f"Messages -> {messages}")
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": "gpt-3.5-turbo-0301",
"messages": messages,
"stop": "" if stop is None else stop,
"temperature": self.temp if temp_override is None else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"presence_penalty": self.presence_penalty
if presence_penalty_override is None
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if frequency_penalty_override is None
else frequency_penalty_override,
}
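# NOTE: best_of_override and max_tokens_override are accepted by this method
# but not forwarded in the payload above; this tentative version only passes
# the sampling parameters through.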
headers = {
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
}
async with session.post(
"https://api.openai.com/v1/chat/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(response)
print(f"Response -> {response}")
return response
@backoff.on_exception(
backoff.expo,
ValueError,
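For reference, a minimal driver for the new method, under stated assumptions: a Model instance is already constructed and configured, history entries only need a .text attribute (that is all the loop above reads), and the bot and user names are placeholders:

from collections import namedtuple

Item = namedtuple("Item", "text")  # stand-in for the real conversation item type

async def demo(model):
    history = [
        Item("You speak in a fun, casual, and friendly tone. <|endofstatement|>\n"),
        Item("Kaveen: hey, how's it going? <|endofstatement|>\n"),
    ]
    response = await model.send_chatgpt_request(
        history,
        bot_name="GPTie: ",  # placeholder; the caller passes its own BOT_NAME
        user_displayname="Kaveen",
        temp_override=0.7,
    )
    print(response["choices"][0]["message"]["content"])

# asyncio.run(demo(model)) would drive this once a Model instance exists.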
@@ -829,10 +924,10 @@ class Model:
if model:
max_tokens_override = Models.get_max_tokens(model) - tokens
print(f"The prompt about to be sent is {prompt}")
print(
f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
)
# print(f"The prompt about to be sent is {prompt}")
# print(
# f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
# )
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {

@@ -73,6 +73,11 @@ class TextService:
else prompt
), prompt
# Determine if we're sending a ChatGPT model request
chatgpt = False
if model and "chatgpt" in model.lower():
chatgpt = True
stop = f"{ctx.author.display_name if user is None else user.display_name}:"
from_context = isinstance(ctx, discord.ApplicationContext)
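The routing flag is a plain substring test, so any settable model value containing "chatgpt" (in any casing) takes the chat path:

# Sketch of the check added above; "model" would come from the user's settings.
model = "ChatGPT"
chatgpt = False
if model and "chatgpt" in model.lower():
    chatgpt = True
print(chatgpt)  # True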
@@ -85,6 +90,10 @@ class TextService:
) + converser_cog.usage_service.count_tokens(instruction)
try:
user_displayname = (
ctx.author.display_name if not user else user.display_name
)
# Pinecone is enabled, we will create embeddings for this conversation.
if (
converser_cog.pinecone_service
@@ -104,9 +113,6 @@ class TextService:
new_prompt = unidecode.unidecode(new_prompt)
prompt_less_author = f"{new_prompt} <|endofstatement|>\n"
user_displayname = (
ctx.author.display_name if not user else user.display_name
)
new_prompt = f"\n{user_displayname}: {new_prompt} <|endofstatement|>\n"
@@ -158,12 +164,12 @@ class TextService:
# When we are in embeddings mode, only the pre-text is contained in converser_cog.conversation_threads[message.channel.id].history, so we
# can use that as a base to build our new prompt
prompt_with_history = [
_prompt_with_history = [
converser_cog.conversation_threads[ctx.channel.id].history[0]
]
# Append the similar prompts to the prompt with history
prompt_with_history += [
_prompt_with_history += [
EmbeddedConversationItem(prompt, timestamp)
for prompt, timestamp in similar_prompts
]
@@ -180,37 +186,37 @@ class TextService:
converser_cog.model.num_static_conversation_items,
),
):
prompt_with_history.append(
_prompt_with_history.append(
converser_cog.conversation_threads[ctx.channel.id].history[
-i
]
)
# remove duplicates from _prompt_with_history and set the conversation history
prompt_with_history = list(dict.fromkeys(prompt_with_history))
_prompt_with_history = list(dict.fromkeys(_prompt_with_history))
# Sort _prompt_with_history by increasing timestamp if pinecone is enabled
if converser_cog.pinecone_service:
prompt_with_history.sort(key=lambda x: x.timestamp)
_prompt_with_history.sort(key=lambda x: x.timestamp)
# Remove the last two entries after sorting: reading from the end, the list is prompt(redo), answer, prompt(original), so this leaves prompt(original) and the earlier history
if redo_request:
prompt_with_history = prompt_with_history[:-2]
_prompt_with_history = _prompt_with_history[:-2]
converser_cog.conversation_threads[
ctx.channel.id
].history = prompt_with_history
].history = _prompt_with_history
# Ensure that the last prompt in this list is the prompt we just sent (new_prompt_item)
if prompt_with_history[-1].text != new_prompt_item.text:
if _prompt_with_history[-1].text != new_prompt_item.text:
try:
prompt_with_history.remove(new_prompt_item)
_prompt_with_history.remove(new_prompt_item)
except ValueError:
pass
prompt_with_history.append(new_prompt_item)
_prompt_with_history.append(new_prompt_item)
prompt_with_history = "".join(
[item.text for item in prompt_with_history]
[item.text for item in _prompt_with_history]
)
new_prompt = prompt_with_history + "\n" + BOT_NAME
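After the renames, the history exists in two shapes: _prompt_with_history keeps the list of conversation items (the form send_chatgpt_request consumes), while prompt_with_history is flattened into a single completion-style string. A small illustration, with a hypothetical item type:

from collections import namedtuple

Item = namedtuple("Item", "text")  # stand-in for EmbeddedConversationItem
items = [Item("Kaveen: hi <|endofstatement|>\n"), Item("GPTie: hey! <|endofstatement|>\n")]
_prompt_with_history = items                            # list form, for the chat endpoint
prompt_with_history = "".join(i.text for i in items)    # string form, for completions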
@@ -268,8 +274,27 @@ class TextService:
await converser_cog.end_conversation(ctx)
return
if not converser_cog.pinecone_service:
_prompt_with_history = converser_cog.conversation_threads[ctx.channel.id].history
print("The prompt with history is ", _prompt_with_history)
# Send the request to the model
if from_edit_command:
if chatgpt:
response = await converser_cog.model.send_chatgpt_request(
_prompt_with_history,
bot_name=BOT_NAME,
user_displayname=user_displayname,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
frequency_penalty_override=overrides.frequency_penalty,
presence_penalty_override=overrides.presence_penalty,
stop=stop if not from_ask_command else None,
custom_api_key=custom_api_key,
)
elif from_edit_command:
response = await converser_cog.model.send_edit_request(
text=new_prompt,
instruction=instruction,
@@ -294,7 +319,7 @@ class TextService:
# Clean the request response
response_text = converser_cog.cleanse_response(
str(response["choices"][0]["text"])
)
) if not chatgpt else converser_cog.cleanse_response(str(response["choices"][0]["message"]["content"]))
if from_message_context:
response_text = f"{response_text}"
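The two endpoints shape their output differently, which is what the final conditional accounts for: completions put the text at choices[0].text, while chat nests it at choices[0].message.content. A minimal illustration with abridged response dicts:

# Abridged response shapes; real responses carry more fields.
completion_resp = {"choices": [{"text": "hey lol what's up"}]}
chat_resp = {"choices": [{"message": {"role": "assistant", "content": "hey lol what's up"}}]}

def raw_text(response, chatgpt):
    # Mirrors the conditional added above, minus the cleanse step.
    if chatgpt:
        return str(response["choices"][0]["message"]["content"])
    return str(response["choices"][0]["text"])

assert raw_text(completion_resp, chatgpt=False) == raw_text(chat_resp, chatgpt=True)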
