From a025f810c170ec0fc6642abd958a538f717da1ba Mon Sep 17 00:00:00 2001
From: Kaveen Kumarasinghe
Date: Wed, 1 Mar 2023 19:40:14 -0500
Subject: [PATCH] ChatGPT Integration polished some more

---
 README.md                    |   2 +
 cogs/commands.py             |   7 ++-
 cogs/prompt_optimizer_cog.py |   5 +-
 cogs/text_service_cog.py     |   2 +
 gpt3discord.py               |   2 +-
 models/autocomplete_model.py |  17 ++++++
 models/openai_model.py       | 115 ++++++++++++++++++++++-------
 services/text_service.py     |  28 +++++----
 8 files changed, 121 insertions(+), 57 deletions(-)

diff --git a/README.md b/README.md
index 278d00c..5e721f1 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,8 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can try out the

 # Recent Notable Updates
+
+- **ChatGPT API Integration** - The ChatGPT API has been released and our bot is now fully integrated with it! Change the model to one of the ChatGPT turbo models with `/system settings`, or include the model as a param in your `/gpt converse`, `/gpt ask`, etc. requests! The two currently available ChatGPT models are `gpt-3.5-turbo` and `gpt-3.5-turbo-0301`.
 - **AI-Assisted Google Search** - Use GPT3 to browse the internet, you can search the internet for a query and GPT3 will look at the top websites for you automatically and formulate an answer to your query! You can also ask follow-up questions, this is kinda like BingGPT, but much better lol!

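The crux of the changes below is an API difference: the ChatGPT turbo models are served by OpenAI's chat completions endpoint, which takes a list of role-tagged `messages` instead of a flat `prompt` string and returns the generation under `message.content` rather than `text`. A minimal standalone sketch of that request shape for orientation (hypothetical snippet; `OPENAI_KEY` is a stand-in, and the bot's real payload construction lives in `models/openai_model.py` below):

```python
import asyncio

import aiohttp

OPENAI_KEY = "sk-..."  # assumption: the real bot reads this from the environment


async def chat_completion(prompt: str) -> str:
    # ChatGPT models take role-tagged messages instead of a raw prompt string.
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": prompt}],
    }
    headers = {"Authorization": f"Bearer {OPENAI_KEY}"}
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "https://api.openai.com/v1/chat/completions", json=payload, headers=headers
        ) as resp:
            response = await resp.json()
    # Chat responses nest the generation under "message"/"content", not "text".
    return response["choices"][0]["message"]["content"]


# asyncio.run(chat_completion("Hello!"))
```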
diff --git a/cogs/commands.py b/cogs/commands.py
index 3cf5614..f1e7bae 100644
--- a/cogs/commands.py
+++ b/cogs/commands.py
@@ -309,6 +309,9 @@ class Commands(discord.Cog, name="Commands"):
     @discord.option(
         name="prompt", description="The prompt to send to GPT3", required=True
     )
+    @discord.option(
+        name="model", description="The model to use for the request", required=False, autocomplete=Settings_autocompleter.get_models
+    )
     @discord.option(
         name="private", description="Will only be visible to you", required=False
     )
@@ -345,6 +348,7 @@ class Commands(discord.Cog, name="Commands"):
         self,
         ctx: discord.ApplicationContext,
         prompt: str,
+        model: str,
         private: bool,
         temperature: float,
         top_p: float,
@@ -359,6 +363,7 @@ class Commands(discord.Cog, name="Commands"):
             top_p,
             frequency_penalty,
             presence_penalty,
+            model=model,
         )
 
     @add_to_group("gpt")
@@ -449,7 +454,7 @@ class Commands(discord.Cog, name="Commands"):
         description="Which model to use with the bot",
         required=False,
         default=False,
-        autocomplete=Settings_autocompleter.get_models,
+        autocomplete=Settings_autocompleter.get_converse_models,
     )
     @discord.option(
         name="temperature",
diff --git a/cogs/prompt_optimizer_cog.py b/cogs/prompt_optimizer_cog.py
index e575f3a..324801c 100644
--- a/cogs/prompt_optimizer_cog.py
+++ b/cogs/prompt_optimizer_cog.py
@@ -4,7 +4,7 @@ import traceback
 import discord
 from sqlitedict import SqliteDict
 
-from models.openai_model import Override
+from models.openai_model import Override, Models
 from services.environment_service import EnvService
 from models.user_model import RedoUser
 from services.image_service import ImageService
@@ -102,7 +102,8 @@ class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
         # twice because of the best_of_override=2 parameter. This is to ensure that the model does a lot of analysis, but is
         # also relatively cost-effective
 
-        response_text = response["choices"][0]["text"]
+        response_text = str(response["choices"][0]["text"]) if self.model.model not in Models.CHATGPT_MODELS else response["choices"][0]["message"]["content"]
+
         # escape any mentions
         response_text = discord.utils.escape_mentions(response_text)
diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py
index b8b20fd..9c37b72 100644
--- a/cogs/text_service_cog.py
+++ b/cogs/text_service_cog.py
@@ -800,6 +800,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         from_ask_action=None,
         from_other_action=None,
         from_message_context=None,
+        model=None,
     ):
         """Command handler.
         Requests and returns a generation with no extras to the completion endpoint
@@ -850,6 +851,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             from_ask_action=from_ask_action,
             from_other_action=from_other_action,
             from_message_context=from_message_context,
+            model=model,
         )
 
     async def edit_command(
diff --git a/gpt3discord.py b/gpt3discord.py
index 76c4be1..a07513f 100644
--- a/gpt3discord.py
+++ b/gpt3discord.py
@@ -31,7 +31,7 @@ from services.environment_service import EnvService
 from models.openai_model import Model
 
 
-__version__ = "10.8.8"
+__version__ = "10.9.0"
 
 PID_FILE = Path("bot.pid")
diff --git a/models/autocomplete_model.py b/models/autocomplete_model.py
index 89d0c74..cd3c092 100644
--- a/models/autocomplete_model.py
+++ b/models/autocomplete_model.py
@@ -88,12 +88,29 @@ class Settings_autocompleter:
     async def get_models(
         ctx: discord.AutocompleteContext,
+    ):
+        """Gets all models"""
+        models = [
+            value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
+        ]
+        return models
+
+    async def get_converse_models(
+        ctx: discord.AutocompleteContext,
     ):
         """Gets all models"""
         models = [
             value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
         ]
         models.append("chatgpt")
+
+        # We won't let the user directly use these models; we'll decide which one to use based on the status.
+        attempt_removes = ["gpt-3.5-turbo", "gpt-3.5-turbo-0301"]
+
+        for attempt_remove in attempt_removes:
+            if attempt_remove in models:
+                models.remove(attempt_remove)
+
         return models
 
     async def get_value_moderations(
diff --git a/models/openai_model.py b/models/openai_model.py
index 9cb237f..cf24886 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -60,8 +60,13 @@ class Models:
     EDIT = "text-davinci-edit-001"
     CODE_EDIT = "code-davinci-edit-001"
 
+    # ChatGPT Models
+    TURBO = "gpt-3.5-turbo"
+    TURBO_DEV = "gpt-3.5-turbo-0301"
+
     # Model collections
-    TEXT_MODELS = [DAVINCI, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN]
+    TEXT_MODELS = [DAVINCI, CURIE, BABBAGE, ADA, CODE_DAVINCI, CODE_CUSHMAN, TURBO, TURBO_DEV]
+    CHATGPT_MODELS = [TURBO, TURBO_DEV]
     EDIT_MODELS = [EDIT, CODE_EDIT]
 
     DEFAULT = DAVINCI
@@ -75,6 +80,8 @@ class Models:
         "text-ada-001": 2024,
         "code-davinci-002": 7900,
         "code-cushman-001": 2024,
+        TURBO: 4096,
+        TURBO_DEV: 4096,
     }
 
     @staticmethod
@@ -817,7 +824,7 @@ class Model:
         max_tries=4,
         on_backoff=backoff_handler_request,
     )
-    async def send_chatgpt_request(
+    async def send_chatgpt_chat_request(
         self,
         prompt_history,
         bot_name,
@@ -830,16 +837,11 @@ class Model:
         max_tokens_override=None,
         stop=None,
         custom_api_key=None,
-
     ) -> (
         Tuple[dict, bool]
     ):  # The response, and a boolean indicating whether or not the context limit was reached.
 
         # Validate that all the parameters are in a good state before we send the request
-        print(f"The prompt about to be sent is {prompt_history}")
-        print(
-            f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
-        )
 
         # Clean up the user display name
         user_displayname_clean = self.cleanse_username(user_displayname)
@@ -915,6 +917,7 @@ class Model:
         model=None,
         stop=None,
         custom_api_key=None,
+        is_chatgpt_request=False,
     ) -> (
         Tuple[dict, bool]
     ):  # The response, and a boolean indicating whether or not the context limit was reached.
@@ -924,42 +927,72 @@ class Model:
         if model:
             max_tokens_override = Models.get_max_tokens(model) - tokens
 
-        # print(f"The prompt about to be sent is {prompt}")
-        # print(
-        #     f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
-        # )
+        print(f"The prompt about to be sent is {prompt}")
+        print(
+            f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
+        )
 
-        async with aiohttp.ClientSession(raise_for_status=False) as session:
-            payload = {
-                "model": self.model if model is None else model,
-                "prompt": prompt,
-                "stop": "" if stop is None else stop,
-                "temperature": self.temp if temp_override is None else temp_override,
-                "top_p": self.top_p if top_p_override is None else top_p_override,
-                "max_tokens": self.max_tokens - tokens
-                if max_tokens_override is None
-                else max_tokens_override,
-                "presence_penalty": self.presence_penalty
-                if presence_penalty_override is None
-                else presence_penalty_override,
-                "frequency_penalty": self.frequency_penalty
-                if frequency_penalty_override is None
-                else frequency_penalty_override,
-                "best_of": self.best_of if not best_of_override else best_of_override,
-            }
-            headers = {
-                "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
-            }
-            async with session.post(
-                "https://api.openai.com/v1/completions", json=payload, headers=headers
-            ) as resp:
-                response = await resp.json()
-                # print(f"Payload -> {payload}")
-                # Parse the total tokens used for this request and response pair from the response
-                await self.valid_text_request(response)
-                print(f"Response -> {response}")
+
+        # Non-ChatGPT simple completion models.
+        if not is_chatgpt_request:
+            async with aiohttp.ClientSession(raise_for_status=False) as session:
+                payload = {
+                    "model": self.model if model is None else model,
+                    "prompt": prompt,
+                    "stop": "" if stop is None else stop,
+                    "temperature": self.temp if temp_override is None else temp_override,
+                    "top_p": self.top_p if top_p_override is None else top_p_override,
+                    "max_tokens": self.max_tokens - tokens
+                    if max_tokens_override is None
+                    else max_tokens_override,
+                    "presence_penalty": self.presence_penalty
+                    if presence_penalty_override is None
+                    else presence_penalty_override,
+                    "frequency_penalty": self.frequency_penalty
+                    if frequency_penalty_override is None
+                    else frequency_penalty_override,
+                    "best_of": self.best_of if not best_of_override else best_of_override,
+                }
+                headers = {
+                    "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
+                }
+                async with session.post(
+                    "https://api.openai.com/v1/completions", json=payload, headers=headers
+                ) as resp:
+                    response = await resp.json()
+                    # print(f"Payload -> {payload}")
+                    # Parse the total tokens used for this request and response pair from the response
+                    await self.valid_text_request(response)
+                    print(f"Response -> {response}")
+
+                    return response
+        else:  # ChatGPT simple completion
+            async with aiohttp.ClientSession(raise_for_status=False) as session:
+                payload = {
+                    "model": self.model if not model else model,
+                    "messages": [{"role": "user", "content": prompt}],
+                    "stop": "" if stop is None else stop,
+                    "temperature": self.temp if temp_override is None else temp_override,
+                    "top_p": self.top_p if top_p_override is None else top_p_override,
+                    "presence_penalty": self.presence_penalty
+                    if presence_penalty_override is None
+                    else presence_penalty_override,
"frequency_penalty": self.frequency_penalty + if frequency_penalty_override is None + else frequency_penalty_override, + } + headers = { + "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}" + } + async with session.post( + "https://api.openai.com/v1/chat/completions", json=payload, headers=headers + ) as resp: + response = await resp.json() + # print(f"Payload -> {payload}") + # Parse the total tokens used for this request and response pair from the response + await self.valid_text_request(response) + print(f"Response -> {response}") - return response + return response @staticmethod async def send_test_request(api_key): diff --git a/services/text_service.py b/services/text_service.py index 525c549..46a2a8b 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -10,7 +10,7 @@ import unidecode from models.embed_statics_model import EmbedStatics from services.deletion_service import Deletion -from models.openai_model import Model, Override +from models.openai_model import Model, Override, Models from models.user_model import EmbeddedConversationItem, RedoUser from services.environment_service import EnvService from services.moderations_service import Moderation @@ -73,10 +73,13 @@ class TextService: else prompt ), prompt - # Determine if we're sending a ChatGPT model request - chatgpt = False - if model and "chatgpt" in model.lower(): - chatgpt = True + # Determine if we're sending a ChatGPT model request. If chatgpt is in the model name or the default model is a ChatGPT model. + # chatgpt_conversation = False + # chatgpt = False + # if (model and "chatgpt" in model.lower()) or (not model and converser_cog.model.model.lower() in Models.CHATGPT_MODELS): + # chatgpt = True + # if ctx.channel.id in converser_cog.conversation_threads: + # chatgpt_conversation = True stop = f"{ctx.author.display_name if user is None else user.display_name}:" @@ -274,14 +277,14 @@ class TextService: await converser_cog.end_conversation(ctx) return - if not converser_cog.pinecone_service: - _prompt_with_history = converser_cog.conversation_threads[ctx.channel.id].history - print("The prompt with history is ", _prompt_with_history) - # Send the request to the model + is_chatgpt_conversation = ctx.channel.id in converser_cog.conversation_threads and not from_ask_command and not from_edit_command and ((model is not None and (model in Models.CHATGPT_MODELS or model == "chatgpt")) or (model is None and converser_cog.model.model in Models.CHATGPT_MODELS)) + delegator = model or converser_cog.model.model + is_chatgpt_request = delegator in Models.CHATGPT_MODELS - if chatgpt: - response = await converser_cog.model.send_chatgpt_request( + if is_chatgpt_conversation: + _prompt_with_history = converser_cog.conversation_threads[ctx.channel.id].history + response = await converser_cog.model.send_chatgpt_chat_request( _prompt_with_history, bot_name=BOT_NAME, user_displayname=user_displayname, @@ -314,12 +317,13 @@ class TextService: model=model, stop=stop if not from_ask_command else None, custom_api_key=custom_api_key, + is_chatgpt_request=is_chatgpt_request, ) # Clean the request response response_text = converser_cog.cleanse_response( str(response["choices"][0]["text"]) - ) if not chatgpt else converser_cog.cleanse_response(str(response["choices"][0]["message"]["content"])) + ) if not is_chatgpt_request and not is_chatgpt_conversation else converser_cog.cleanse_response(str(response["choices"][0]["message"]["content"])) if from_message_context: response_text = 
f"{response_text}"