Merge remote-tracking branch 'Remote_Origin/main' into embeds

Rene Teigen 2 years ago
commit 1652c0be47

@@ -23,6 +23,9 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
 </p>
 # Recent Notable Updates
+- **Edit Requests** - Ask GPT to edit a piece of text with a given instruction using a specific OpenAI edits model! `/gpt edit`!
 - **Automatic retry on API errors** - The bot will automatically retry API requests if they fail due to some issue with OpenAI's APIs. This is becoming increasingly important as their APIs come under heavy load.
@@ -32,9 +35,6 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
 - **Permanent memory with embeddings and PineconeDB finished!** - An initial alpha version of permanent memory is now done! This allows you to chat with GPT3 infinitely and accurately, and save tokens, by using embeddings. *Please read the Permanent Memory section for more information!*
-- **Multi-user, group chats with GPT3** - Multiple users can converse with GPT3 in a chat now, and it will know that there are multiple distinct users chatting with it!
 - **AI-BASED SERVER MODERATION** - GPT3Discord now has a built-in AI-based moderation system that can automatically detect and remove toxic messages from your server. This is a great way to keep your server safe and clean, and it's completely automatic and **free**! Check out the commands section to learn how to enable it!
@@ -47,6 +47,8 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
 - **DALL-E Image Prompt Optimization** - Given some text that you're trying to generate an image for, the bot will automatically optimize the text to be more DALL-E friendly! `/dalle optimize <prompt>`
+- **Edit Requests** - Ask GPT to edit a piece of text or code with a given instruction. `/gpt edit <instruction> <text>`
 - **Redo Requests** - A simple button after the GPT3 response or DALL-E generation allows you to redo the initial prompt you asked. You can also redo conversation messages by just editing your message!
 - **Automatic AI-Based Server Moderation** - Moderate your server automatically with AI!
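The "Automatic retry on API errors" item above corresponds to the `backoff` dependency this commit adds in `requirements.txt`/`pyproject.toml` and to the `@backoff` decorator context visible around `send_edit_request` further down (`max_tries=6, on_backoff=backoff_handler`). A minimal sketch of that retry pattern, with a hypothetical `send_request` coroutine standing in for the bot's real request methods:

```python
import aiohttp
import backoff


def backoff_handler(details):
    # Called before each retry; `details` carries the wait time and attempt count.
    print(f"Retrying in {details['wait']:.1f}s (attempt {details['tries']})")


@backoff.on_exception(
    backoff.expo,                 # exponential backoff between attempts
    aiohttp.ClientResponseError,  # retry only on HTTP-level API errors
    max_tries=6,
    on_backoff=backoff_handler,
)
async def send_request(session: aiohttp.ClientSession, url: str, payload: dict) -> dict:
    # raise_for_status=True on the session turns API errors into exceptions,
    # which the decorator catches and retries with growing delays.
    async with session.post(url, json=payload) as resp:
        return await resp.json()
```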

@@ -123,7 +123,15 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
             if from_context:
                 result_message = await ctx.fetch_message(result_message.id)
-            redo_users[user_id] = RedoUser(prompt=prompt, message=ctx, ctx=ctx, response=response_message, instruction=None,codex=False,paginator=None)
+            redo_users[user_id] = RedoUser(
+                prompt=prompt,
+                message=ctx,
+                ctx=ctx,
+                response=response_message,
+                instruction=None,
+                codex=False,
+                paginator=None,
+            )
         else:
             if not vary:  # Editing case
@@ -177,7 +185,15 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
                 )
             )
-            redo_users[user_id] = RedoUser(prompt=prompt, message=ctx, ctx=ctx, response=response_message, instruction=None,codex=False,paginator=None)
+            redo_users[user_id] = RedoUser(
+                prompt=prompt,
+                message=ctx,
+                ctx=ctx,
+                response=response_message,
+                instruction=None,
+                codex=False,
+                paginator=None,
+            )
             self.converser_cog.users_to_interactions[user_id].append(
                 response_message.id
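Both hunks above now pass `instruction`, `codex`, and `paginator` into `RedoUser`, so the model must have grown matching fields. `RedoUser` itself is not part of this diff; the following is only a sketch of a compatible shape inferred from the call sites here, not the repository's actual definition:

```python
from dataclasses import dataclass, field
from typing import Any, Optional


@dataclass
class RedoUser:
    # Hypothetical shape inferred from the RedoUser(...) call sites in this diff.
    prompt: str
    message: Any                       # the invoking message or context
    ctx: Any                           # discord ApplicationContext (or Message)
    response: Any                      # the bot's response message, edited on redo
    instruction: Optional[str] = None  # set for /gpt edit requests
    codex: bool = False                # True when the code edits model was used
    paginator: Optional[Any] = None    # paginator for long responses, if any
    interactions: list = field(default_factory=list)

    def add_interaction(self, message_id: int) -> None:
        # Track response message ids tied to this redo entry.
        self.interactions.append(message_id)
```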

@@ -42,7 +42,20 @@ if USER_INPUT_API_KEYS:
     print(
         "This server was configured to enforce user input API keys. Doing the required database setup now"
     )
-    USER_KEY_DB = SqliteDict("user_key_db.sqlite")
+    # Get USER_KEY_DB from the environment variable
+    USER_KEY_DB_PATH = EnvService.get_user_key_db_path()
+    # Check if USER_KEY_DB_PATH is valid
+    if not USER_KEY_DB_PATH:
+        print(
+            "No user key database path was provided. Defaulting to user_key_db.sqlite"
+        )
+        USER_KEY_DB_PATH = "user_key_db.sqlite"
+    else:
+        # append "user_key_db.sqlite" to USER_KEY_DB_PATH if it doesn't already end with .sqlite
+        if not USER_KEY_DB_PATH.match("*.sqlite"):
+            # append "user_key_db.sqlite" to USER_KEY_DB_PATH
+            USER_KEY_DB_PATH = USER_KEY_DB_PATH / "user_key_db.sqlite"
+    USER_KEY_DB = SqliteDict(USER_KEY_DB_PATH)
     print("Retrieved/created the user key database")
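For context on what `USER_KEY_DB` is used for: `SqliteDict` behaves like an ordinary dict persisted to the given SQLite file, which lets the bot keep one OpenAI key per Discord user. A small standalone sketch of that usage (the string key and `autocommit=True` are illustrative choices, not necessarily the bot's exact code):

```python
from sqlitedict import SqliteDict

# Open (or create) the same kind of per-user key store the bot sets up above.
user_key_db = SqliteDict("user_key_db.sqlite", autocommit=True)

user_id = "123456789012345678"  # example Discord user id, stored as a string here

# Store a user's OpenAI API key the first time they provide it.
user_key_db[user_id] = "sk-example-not-a-real-key"

# Later, look the key up before making a request on their behalf.
if user_id in user_key_db:
    custom_api_key = user_key_db[user_id]
else:
    custom_api_key = None  # the bot would ask the user to set a key instead

user_key_db.close()
```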
@@ -774,14 +787,20 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         edited_request=False,
         redo_request=False,
     ):
-        new_prompt = prompt + "\nGPTie: " if not from_ask_command and not from_edit_command else prompt
+        new_prompt = (
+            prompt + "\nGPTie: "
+            if not from_ask_command and not from_edit_command
+            else prompt
+        )
         from_context = isinstance(ctx, discord.ApplicationContext)
         if not instruction:
             tokens = self.usage_service.count_tokens(new_prompt)
         else:
-            tokens = self.usage_service.count_tokens(new_prompt) + self.usage_service.count_tokens(instruction)
+            tokens = self.usage_service.count_tokens(
+                new_prompt
+            ) + self.usage_service.count_tokens(instruction)
         try:
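The changed branch above simply adds the instruction's token count to the prompt's when an edit request is being handled. `UsageService.count_tokens` is not shown in this diff; a plausible stand-in built on the GPT-2 tokenizer (an assumption, though `transformers` is already pinned in `requirements.txt`):

```python
from transformers import GPT2TokenizerFast


class UsageService:
    # Sketch only: the real UsageService in the repository may count tokens differently.
    def __init__(self):
        # GPT-2's BPE vocabulary is a close proxy for GPT-3 token counts.
        self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

    def count_tokens(self, text: str) -> int:
        # Number of BPE tokens the model would see for this text.
        return len(self.tokenizer(text)["input_ids"])


usage_service = UsageService()
prompt_tokens = usage_service.count_tokens("GPTie is a friendly bot.\nGPTie: ")
instruction_tokens = usage_service.count_tokens("Make the reply more formal.")
total = prompt_tokens + instruction_tokens  # what the else-branch above computes
```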
@@ -980,10 +999,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             elif from_edit_command:
                 if codex:
                     response_text = response_text.strip()
-                    response_text = f"***Prompt:{prompt}***\n***Instruction:{instruction}***\n\n```\n{response_text}\n```"
+                    response_text = f"***Prompt: {prompt}***\n***Instruction: {instruction}***\n\n```\n{response_text}\n```"
                 else:
                     response_text = response_text.strip()
-                    response_text = f"***Prompt:{prompt}***\n***Instruction:{instruction}***\n\n{response_text}\n"
+                    response_text = f"***Prompt: {prompt}***\n***Instruction: {instruction}***\n\n{response_text}\n"
             # If gpt3 tries writing a user mention try to replace it with their name
             response_text = await self.mention_to_username(ctx, response_text)
@@ -1058,21 +1077,35 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
                 response_message = await ctx.reply(
                     response_text,
                     view=ConversationView(
-                        ctx, self, ctx.channel.id, model, custom_api_key=custom_api_key
+                        ctx,
+                        self,
+                        ctx.channel.id,
+                        model,
+                        custom_api_key=custom_api_key,
                     ),
                 )
             elif from_edit_command:
                 response_message = await ctx.respond(
                     response_text,
                     view=ConversationView(
-                        ctx, self, ctx.channel.id, model, from_edit_command=from_edit_command, custom_api_key=custom_api_key
+                        ctx,
+                        self,
+                        ctx.channel.id,
+                        model,
+                        from_edit_command=from_edit_command,
+                        custom_api_key=custom_api_key
                     ),
                 )
             else:
                 response_message = await ctx.respond(
                     response_text,
                     view=ConversationView(
-                        ctx, self, ctx.channel.id, model, from_ask_command=from_ask_command, custom_api_key=custom_api_key
+                        ctx,
+                        self,
+                        ctx.channel.id,
+                        model,
+                        from_ask_command=from_ask_command,
+                        custom_api_key=custom_api_key
                     ),
                 )
@@ -1085,7 +1118,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             )
             self.redo_users[ctx.author.id] = RedoUser(
-                prompt=new_prompt, instruction=instruction, ctx=ctx, message=ctx, response=actual_response_message, codex=codex, paginator=paginator
+                prompt=new_prompt,
+                instruction=instruction,
+                ctx=ctx,
+                message=ctx,
+                response=actual_response_message,
+                codex=codex,
+                paginator=paginator
             )
             self.redo_users[ctx.author.id].add_interaction(
                 actual_response_message.id
@@ -1123,7 +1162,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
                 await ctx.send_followup(message)
             else:
                 await ctx.reply(message)
-            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command)
+            self.remove_awaiting(
+                ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
+            )
         # Error catching for OpenAI model value errors
         except ValueError as e:
@@ -1131,7 +1172,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
                 await ctx.send_followup(e)
             else:
                 await ctx.reply(e)
-            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command)
+            self.remove_awaiting(
+                ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
+            )
         # General catch case for everything
         except Exception:
@@ -1140,7 +1183,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
             await ctx.send_followup(message) if from_context else await ctx.reply(
                 message
             )
-            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command)
+            self.remove_awaiting(
+                ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
+            )
             traceback.print_exc()
             try:
@@ -1296,7 +1341,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
         input = await self.mention_to_username(ctx, input.strip())
         instruction = await self.mention_to_username(ctx, instruction.strip())
-
         user_api_key = None
         if USER_INPUT_API_KEYS:
             user_api_key = await GPT3ComCon.get_user_api_key(user.id, ctx)
@@ -1610,7 +1654,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
 # VIEWS AND MODALS
 class ConversationView(discord.ui.View):
-    def __init__(self, ctx, converser_cog, id, model, from_ask_command=False, from_edit_command=False, custom_api_key=None):
+    def __init__(
+        self,
+        ctx,
+        converser_cog,
+        id,
+        model,
+        from_ask_command=False,
+        from_edit_command=False,
+        custom_api_key=None,
+    ):
         super().__init__(timeout=3600)  # 1 hour interval to redo.
         self.converser_cog = converser_cog
         self.ctx = ctx
@@ -1619,7 +1672,13 @@ class ConversationView(discord.ui.View):
         self.from_edit_command = from_edit_command
         self.custom_api_key = custom_api_key
         self.add_item(
-            RedoButton(self.converser_cog, model=model, from_ask_command=from_ask_command, from_edit_command=from_edit_command, custom_api_key=self.custom_api_key)
+            RedoButton(
+                self.converser_cog,
+                model=model,
+                from_ask_command=from_ask_command,
+                from_edit_command=from_edit_command,
+                custom_api_key=self.custom_api_key,
+            )
         )
         if id in self.converser_cog.conversation_threads:
@@ -1708,7 +1767,7 @@ class RedoButton(discord.ui.Button["ConversationView"]):
                 custom_api_key=self.custom_api_key,
                 redo_request=True,
                 from_ask_command=self.from_ask_command,
-                from_edit_command=self.from_edit_command
+                from_edit_command=self.from_edit_command,
             )
         else:
             await interaction.response.send_message(

@@ -114,7 +114,13 @@ class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
         )
         self.converser_cog.redo_users[user.id] = RedoUser(
-            prompt=final_prompt, message=ctx, ctx=ctx, response=response_message, instruction=None,codex=False,paginator=None
+            prompt=final_prompt,
+            message=ctx,
+            ctx=ctx,
+            response=response_message,
+            instruction=None,
+            codex=False,
+            paginator=None,
         )
         self.converser_cog.redo_users[user.id].add_interaction(response_message.id)
         await response_message.edit(

@@ -25,7 +25,7 @@ from models.openai_model import Model
 from models.usage_service_model import UsageService
 from models.env_service_model import EnvService
-__version__ = "5.4"
+__version__ = "6.0"
 """
 The pinecone service is used to store and retrieve conversation embeddings.

@@ -1,6 +1,8 @@
 import os
 import sys
 from pathlib import Path
+from typing import Union
 from dotenv import load_dotenv
@@ -193,3 +195,14 @@ class EnvService:
             return False
         except:
             return False
+
+    @staticmethod
+    def get_user_key_db_path() -> Union[Path, None]:
+        try:
+            user_key_db_path = os.getenv("USER_KEY_DB_PATH")
+            if user_key_db_path is None:
+                return None
+            else:
+                return Path(user_key_db_path)
+        except:
+            return None
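Because the getter returns a `pathlib.Path` (or `None`), the caller in the earlier hunk can use `Path.match("*.sqlite")` and the `/` join operator directly. A self-contained sketch of that resolution logic, using a hypothetical `resolve_user_key_db_path` helper rather than the bot's module-level code:

```python
import os
from pathlib import Path


def resolve_user_key_db_path(default_name: str = "user_key_db.sqlite") -> Path:
    # Mirrors the logic above: unset -> default file in the working directory,
    # a *.sqlite value -> used as-is, anything else -> treated as a directory.
    raw = os.getenv("USER_KEY_DB_PATH")
    if raw is None:
        return Path(default_name)
    path = Path(raw)
    if not path.match("*.sqlite"):
        path = path / default_name
    return path


# USER_KEY_DB_PATH unset           -> user_key_db.sqlite
# USER_KEY_DB_PATH=/data/k.sqlite  -> /data/k.sqlite
# USER_KEY_DB_PATH=/data           -> /data/user_key_db.sqlite
print(resolve_user_key_db_path())
```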

@@ -399,28 +399,35 @@ class Model:
         max_tries=6,
         on_backoff=backoff_handler,
     )
-    async def send_edit_request(self, instruction, input=None, temp_override=None, top_p_override=None, codex=False, custom_api_key=None):
+    async def send_edit_request(
+        self,
+        instruction,
+        input=None,
+        temp_override=None,
+        top_p_override=None,
+        codex=False,
+        custom_api_key=None,
+    ):
         # Validate that all the parameters are in a good state before we send the request
         if len(instruction) < self.prompt_min_length:
             raise ValueError(
                 "Instruction must be greater than 8 characters, it is currently "
                 + str(len(instruction))
             )
-        print(f"The text about to be edited is [{input}] with instructions [{instruction}] codex [{codex}]")
         print(
-            f"Overrides -> temp:{temp_override}, top_p:{top_p_override}"
+            f"The text about to be edited is [{input}] with instructions [{instruction}] codex [{codex}]"
         )
+        print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override}")
         async with aiohttp.ClientSession(raise_for_status=True) as session:
             payload = {
                 "model": Models.EDIT if codex is False else Models.CODE_EDIT,
                 "input": "" if input is None else input,
                 "instruction": instruction,
                 "temperature": self.temp if temp_override is None else temp_override,
-                "top_p": self.top_p if top_p_override is None else top_p_override
+                "top_p": self.top_p if top_p_override is None else top_p_override,
             }
             headers = {
                 "Content-Type": "application/json",

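The payload assembled above maps onto OpenAI's edits endpoint. A standalone sketch of the same request with `aiohttp`; the concrete model names are assumptions for what `Models.EDIT` and `Models.CODE_EDIT` resolve to, and the key and text values are placeholders:

```python
import aiohttp


async def send_edit_request(api_key: str, instruction: str, text: str = "", codex: bool = False) -> dict:
    # Same request shape as the payload in the hunk above.
    payload = {
        "model": "code-davinci-edit-001" if codex else "text-davinci-edit-001",
        "input": text,
        "instruction": instruction,
        "temperature": 0.7,
        "top_p": 1.0,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    async with aiohttp.ClientSession(raise_for_status=True) as session:
        async with session.post(
            "https://api.openai.com/v1/edits", json=payload, headers=headers
        ) as resp:
            return await resp.json()


# e.g. asyncio.run(send_edit_request("sk-...", "Fix the spelling mistakes", "Teh quick brown fox"))
```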
@@ -27,6 +27,7 @@ dependencies = [
     "aiofiles",
     "pinecone-client",
     "sqlitedict",
+    "backoff",
 ]
 dynamic = ["version"]
 [project.scripts]

@@ -6,4 +6,5 @@ transformers==4.25.1
 pycord-multicog==1.0.2
 aiofiles==22.1.0
 pinecone-client==2.1.0
 sqlitedict==2.1.0
+backoff==2.2.1

@@ -18,4 +18,7 @@ WELCOME_MESSAGE = "Hi There! Welcome to our Discord server. We hope you'll enjoy
 USER_INPUT_API_KEYS="False" # If True, users must use their own API keys for OpenAI. If False, the bot will use the API key in the .env file.
 # Moderations Service alert channel, this is where moderation alerts will be sent as a default if enabled
 MODERATIONS_ALERT_CHANNEL = "977697652147892304"
+# User API key db path configuration. This is where the user API keys will be stored.
+USER_KEY_DB_PATH = user_key_db.sqlite