Merge remote-tracking branch 'origin/main' into ping-to-name

Rene Teigen
commit 33eb5f4d76

@ -3,6 +3,7 @@ on:
push:
branches:
- main
- alpha
jobs:
build:
runs-on: ubuntu-latest
@ -80,7 +81,7 @@ jobs:
host: 104.248.105.234
username: root
password: ${{ secrets.SSH_PASS }}
- name: copy file via ssh password
- name: copy file via ssh password (OAI)
uses: appleboy/scp-action@master
with:
host: 104.248.105.234
@ -89,7 +90,7 @@ jobs:
port: 22
source: gpt3discord.py
target: /home/gptbotopenai/
- name: copy file via ssh password
- name: copy file via ssh password (OAI)
uses: appleboy/scp-action@master
with:
host: 104.248.105.234
@ -98,7 +99,7 @@ jobs:
port: 22
source: conversation_starter_pretext.txt
target: /home/gptbotopenai/
- name: copy file via ssh password
- name: copy file via ssh password (OAI)
uses: appleboy/scp-action@master
with:
host: 104.248.105.234
@ -107,7 +108,7 @@ jobs:
port: 22
source: image_optimizer_pretext.txt
target: /home/gptbotopenai/
- name: Copy via ssh
- name: Copy via ssh (OAI)
uses: garygrossgarten/github-action-scp@release
with:
local: cogs
@ -115,7 +116,7 @@ jobs:
host: 104.248.105.234
username: root
password: ${{ secrets.SSH_PASS }}
- name: Copy via ssh
- name: Copy via ssh (OAI)
uses: garygrossgarten/github-action-scp@release
with:
local: models
@ -123,7 +124,7 @@ jobs:
host: 104.248.105.234
username: root
password: ${{ secrets.SSH_PASS }}
- name: Copy via ssh
- name: Copy via ssh (OAI)
uses: garygrossgarten/github-action-scp@release
with:
local: openers

@ -1 +1,3 @@
Please contribute on your own branch and make a pull request for review. There are not many other guidelines to follow, I'd love to see all of your good ideas.
Please feel free to pick up any issue and work on it! Join the Discord server linked in the README if you want to discuss further!

@ -172,7 +172,7 @@ The Moderations service still uses the main API key defined in the `.env` file.
[**GPT3Discord Guides**](https://github.com/Kav-K/GPT3Discord/tree/main/detailed_guides)
If you follow the link above, you will now get to detailed step-by-step guides that will help you to install and set up your GPT3Discord bot quickly and easily. If you still run into problems or have suggestions for improving the guides, you can join the [**Discord-Server**](https://discord.gg/WvAHXDMS7Q) and we try will help you. Keep in mind that the maintainers are volunteers and will try to help you on their schedule.
If you follow the link above, you will now get to detailed step-by-step guides that will help you to install and set up your GPT3Discord bot quickly and easily. If you still run into problems or have suggestions for improving the guides, you can join the [**Discord-Server**](https://discord.gg/WvAHXDMS7Q) and we will try to help you. Keep in mind that the maintainers are volunteers and will try to help you on their schedule.
*The number and content of the guides is constantly adapted to current requirements.*

@ -37,8 +37,7 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
self.message_queue = message_queue
self.deletion_queue = deletion_queue
self.converser_cog = converser_cog
print("Draw service init")
print("Draw service initialized")
async def encapsulated_send(
self,

@ -29,6 +29,9 @@ if sys.platform == "win32":
else:
separator = "/"
"""
Get the user key service if it is enabled.
"""
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = None
if USER_INPUT_API_KEYS:
@ -39,6 +42,22 @@ if USER_INPUT_API_KEYS:
print("Retrieved/created the user key database")
"""
Obtain the Moderation table and the General table. These are two SQLite tables that contain
information about the server, used for persistence and to auto-restart the moderation service.
"""
MOD_DB = None
GENERAL_DB = None
try:
print("Attempting to retrieve the General and Moderations DB")
MOD_DB = SqliteDict("main_db.sqlite", tablename="moderations", autocommit=True)
GENERAL_DB = SqliteDict("main_db.sqlite", tablename="general", autocommit=True)
print("Retrieved the General and Moderations DB")
except Exception as e:
print("Failed to retrieve the General and Moderations DB. The bot is terminating.")
raise e
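
As a rough illustration of how the two tables above share a single SQLite file, here is a minimal sketch using the sqlitedict package. The file and table names mirror the ones in this hunk; the per-guild record layout is an assumption based on the helper methods added later in this commit.

    # Minimal sketch of the SqliteDict pattern used above, not the bot's actual code.
    # Requires: pip install sqlitedict. The {"moderated": ..., "alert_channel": ...}
    # record layout mirrors the helpers added further down in this commit.
    from sqlitedict import SqliteDict

    # Two logical tables can share one .sqlite file by using different tablenames.
    mod_db = SqliteDict("main_db.sqlite", tablename="moderations", autocommit=True)
    general_db = SqliteDict("main_db.sqlite", tablename="general", autocommit=True)

    guild_id = 1234567890  # hypothetical guild id

    # Write a per-guild record; autocommit=True persists it immediately.
    mod_db[guild_id] = {"moderated": True, "alert_channel": 0}

    # Read it back with normal dict operations.
    if guild_id in mod_db:
        print(mod_db[guild_id])  # {'moderated': True, 'alert_channel': 0}

    mod_db.close()
    general_db.close()
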
class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
def __init__(
self,
@ -53,12 +72,23 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
pinecone_service,
):
super().__init__()
self.GLOBAL_COOLDOWN_TIME = 0.25
# Environment
self.data_path = data_path
self.debug_channel = None
# Services and models
self.bot = bot
self._last_member_ = None
self.conversation_threads = {}
self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"]
self.usage_service = usage_service
self.model = model
self.deletion_queue = deletion_queue
# Data specific to all text based GPT interactions
self.users_to_interactions = defaultdict(list)
self.redo_users = {}
# Conversations-specific data
self.END_PROMPTS = [
"end",
"end conversation",
@ -66,20 +96,19 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
"that's all",
"that'll be all",
]
self.last_used = {}
self.GLOBAL_COOLDOWN_TIME = 0.25
self.usage_service = usage_service
self.model = model
self.summarize = self.model.summarize_conversations
self.deletion_queue = deletion_queue
self.users_to_interactions = defaultdict(list)
self.redo_users = {}
self.awaiting_responses = []
self.awaiting_thread_responses = []
self.conversation_threads = {}
self.summarize = self.model.summarize_conversations
# Moderation service data
self.moderation_queues = {}
self.moderation_alerts_channel = EnvService.get_moderations_alert_channel()
self.moderation_enabled_guilds = []
self.moderation_tasks = {}
self.moderations_launched = []
# Pinecone data
self.pinecone_service = pinecone_service
try:
@ -194,19 +223,16 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
await member.send(content=None, embed=welcome_embed)
@discord.Cog.listener()
async def on_member_remove(self, member):
pass
@discord.Cog.listener()
async def on_ready(self):
self.debug_channel = self.bot.get_guild(self.DEBUG_GUILD).get_channel(
self.DEBUG_CHANNEL
)
if USER_INPUT_API_KEYS:
print(
"This bot was set to use user input API keys. Doing the required SQLite setup now"
)
print("The debug channel was acquired")
# Check moderation service for each guild
for guild in self.bot.guilds:
await self.check_and_launch_moderations(guild.id)
await self.bot.sync_commands(
commands=None,
@ -217,7 +243,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
check_guilds=[],
delete_existing=True,
)
print(f"The debug channel was acquired and commands registered")
print(f"Commands synced")
@add_to_group("system")
@discord.slash_command(
@ -255,17 +281,15 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
for thread in guild.threads:
thread_name = thread.name.lower()
if "with gpt" in thread_name or "closed-gpt" in thread_name:
await thread.delete()
try:
await thread.delete()
except:
pass
await ctx.respond("All conversation threads have been deleted.")
# TODO: add extra condition to check if multi is enabled for the thread, stated in conversation_threads
def check_conversing(self, user_id, channel_id, message_content, multi=None):
cond1 = (
channel_id
in self.conversation_threads
# and user_id in self.conversation_thread_owners
# and channel_id == self.conversation_thread_owners[user_id]
)
cond1 = channel_id in self.conversation_threads
# If the trimmed message starts with a Tilde, then we want to not contribute this to the conversation
try:
cond2 = not message_content.strip().startswith("~")
@ -292,6 +316,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
"Only the conversation starter can end this.", delete_after=5
)
return
# TODO Possible bug here: if two users each have an active conversation and one tries to end the other's,
# they may be able to click the end button on the other person's thread and end their own conversation instead.
self.conversation_threads.pop(channel_id)
if isinstance(ctx, discord.ApplicationContext):
@ -382,6 +409,11 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
value="Optimize an image prompt for use with DALL-E2, Midjourney, SD, etc.",
inline=False,
)
embed.add_field(
name="/system moderations",
value="The automatic moderations service",
inline=False,
)
embed.add_field(name="/help", value="See this help text", inline=False)
await ctx.respond(embed=embed)
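
The /help output above is a plain discord.Embed with one add_field call per command. A minimal py-cord-style sketch of building such an embed (only the two fields visible in this hunk are reproduced; the title and description are illustrative):

    # Minimal embed sketch (py-cord / discord.py); only the two fields shown in
    # this hunk are reproduced, the rest of the bot's help text is omitted.
    import discord

    embed = discord.Embed(title="Help", description="GPT3Discord commands")
    embed.add_field(
        name="/system moderations",
        value="The automatic moderations service",
        inline=False,
    )
    embed.add_field(name="/help", value="See this help text", inline=False)
    # Inside a slash command handler this is then sent with:
    #     await ctx.respond(embed=embed)
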
@ -615,10 +647,31 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self.redo_users[after.author.id].prompt = after.content
async def check_and_launch_moderations(self, guild_id, alert_channel_override=None):
# Create the moderations service.
print("Checking and attempting to launch moderations service...")
if self.check_guild_moderated(guild_id):
self.moderation_queues[guild_id] = asyncio.Queue()
moderations_channel = await self.bot.fetch_channel(
self.get_moderated_alert_channel(guild_id)
if not alert_channel_override
else alert_channel_override
)
self.moderation_tasks[guild_id] = asyncio.ensure_future(
Moderation.process_moderation_queue(
self.moderation_queues[guild_id], 1, 1, moderations_channel
)
)
print("Launched the moderations service for guild " + str(guild_id))
self.moderations_launched.append(guild_id)
return moderations_channel
return None
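
The wiring above is a standard asyncio producer/consumer setup: one Queue and one long-running consumer task per guild, started with asyncio.ensure_future. Below is a stripped-down, runnable sketch of that pattern; process_queue and its poll interval are hypothetical stand-ins for Moderation.process_moderation_queue, whose two numeric arguments are not documented in this diff.

    # Generic sketch of the queue-per-guild pattern used above; process_queue and
    # poll_seconds are hypothetical names, not the bot's real API.
    import asyncio

    async def process_queue(queue: asyncio.Queue, poll_seconds: float):
        # Long-running consumer: pull flagged messages off the queue and handle them.
        while True:
            message = await queue.get()
            print(f"Handling flagged message: {message!r}")
            queue.task_done()
            await asyncio.sleep(poll_seconds)

    async def main():
        queues = {}  # guild_id -> asyncio.Queue, mirroring self.moderation_queues
        tasks = {}   # guild_id -> consumer task, mirroring self.moderation_tasks

        guild_id = 1234567890  # hypothetical guild id
        queues[guild_id] = asyncio.Queue()
        tasks[guild_id] = asyncio.ensure_future(process_queue(queues[guild_id], 1))

        # Producer side: on_message would enqueue flagged content for this guild.
        await queues[guild_id].put("some message content")
        await asyncio.sleep(2)  # let the consumer run briefly

        tasks[guild_id].cancel()  # this is what the "off" branch of /system moderations does

    asyncio.run(main())
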
@discord.Cog.listener()
async def on_message(self, message):
# Get the message from context
if message.author == self.bot.user:
return
@ -646,9 +699,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await self.end_conversation(message)
return
# GPT3 command
if conversing:
# Extract all the text after the !g and use it as the prompt.
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await GPT3ComCon.get_user_api_key(
@ -661,9 +712,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await self.check_conversation_limit(message)
# We want to have conversationality functionality. To have gpt3 remember context, we need to append the conversation/prompt
# history to the prompt. We can do this by checking if the user is in the conversating_users dictionary, and if they are,
# we can append their history to the prompt.
# If the user is in a conversation thread
if message.channel.id in self.conversation_threads:
# Since this is async, we don't want to allow the user to send another prompt while a conversation
@ -772,7 +821,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
try:
# This is the EMBEDDINGS CASE
# Pinecone is enabled, we will create embeddings for this conversation.
if self.pinecone_service and ctx.channel.id in self.conversation_threads:
# The conversation_id is the id of the thread
conversation_id = ctx.channel.id
@ -788,8 +837,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
new_prompt = new_prompt.encode("ascii", "ignore").decode()
# print("Creating embedding for ", prompt)
# Print the current timestamp
timestamp = int(
str(datetime.datetime.now().timestamp()).replace(".", "")
)
@ -808,7 +855,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
# Create and upsert the embedding for the conversation id, prompt, timestamp
embedding = await self.pinecone_service.upsert_conversation_embedding(
await self.pinecone_service.upsert_conversation_embedding(
self.model,
conversation_id,
new_prompt,
@ -818,8 +865,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
embedding_prompt_less_author = await self.model.send_embedding_request(
prompt_less_author, custom_api_key=custom_api_key
) # Use the version of
# the prompt without the author's name for better clarity on retrieval.
) # Use the version of the prompt without the author's name for better clarity on retrieval.
# Now, build the new prompt by getting the X most similar with pinecone
similar_prompts = self.pinecone_service.get_n_similar(
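
For readers following the EMBEDDINGS CASE, the flow in this hunk is: build an integer timestamp (the decimal point is stripped so the float becomes a sortable int), embed and upsert the user's prompt under the thread's conversation_id, then fetch the N most similar past prompts and splice them into the new prompt. A compressed, runnable sketch of that flow follows; the service object and its method signatures are stand-ins inferred from this diff, not a documented API.

    # Sketch of the embeddings flow above. FakePineconeService mimics the call
    # shapes of upsert_conversation_embedding / get_n_similar seen in this diff;
    # the bodies are fakes so the sketch runs on its own.
    import asyncio
    import datetime

    class FakePineconeService:
        async def upsert_conversation_embedding(self, model, conversation_id, text, timestamp):
            print(f"upserting ({conversation_id}, {timestamp}): {text!r}")

        def get_n_similar(self, conversation_id, embedding, n=5):
            return [("an earlier prompt from this thread", 0.92)]

    async def main():
        service = FakePineconeService()
        conversation_id = 1234567890  # the thread id, per the comment above

        # Same timestamp trick as the diff: strip the "." so the float becomes an int.
        timestamp = int(str(datetime.datetime.now().timestamp()).replace(".", ""))

        new_prompt = "User: how do embeddings work?"
        await service.upsert_conversation_embedding(None, conversation_id, new_prompt, timestamp)

        # Retrieval side: fetch similar past prompts and prepend them as context.
        similar = service.get_n_similar(conversation_id, embedding=[0.1, 0.2, 0.3], n=2)
        context = "\n".join(text for text, _score in similar)
        print(context + "\n" + new_prompt)

    asyncio.run(main())
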
@ -874,7 +920,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
tokens = self.usage_service.count_tokens(new_prompt)
# Summarize case
# No pinecone, we do conversation summarization for long term memory instead
elif (
id in self.conversation_threads
and tokens > self.model.summarize_threshold
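
The branch here (no Pinecone) falls back to summarizing the conversation once the prompt grows past self.model.summarize_threshold. A small standalone sketch of that token-threshold check; count_tokens and summarize are naive stand-ins for the usage service and self.model.summarize_conversations, which the diff references but does not show.

    # Threshold-triggered summarization sketch; the threshold value and both
    # helper bodies are illustrative, not the project's real implementation.
    SUMMARIZE_THRESHOLD = 300  # assumed value for illustration

    def count_tokens(text: str) -> int:
        # Crude whitespace token estimate; the real service uses a proper tokenizer.
        return len(text.split())

    def summarize(history: str) -> str:
        # Placeholder: the real implementation asks the model for a summary.
        return "Summary of the conversation so far: ..."

    history = "User: hi\nBot: hello\n" * 200
    if count_tokens(history) > SUMMARIZE_THRESHOLD:
        # Replace the long history with a short summary to stay under the limit.
        history = summarize(history)
    print(history)
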
@ -1121,10 +1167,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
user = ctx.user
prompt = await self.replace_mention(ctx, prompt.strip())
# If the prompt isn't empty and the last character isn't a punctuation character, add a period.
if prompt and prompt[-1] not in [".", "!", "?"]:
prompt += "."
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await GPT3ComCon.get_user_api_key(user.id, ctx)
@ -1133,10 +1175,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx.defer()
# CONVERSE Checks here TODO
# Send the request to the model
# If conversing, the prompt to send is the history, otherwise, it's just the prompt
await self.encapsulated_send(
user.id,
prompt,
@ -1342,29 +1380,30 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
return
if status == "on":
# Create the moderations service.
self.moderation_queues[ctx.guild_id] = asyncio.Queue()
if self.moderation_alerts_channel or alert_channel_id:
moderations_channel = await self.bot.fetch_channel(
self.moderation_alerts_channel
if not alert_channel_id
else alert_channel_id
)
else:
moderations_channel = self.moderation_alerts_channel # None
# Check if the current guild is already in the database and if so, if the moderations is on
if self.check_guild_moderated(ctx.guild_id):
await ctx.respond("Moderations is already enabled for this guild")
return
self.moderation_tasks[ctx.guild_id] = asyncio.ensure_future(
Moderation.process_moderation_queue(
self.moderation_queues[ctx.guild_id], 1, 1, moderations_channel
)
# Create the moderations service.
self.set_guild_moderated(ctx.guild_id)
moderations_channel = await self.check_and_launch_moderations(
ctx.guild_id,
self.moderation_alerts_channel
if not alert_channel_id
else alert_channel_id,
)
self.set_moderated_alert_channel(ctx.guild_id, moderations_channel.id)
await ctx.respond("Moderations service enabled")
elif status == "off":
# Cancel the moderations service.
self.set_guild_moderated(ctx.guild_id, False)
self.moderation_tasks[ctx.guild_id].cancel()
self.moderation_tasks[ctx.guild_id] = None
self.moderation_queues[ctx.guild_id] = None
self.moderations_launched.remove(ctx.guild_id)
await ctx.respond("Moderations service disabled")
@add_to_group("gpt")
@ -1474,6 +1513,27 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Otherwise, process the settings change
await self.process_settings_command(ctx, parameter, value)
def check_guild_moderated(self, guild_id):
return guild_id in MOD_DB and MOD_DB[guild_id]["moderated"]
def get_moderated_alert_channel(self, guild_id):
return MOD_DB[guild_id]["alert_channel"]
def set_moderated_alert_channel(self, guild_id, channel_id):
MOD_DB[guild_id] = {"moderated": True, "alert_channel": channel_id}
MOD_DB.commit()
def set_guild_moderated(self, guild_id, status=True):
if guild_id not in MOD_DB:
MOD_DB[guild_id] = {"moderated": status, "alert_channel": 0}
MOD_DB.commit()
return
MOD_DB[guild_id] = {
"moderated": status,
"alert_channel": self.get_moderated_alert_channel(guild_id),
}
MOD_DB.commit()
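
Taken together, these helpers do a read-modify-write over the per-guild record in MOD_DB, so a previously configured alert channel survives toggling the moderated flag. A quick standalone sketch of that behaviour (a plain dict stands in for the SqliteDict-backed MOD_DB; the ids are made up):

    # Standalone sketch of the read-modify-write pattern above.
    MOD_DB = {}  # a plain dict standing in for the SqliteDict table

    def set_moderated_alert_channel(guild_id, channel_id):
        MOD_DB[guild_id] = {"moderated": True, "alert_channel": channel_id}

    def set_guild_moderated(guild_id, status=True):
        if guild_id not in MOD_DB:
            MOD_DB[guild_id] = {"moderated": status, "alert_channel": 0}
            return
        # Preserve the previously configured alert channel when flipping the flag.
        MOD_DB[guild_id] = {
            "moderated": status,
            "alert_channel": MOD_DB[guild_id]["alert_channel"],
        }

    guild_id, channel_id = 1234567890, 977697652147892304  # hypothetical ids
    set_guild_moderated(guild_id)                # first enable: alert_channel defaults to 0
    set_moderated_alert_channel(guild_id, channel_id)
    set_guild_moderated(guild_id, status=False)  # disable keeps the alert channel
    print(MOD_DB[guild_id])  # {'moderated': False, 'alert_channel': 977697652147892304}
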
class ConversationView(discord.ui.View):
def __init__(self, ctx, converser_cog, id, custom_api_key=None):

@ -104,6 +104,14 @@ class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
# escape any mentions
response_text = discord.utils.escape_mentions(response_text)
# If the response text is > 75 words, truncate it to the last 70 words
# TODO Temporary workaround until prompt is adjusted to make the optimized prompts shorter.
try:
if len(response_text.split()) > 75:
response_text = " ".join(response_text.split()[-70:])
except:
pass
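
The workaround above simply trims the optimizer's output by word count. Here is the same trim as a tiny standalone function, with the 75/70 thresholds copied from the hunk:

    # Same word-count trim as above, pulled out so it can be run directly.
    def trim_to_last_words(text: str, max_words: int = 75, keep: int = 70) -> str:
        words = text.split()
        # If the optimized prompt rambles past max_words, keep only the trailing words.
        return " ".join(words[-keep:]) if len(words) > max_words else text

    print(trim_to_last_words("word " * 100))  # prints the last 70 words
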
response_message = await ctx.respond(
response_text.replace("Optimized Prompt:", "")
.replace("Output Prompt:", "")

@ -24,7 +24,7 @@ from models.openai_model import Model
from models.usage_service_model import UsageService
from models.env_service_model import EnvService
__version__ = "5.0.1"
__version__ = "5.1.1"
"""
The pinecone service is used to store and retrieve conversation embeddings.

@ -16,3 +16,6 @@ GPT_ROLES = "openai,gpt"
WELCOME_MESSAGE = "Hi There! Welcome to our Discord server. We hope you'll enjoy our server and we look forward to engaging with you!" # This is a fallback message if gpt3 fails to generate a welcome message.
USER_INPUT_API_KEYS="False" # If True, users must use their own API keys for OpenAI. If False, the bot will use the API key in the .env file.
# Moderations Service alert channel, this is where moderation alerts will be sent as a default if enabled
MODERATIONS_ALERT_CHANNEL = "977697652147892304"
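
The new MODERATIONS_ALERT_CHANNEL variable is read back by EnvService.get_moderations_alert_channel(), referenced earlier in this diff. Below is a hedged sketch of how such a lookup typically works; it is illustrative only and not the project's actual EnvService code, whose return type may differ.

    # Illustrative only: reading a .env-style channel id with os.getenv.
    import os

    def get_moderations_alert_channel():
        raw = os.getenv("MODERATIONS_ALERT_CHANNEL")
        # Discord channel ids are numeric; return None if unset or malformed.
        if raw is None or not raw.strip().isdigit():
            return None
        return int(raw.strip())

    print(get_moderations_alert_channel())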