@ -1,4 +1,3 @@
import asyncio
import datetime
import re
import traceback
@ -11,7 +10,6 @@ import json
import discord
from models . check_model import Check
from services . environment_service import EnvService
from services . message_queue_service import Message
from services . moderations_service import Moderation
@ -28,9 +26,9 @@ if sys.platform == "win32":
else :
separator = " / "
"""
Get the user key service if it is enabled .
"""
#
#Get the user key service if it is enabled .
#
USER_INPUT_API_KEYS = EnvService . get_user_input_api_keys ( )
USER_KEY_DB = None
if USER_INPUT_API_KEYS :
@ -54,10 +52,10 @@ if USER_INPUT_API_KEYS:
print ( " Retrieved/created the user key database " )
"""
Obtain the Moderation table and the General table , these are two SQLite tables that contain
information about the server that are used for persistence and to auto - restart the moderation service .
"""
#
#Obtain the Moderation table and the General table, these are two SQLite tables that contain
#information about the server that are used for persistence and to auto-restart the moderation service .
#
MOD_DB = None
GENERAL_DB = None
try :
@ -158,6 +156,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self . conversation_thread_owners = { }
async def load_file ( self , file , ctx ) :
''' Take filepath, return content or respond if not found '''
try :
async with aiofiles . open ( file , " r " ) as f :
return await f . read ( )
@ -170,6 +169,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
@discord.Cog.listener ( )
async def on_member_join ( self , member ) :
''' When members join send welcome message if enabled '''
if self . model . welcome_message_enabled :
query = f " Please generate a welcome message for { member . name } who has just joined the server. "
@ -178,7 +178,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
query , tokens = self . usage_service . count_tokens ( query )
)
welcome_message = str ( welcome_message_response [ " choices " ] [ 0 ] [ " text " ] )
except :
except Exception :
welcome_message = None
if not welcome_message :
@ -195,6 +195,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
@discord.Cog.listener ( )
async def on_ready ( self ) :
''' When ready to recieve data set debug channel and sync commands '''
self . debug_channel = self . bot . get_guild ( self . DEBUG_GUILD ) . get_channel (
self . DEBUG_CHANNEL
)
@ -209,10 +210,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
check_guilds = [ ] ,
delete_existing = True ,
)
print ( f " Commands synced " )
print ( " Commands synced " )
# TODO: add extra condition to check if multi is enabled for the thread, stated in conversation_threads
def check_conversing ( self , user_id , channel_id , message_content , multi = None ) :
def check_conversing ( self , channel_id , message_content ) :
''' given channel id and a message, return true if it ' s a conversation thread, false if not, or if the message starts with " ~ " '''
cond1 = channel_id in self . conversation_threads
# If the trimmed message starts with a Tilde, then we want to not contribute this to the conversation
try :
@ -226,6 +227,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
async def end_conversation (
self , ctx , opener_user_id = None , conversation_limit = False
) :
''' end the thread of the user interacting with the bot, if the conversation has reached the limit close it for the owner '''
normalized_user_id = opener_user_id if opener_user_id else ctx . author . id
if (
conversation_limit
@ -234,7 +236,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
else :
try :
channel_id = self . conversation_thread_owners [ normalized_user_id ]
except :
except Exception :
await ctx . delete ( delay = 5 )
await ctx . reply (
" Only the conversation starter can end this. " , delete_after = 5
@ -276,12 +278,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
thread = await self . bot . fetch_channel ( channel_id )
await thread . edit ( locked = True )
await thread . edit ( name = " Closed-GPT " )
except :
except Exception :
traceback . print_exc ( )
pass
except :
except Exception :
traceback . print_exc ( )
pass
else :
if normalized_user_id in self . conversation_thread_owners :
thread_id = self . conversation_thread_owners [ normalized_user_id ]
@ -292,11 +292,11 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
thread = await self . bot . fetch_channel ( thread_id )
await thread . edit ( locked = True )
await thread . edit ( name = " Closed-GPT " )
except :
except Exception :
traceback . print_exc ( )
pass
async def send_settings_text ( self , ctx ) :
''' compose and return the settings menu to the interacting user '''
embed = discord . Embed (
title = " GPT3Bot Settings " ,
description = " The current settings of the model " ,
@ -325,9 +325,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
) ,
inline = True ,
)
await ctx . respond ( embed = embed )
await ctx . respond ( embed = embed , ephemeral = True )
async def process_settings ( self , ctx , parameter , value ) :
''' Given a parameter and value set the corresponding parameter in storage to the value '''
# Check if the parameter is a valid parameter
if hasattr ( self . model , parameter ) :
@ -354,12 +355,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx . respond ( " The parameter is not a valid parameter " )
def generate_debug_message(self, prompt, response):
    """Build a human-readable debug report for one model exchange.

    Args:
        prompt (str): The prompt that was sent to the model.
        response (dict): The raw JSON-serializable response from the API.

    Returns:
        str: A separator line followed by the prompt and the pretty-printed
        response, each wrapped in a Markdown code fence for Discord.
    """
    # The corrupted source had "+ =" (invalid Python) and space-padded string
    # literals; restored to valid augmented assignment and clean fence text.
    debug_message = "----------------------------------------------------------------------------------\n"
    debug_message += "Prompt:\n```\n" + prompt + "\n```\n"
    debug_message += "Response:\n```\n" + json.dumps(response, indent=4) + "\n```\n"
    return debug_message
async def paginate_and_send ( self , response_text , ctx ) :
''' paginate a response to a text cutoff length and send it in chunks '''
from_context = isinstance ( ctx , discord . ApplicationContext )
response_text = [
@ -382,7 +385,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx . channel . send ( chunk )
async def paginate_embed ( self , response_text , codex , prompt = None , instruction = None ) :
''' Given a response text make embed pages and return a list of the pages. Codex makes it a codeblock in the embed '''
if codex : # clean codex input
response_text = response_text . replace ( " ``` " , " " )
response_text = response_text . replace ( f " ***Prompt: { prompt } *** \n " , " " )
@ -416,9 +419,11 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
return pages
async def queue_debug_message(self, debug_message, debug_channel):
    """Enqueue one debug message for asynchronous delivery.

    Args:
        debug_message (str): The text to send to the debug channel.
        debug_channel: The Discord channel the queued message targets.

    The message is wrapped in the project's ``Message`` container and put on
    ``self.message_queue``; a separate consumer task drains the queue.
    """
    await self.message_queue.put(Message(debug_message, debug_channel))
async def queue_debug_chunks ( self , debug_message , debug_channel ) :
''' Put a message as chunks into the debug queue '''
debug_message_chunks = [
debug_message [ i : i + self . TEXT_CUTOFF ]
for i in range ( 0 , len ( debug_message ) , self . TEXT_CUTOFF )
@ -445,6 +450,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await self . message_queue . put ( Message ( chunk , debug_channel ) )
async def send_debug_message ( self , debug_message , debug_channel ) :
''' process a debug message and put directly into queue or chunk it '''
# Send the debug message
try :
if len ( debug_message ) > self . TEXT_CUTOFF :
@ -458,6 +464,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
async def check_conversation_limit ( self , message ) :
''' Check if a conversation has reached the set limit and end it if it has '''
# After each response, check if the user has reached the conversation limit in terms of messages or time.
if message . channel . id in self . conversation_threads :
# If the user has reached the max conversation length, end the conversation
@ -471,6 +478,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await self . end_conversation ( message , conversation_limit = True )
async def summarize_conversation ( self , message , prompt ) :
''' Takes a conversation history filled prompt and summarizes it to then start a new history with it as the base '''
response = await self . model . send_summary_request ( prompt )
summarized_text = response [ " choices " ] [ 0 ] [ " text " ]
@ -502,6 +510,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# A listener for message edits to redo prompts if they are edited
@discord.Cog.listener ( )
async def on_message_edit ( self , before , after ) :
''' When a message is edited run moderation if enabled, and process if it a prompt that should be redone '''
if after . author . id == self . bot . user . id :
return
@ -524,11 +533,10 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
@discord.Cog.listener ( )
async def on_message ( self , message ) :
''' On a new message check if it should be moderated then process it for conversation '''
if message . author == self . bot . user :
return
content = message . content . strip ( )
# Moderations service is done here.
if (
hasattr ( message , " guild " )
@ -550,6 +558,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
original_message [ message . author . id ] = message . id
def cleanse_response ( self , response_text ) :
''' Cleans history tokens from response '''
response_text = response_text . replace ( " GPTie: \n " , " " )
response_text = response_text . replace ( " GPTie: " , " " )
response_text = response_text . replace ( " GPTie: " , " " )
@ -559,6 +568,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
def remove_awaiting (
self , author_id , channel_id , from_ask_command , from_edit_command
) :
''' Remove user from ask/edit command response wait, if not any of those then process the id to remove user from thread response wait '''
if author_id in self . awaiting_responses :
self . awaiting_responses . remove ( author_id )
if not from_ask_command and not from_edit_command :
@ -566,22 +576,23 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self . awaiting_thread_responses . remove ( channel_id )
async def mention_to_username(self, ctx, message):
    """Replace raw Discord mentions in ``message`` with member display names.

    Args:
        ctx: Command/interaction context providing ``ctx.guild`` for lookups.
        message (str): Text possibly containing ``<@id>`` mention tokens.

    Returns:
        str: The text with every resolvable mention replaced by the member's
        server display name; mentions whose lookup fails are left as-is.
    """
    # This span held both sides of a diff; resolved to the post-image, which
    # drops the redundant early-return guard (an empty mention list simply
    # skips the loop) and narrows the bare except to Exception.
    for mention in discord.utils.raw_mentions(message):
        try:
            user = await discord.utils.get_or_fetch(
                ctx.guild, "member", mention
            )
            message = message.replace(f"<@{str(mention)}>", user.display_name)
        except Exception:
            # Member not found / fetch failed: keep the raw mention token.
            pass
    return message
# COMMANDS
async def help_command ( self , ctx ) :
''' Command handler. Generates a help message and sends it to the user '''
await ctx . defer ( )
embed = discord . Embed (
title = " GPT3Bot Help " , description = " The current commands " , color = 0xC730C7
@ -631,11 +642,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
embed . add_field ( name = " /help " , value = " See this help text " , inline = False )
await ctx . respond ( embed = embed )
await ctx . respond ( embed = embed , ephemeral = True )
async def set_usage_command (
self , ctx : discord . ApplicationContext , usage_amount : float
) :
''' Command handler. Sets the usage file to the given value '''
await ctx . defer ( )
# Attempt to convert the input usage value into a float
@ -643,26 +655,27 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
usage = float ( usage_amount )
await self . usage_service . set_usage ( usage )
await ctx . respond ( f " Set the usage to { usage } " )
except :
except Exception :
await ctx . respond ( " The usage value must be a valid float. " )
return
async def delete_all_conversation_threads_command(
    self, ctx: discord.ApplicationContext
):
    """Command handler. Deletes all GPT conversation threads in this guild.

    Args:
        ctx (discord.ApplicationContext): Command interaction; only threads
            belonging to ``ctx.guild`` are touched.

    Matches threads whose lowercased name contains "with gpt" or
    "closed-gpt"; individual deletion failures (e.g. missing permissions)
    are ignored so one bad thread doesn't abort the sweep.
    """
    # Resolved from a merged diff span to the post-image: scope the deletion
    # to the invoking guild instead of every guild the bot is in.
    await ctx.defer()
    for thread in ctx.guild.threads:
        thread_name = thread.name.lower()
        if "with gpt" in thread_name or "closed-gpt" in thread_name:
            try:
                await thread.delete()
            except Exception:
                pass
    await ctx.respond("All conversation threads in this server have been deleted.")
async def usage_command ( self , ctx ) :
''' Command handler. Responds with the current usage of the bot '''
await ctx . defer ( )
embed = discord . Embed (
title = " GPT3Bot Usage " , description = " The current usage " , color = 0x00FF00
@ -690,6 +703,17 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
presence_penalty : float ,
from_action = None ,
) :
""" Command handler. Requests and returns a generation with no extras to the completion endpoint
Args :
ctx ( discord . ApplicationContext ) : Command interaction
prompt ( str ) : A prompt to use for generation
temperature ( float ) : Sets the temperature override
top_p ( float ) : Sets the top p override
frequency_penalty ( float ) : Sets the frequency penalty override
presence_penalty ( float ) : Sets the presence penalty override
from_action ( bool , optional ) : Enables ephemeral . Defaults to None .
"""
user = ctx . user
prompt = await self . mention_to_username ( ctx , prompt . strip ( ) )
@ -712,26 +736,36 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
presence_penalty_override = presence_penalty ,
from_ask_command = True ,
custom_api_key = user_api_key ,
from_action = prompt ,
from_action = from_action ,
)
async def edit_command (
self ,
ctx : discord . ApplicationContext ,
instruction : str ,
inpu t: str ,
tex t: str ,
temperature : float ,
top_p : float ,
codex : bool ,
) :
""" Command handler. Requests and returns a generation with no extras to the edit endpoint
Args :
ctx ( discord . ApplicationContext ) : Command interaction
instruction ( str ) : The modification instructions
text ( str ) : The text that should be modified
temperature ( float ) : Sets the temperature override
top_p ( float ) : Sets the top p override
codex ( bool ) : Enables the codex edit model
"""
user = ctx . user
input = await self . mention_to_username ( ctx , input . strip ( ) )
tex t = await self . mention_to_username ( ctx , tex t. strip ( ) )
instruction = await self . mention_to_username ( ctx , instruction . strip ( ) )
user_api_key = None
if USER_INPUT_API_KEYS :
user_api_key = await GPT3ComCon . get_user_api_key ( user . id , ctx )
user_api_key = await TextService . get_user_api_key ( user . id , ctx , USER_KEY_DB )
if not user_api_key :
return
@ -740,7 +774,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await TextService . encapsulated_send (
self ,
user . id ,
prompt = inpu t,
prompt = tex t,
ctx = ctx ,
temp_override = temperature ,
top_p_override = top_p ,
@ -751,6 +785,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
)
async def private_test_command ( self , ctx : discord . ApplicationContext ) :
''' Command handler. Creates a private thread in the current channel '''
await ctx . defer ( ephemeral = True )
await ctx . respond ( " Your private test thread " )
thread = await ctx . channel . create_thread (
@ -769,12 +804,22 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
private : bool ,
minimal : bool ,
) :
""" Command handler. Starts a conversation with the bot
Args :
ctx ( discord . ApplicationContext ) : Command interaction
opener ( str ) : The first prompt to send in the conversation
opener_file ( str ) : A . txt or . json file which is appended before the opener
private ( bool ) : If the thread should be private
minimal ( bool ) : If a minimal starter should be used
"""
user = ctx . user
# If we are in user input api keys mode, check if the user has entered their api key before letting them continue
user_api_key = None
if USER_INPUT_API_KEYS :
user_api_key = await GPT3ComCon . get_user_api_key ( user . id , ctx )
user_api_key = await TextService . get_user_api_key ( user . id , ctx , USER_KEY_DB )
if not user_api_key :
return
@ -784,7 +829,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await ctx . defer ( )
if user . id in self . conversation_thread_owners :
message = await ctx . respond (
await ctx . respond (
" You ' ve already created a thread, end it before creating a new one " ,
delete_after = 5 ,
)
@ -852,12 +897,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
opener_file . get ( " text " , " error getting text " )
+ opener
)
except : # Parse as just regular text
except Exception : # Parse as just regular text
if not opener :
opener = opener_file
else :
opener = opener_file + opener
except :
except Exception :
opener_file = None # Just start a regular thread if the file fails to load
# Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
@ -876,7 +921,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
await thread . send (
f " <@ { str ( user_id_normalized ) } > You are now conversing with GPT3. *Say hi to start!* \n "
f " Overrides for this thread is **temp= { overrides [ ' temperature ' ] } **, **top_p= { overrides [ ' top_p ' ] } **, **frequency penalty= { overrides [ ' frequency_penalty ' ] } **, **presence penalty= { overrides [ ' presence_penalty ' ] } ** \n "
f " Overrides for this thread is **temp= { overrides [ ' temperature ' ] } **, **top_p= { overrides [ ' top_p ' ] } ** "
f " , **frequency penalty= { overrides [ ' frequency_penalty ' ] } **, **presence penalty= { overrides [ ' presence_penalty ' ] } ** \n "
f " The model used is ** { self . conversation_threads [ thread . id ] . model } ** \n "
f " End the conversation by saying `end`. \n \n "
f " If you want GPT3 to ignore your messages, start your messages with `~` \n \n "
@ -921,11 +967,12 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self . awaiting_thread_responses . remove ( thread . id )
async def end_command ( self , ctx : discord . ApplicationContext ) :
''' Command handler. Gets the user ' s thread and ends it '''
await ctx . defer ( ephemeral = True )
user_id = ctx . user . id
try :
thread_id = self . conversation_thread_owners [ user_id ]
except :
except Exception :
await ctx . respond (
" You haven ' t started any conversations " , ephemeral = True , delete_after = 10
)
@ -936,13 +983,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
except Exception as e :
print ( e )
traceback . print_exc ( )
pass
else :
await ctx . respond (
" You ' re not in any conversations " , ephemeral = True , delete_after = 10
)
async def setup_command ( self , ctx : discord . ApplicationContext ) :
''' Command handler. Opens the setup modal '''
if not USER_INPUT_API_KEYS :
await ctx . respond (
" This server doesn ' t support user input API keys. " ,
@ -956,6 +1003,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
async def settings_command (
self , ctx : discord . ApplicationContext , parameter : str = None , value : str = None
) :
''' Command handler. Returns current settings or sets new values '''
await ctx . defer ( )
if parameter is None and value is None :
await self . send_settings_text ( ctx )
@ -976,13 +1024,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Otherwise, process the settings change
await self . process_settings ( ctx , parameter , value )
"""
Text - based context menu commands from here
"""
#
#Text-based context menu commands from here
#
async def ask_gpt_action(self, ctx, message: discord.Message):
    """Message context-menu handler: ask GPT about the selected message.

    Args:
        ctx: The context-menu interaction context.
        message (discord.Message): The message the user invoked the action on;
            its content becomes the prompt.

    Delegates to ``ask_command`` with no parameter overrides, passing the
    content as ``from_action`` so the response is sent ephemerally.
    """
    await self.ask_command(
        ctx, message.content, None, None, None, None, from_action=message.content
    )