Merge remote-tracking branch 'Remote_Origin/main' into edit-endpoint

Rene Teigen 1 year ago
commit baa37f65fd

@@ -8,9 +8,10 @@
[![GitHub license](https://img.shields.io/github/license/Kav-K/GPT3Discord)](https://github.com/Kav-K/GPT3Discord/blob/master/LICENSE)
[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com)
# Overview
A robust, all-in-one GPT3 interface for Discord. Chat just like ChatGPT right inside Discord! Generate beautiful AI art using DALL-E 2! Automatically moderate your server using AI! A thorough integration with permanent conversation memory, automatic request retry, fault tolerance and reliability for servers of any scale, and much more.
SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the bot here; it is for setup support ONLY)
# Screenshots
<p align="center">
@@ -22,6 +23,9 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
</p>
# Recent Notable Updates
- **Automatic retry on API errors** - The bot will automatically retry API requests that fail due to an issue with OpenAI's APIs; this is increasingly important now that their APIs regularly come under heavy load.
- **Allow each individual user to enter their own API Key!** - Each request that a user makes will be made using their own API key! Check out the User-Input API Key section in this README for more details.
@@ -34,9 +38,6 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
- **AI-BASED SERVER MODERATION** - GPT3Discord now has a built-in AI-based moderation system that can automatically detect and remove toxic messages from your server. This is a great way to keep your server safe and clean, and it's completely automatic and **free**! Check out the commands section to learn how to enable it!
- **AUTOMATIC CHAT SUMMARIZATION!** - When the context limit of a conversation is reached, the bot will use GPT3 itself to summarize the conversation and reduce its token count, then keep conversing with you; this lets you chat for a long time! A sketch of this flow is shown below.
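A rough sketch of that summarize-and-continue flow (illustrative only; `token_count` and the prompt wording are hypothetical stand-ins, while `send_summary_request`/`send_request` mirror the bot's Model methods shown further down this diff):

```python
# Hypothetical sketch: when a conversation nears the model's context limit,
# compress the history with a summary request, then carry on from the summary.
async def continue_conversation(model, history, new_message, context_limit=4000):
    if token_count(history) > context_limit:  # token_count is a stand-in helper
        summary = await model.send_summary_request(
            "Summarize this conversation:\n" + history
        )
        history = "Summary of prior conversation: " + summary
    return await model.send_request(history + "\n" + new_message)
```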
# Features
- **Directly prompt GPT3 with `/gpt ask <prompt>`**
@@ -50,6 +51,8 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
- **Automatic AI-Based Server Moderation** - Moderate your server automatically with AI!
- **Auto-retry on API errors** - Automatically resend failed requests to OpenAI's APIs!
- Automatically re-send your prompt and update the response in place if you edit your original prompt!
- Async and fault-tolerant, **can handle hundreds of users at once** if the upstream API permits!
@@ -127,6 +130,10 @@ These commands are grouped, so each group has a prefix but you can easily tab co
- The bot uses numerical thresholds to determine whether a message is toxic. I have manually tested and fine-tuned these thresholds to a point that I think is good; please open an issue if you have any suggestions for the thresholds!
- There are two thresholds for the bot: one at which it will outright delete a message, and one at which it will send a message to the alert channel notifying admins and giving them quick options to delete the message and time out the user (check out the screenshots at the beginning of the README to see this).
If you'd like to help us test and fine-tune our thresholds for the moderation service, please join this test server: https://discord.gg/CWhsSgNdrP. You can let off some steam in a controlled environment ;)
**The above server is NOT for support or discussions about GPT3Discord**
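For intuition, the check amounts to comparing each category score in an OpenAI `/v1/moderations` response against two tables of cutoffs, one for warning and a higher one for deletion. A minimal sketch (cutoff values illustrative, not the bot's tuned ThresholdSet values):

```python
# Illustrative thresholds only; category names come from the OpenAI moderation API.
WARN_AT = {"hate": 0.01, "violence": 0.1}
DELETE_AT = {"hate": 0.26, "violence": 0.6}

def action_for(scores: dict) -> str:
    # scores = response["results"][0]["category_scores"]
    if any(scores.get(cat, 0) > cut for cat, cut in DELETE_AT.items()):
        return "delete"
    if any(scores.get(cat, 0) > cut for cat, cut in WARN_AT.items()):
        return "warn"
    return "allow"
```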
# Permanent Memory
Permanent memory has now been implemented in the bot, using the OpenAI Ada embeddings endpoint and Pinecone DB.
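A minimal sketch of that embed-and-retrieve flow, assuming the classic `openai`/`pinecone-client` Python APIs; the index name, keys, and helper names are placeholders, and the bot wraps this logic in its own Pinecone service:

```python
import openai
import pinecone

pinecone.init(api_key="PINECONE_API_KEY", environment="us-east1-gcp")  # placeholders
index = pinecone.Index("conversation-memory")  # hypothetical index name

def embed(text: str) -> list[float]:
    # Ada embeddings endpoint (classic pre-1.0 openai client API)
    resp = openai.Embedding.create(model="text-embedding-ada-002", input=text)
    return resp["data"][0]["embedding"]

def remember(message_id: str, text: str):
    index.upsert([(message_id, embed(text), {"text": text})])

def recall(query: str, k: int = 5):
    # Returns the k stored messages most similar to the query
    return index.query(vector=embed(query), top_k=k, include_metadata=True)
```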

@@ -4,6 +4,7 @@ import tempfile
import traceback
from io import BytesIO
import aiohttp
import discord
from PIL import Image
from pycord.multicog import add_to_group
@@ -60,16 +61,23 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
                vary=vary if not draw_from_optimizer else None,
                custom_api_key=custom_api_key,
            )
        # Error catching for API errors
        except aiohttp.ClientResponseError as e:
            message = (
                f"The API returned an invalid response: **{e.status}: {e.message}**"
            )
            await ctx.channel.send(message) if not from_context else await ctx.respond(
                message
            )
            return
        except ValueError as e:
-            (
-                await ctx.channel.send(
-                    f"Error: {e}. Please try again with a different prompt."
-                )
-                if not from_context
-                else await ctx.respond(
-                    f"Error: {e}. Please try again with a different prompt."
-                )
-            )
+            message = f"Error: {e}. Please try again with a different prompt."
+            await ctx.channel.send(message) if not from_context else await ctx.respond(
+                message
+            )
            return

        # Start building an embed to send to the user with the results of the image generation

@@ -9,6 +9,8 @@ from pathlib import Path
import aiofiles
import json
import aiohttp
import discord
from pycord.multicog import add_to_group
@@ -833,6 +835,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
        response_text = response_text.replace("<|endofstatement|>", "")
        return response_text

    def remove_awaiting(self, author_id, channel_id, from_g_command, from_edit_command):
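        # Shared cleanup used by the error handlers below: drop the author from
        # awaiting_responses and, for conversation threads, the channel from
        # awaiting_thread_responses.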
        if author_id in self.awaiting_responses:
            self.awaiting_responses.remove(author_id)
        if not from_g_command and not from_edit_command:
            if channel_id in self.awaiting_thread_responses:
                self.awaiting_thread_responses.remove(channel_id)

    async def mention_to_username(self, ctx, message):
        if not discord.utils.raw_mentions(message):
            return message
@@ -1182,31 +1191,33 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
            if ctx.channel.id in self.awaiting_thread_responses:
                self.awaiting_thread_responses.remove(ctx.channel.id)
        # Error catching for AIOHTTP Errors
        except aiohttp.ClientResponseError as e:
            message = (
                f"The API returned an invalid response: **{e.status}: {e.message}**"
            )
            if from_context:
                await ctx.send_followup(message)
            else:
                await ctx.reply(message)
            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command, from_edit_command)
        # Error catching for OpenAI model value errors
        except ValueError as e:
            if from_context:
                await ctx.send_followup(e)
            else:
                await ctx.reply(e)
-            if ctx.author.id in self.awaiting_responses:
-                self.awaiting_responses.remove(ctx.author.id)
-            if not from_g_command and not from_edit_command:
-                if ctx.channel.id in self.awaiting_thread_responses:
-                    self.awaiting_thread_responses.remove(ctx.channel.id)
+            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command, from_edit_command)
        # General catch case for everything
        except Exception:
            message = "Something went wrong, please try again later. This may be due to upstream issues on the API, or rate limiting."
            await ctx.send_followup(message) if from_context else await ctx.reply(
                message
            )
-            if ctx.author.id in self.awaiting_responses:
-                self.awaiting_responses.remove(ctx.author.id)
-            if not from_g_command:
-                if ctx.channel.id in self.awaiting_thread_responses:
-                    self.awaiting_thread_responses.remove(ctx.channel.id)
+            self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command, from_edit_command)
            traceback.print_exc()
            try:
@@ -1355,6 +1366,23 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
            custom_api_key=user_api_key,
        )

    @discord.slash_command(
        name="private-test",
        description="Private thread for testing. Only visible to you and server admins.",
        guild_ids=ALLOWED_GUILDS,
    )
    @discord.guild_only()
    async def private_test(self, ctx: discord.ApplicationContext):
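        # Deferring with ephemeral=True keeps the deferred interaction response
        # visible only to the invoking user.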
        await ctx.defer(ephemeral=True)
        await ctx.respond("Your private test thread")
        thread = await ctx.channel.create_thread(
            name=ctx.user.name + "'s private test conversation",
            auto_archive_duration=60,
        )
        await thread.send(
            f"<@{str(ctx.user.id)}> This is a private thread for testing. Only you and server admins can see this thread."
        )

    @add_to_group("gpt")
    @discord.slash_command(
        name="converse",
@@ -1870,6 +1898,15 @@ class SetupModal(discord.ui.Modal):
                ephemeral=True,
                delete_after=10,
            )
        except aiohttp.ClientResponseError as e:
            await interaction.response.send_message(
                f"The API returned an invalid response: **{e.status}: {e.message}**",
                ephemeral=True,
                delete_after=30,
            )
            return
        except Exception as e:
            await interaction.response.send_message(
                f"Your API key looks invalid, the API returned: {e}. Please check that your API key is correct before proceeding",
@@ -75,7 +75,7 @@ class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
        final_prompt = self.OPTIMIZER_PRETEXT
        # replace mentions with nicknames for the prompt
-        final_prompt += await self.converser_cog.replace_mention(ctx, prompt)
+        final_prompt += await self.converser_cog.mention_to_username(ctx, prompt)

        # If the prompt doesn't end in a period, terminate it.
        if not final_prompt.endswith("."):

@@ -24,7 +24,7 @@ from models.openai_model import Model
from models.usage_service_model import UsageService
from models.env_service_model import EnvService
__version__ = "5.3"
__version__ = "5.4"
"""
The pinecone service is used to store and retrieve conversation embeddings.

@@ -120,8 +120,10 @@ class Moderation:
    @staticmethod
    def determine_moderation_result(text, response):
-        warn_set = ThresholdSet(0.005, 0.05, 0.05, 0.91, 0.1, 0.04, 0.1)
-        delete_set = ThresholdSet(0.1, 0.1, 0.1, 0.95, 0.03, 0.6, 0.4)
+        # warn_set = ThresholdSet(0.005, 0.05, 0.05, 0.91, 0.1, 0.04, 0.1)
+        # delete_set = ThresholdSet(0.26, 0.26, 0.1, 0.95, 0.03, 0.85, 0.4)
+        warn_set = ThresholdSet(0.01, 0.05, 0.05, 0.91, 0.1, 0.45, 0.1)
+        delete_set = ThresholdSet(0.26, 0.26, 0.1, 0.95, 0.03, 0.85, 0.4)

        warn_result, flagged_warn = warn_set.moderate(text, response)
        delete_result, flagged_delete = delete_set.moderate(text, response)
@@ -188,6 +190,12 @@ class Moderation:
                            to_moderate.message
                        ),
                    )
                    # Attempt to react to the to_moderate.message with a warning icon
                    try:
                        await to_moderate.message.add_reaction("⚠️")
                    except discord.errors.Forbidden:
                        pass
                    await response_message.edit(
                        view=ModerationAdminView(
                            to_moderate.message, response_message

@@ -8,11 +8,13 @@ import uuid
from typing import Tuple, List, Any
import aiohttp
import backoff
import discord
# An enum of two modes, TOP_P or TEMPERATURE
import requests
from PIL import Image
from aiohttp import RequestInfo
from discord import File
@@ -343,6 +345,12 @@ class Model:
        )
        self._prompt_min_length = value
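    # Passed as on_backoff to the retry decorators below; logs the wait time,
    # attempt count, and the HTTP error on each retry.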
    def backoff_handler(details):
        print(
            f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
            f"{details['exception'].status}: {details['exception'].message}"
        )

    async def valid_text_request(self, response):
        try:
            tokens_used = int(response["usage"]["total_tokens"])
@@ -353,8 +361,16 @@ class Model:
                + str(response["error"]["message"])
            )

    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=4,
        on_backoff=backoff_handler,
    )
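    # backoff.expo(base=5, factor=3) grows the wait as factor * base**n, i.e. roughly
    # 3s, 15s, 75s between attempts (full jitter is applied by default), giving up
    # after max_tries attempts.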
    async def send_embedding_request(self, text, custom_api_key=None):
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
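            # raise_for_status=True turns non-2xx responses into aiohttp.ClientResponseError,
            # the exact exception the backoff decorator above retries on.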
            payload = {
                "model": Models.EMBEDDINGS,
                "input": text,
@@ -370,11 +386,19 @@ class Model:
                try:
                    return response["data"][0]["embedding"]
-                except Exception as e:
+                except Exception:
                    print(response)
                    traceback.print_exc()
                    return
    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=6,
        on_backoff=backoff_handler,
    )
    async def send_edit_request(self, instruction, input=None, temp_override=None, top_p_override=None, codex=False, custom_api_key=None):
        # Validate that all the parameters are in a good state before we send the request
@@ -390,7 +414,7 @@ class Model:
            f"Overrides -> temp:{temp_override}, top_p:{top_p_override}"
        )

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
            payload = {
                "model": Models.EDIT if codex is False else Models.CODE_EDIT,
                "input": "" if input is None else input,
@@ -409,9 +433,17 @@ class Model:
                await self.valid_text_request(response)
                return response

    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=6,
        on_backoff=backoff_handler,
    )
    async def send_moderations_request(self, text):
        # Use aiohttp to send the above request:
-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_key}",
@@ -424,6 +456,14 @@ class Model:
            ) as response:
                return await response.json()
    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=4,
        on_backoff=backoff_handler,
    )
    async def send_summary_request(self, prompt, custom_api_key=None):
        """
        Sends a summary request to the OpenAI API
@@ -439,7 +479,7 @@ class Model:
        tokens = self.usage_service.count_tokens(summary_request_text)

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
            payload = {
                "model": Models.DAVINCI,
                "prompt": summary_request_text,
@@ -465,6 +505,14 @@ class Model:
                return response
    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=4,
        on_backoff=backoff_handler,
    )
    async def send_request(
        self,
        prompt,
@@ -494,7 +542,7 @@ class Model:
            f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
        )

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(raise_for_status=True) as session:
            payload = {
                "model": self.model if model is None else model,
                "prompt": prompt,
@@ -548,6 +596,14 @@ class Model:
                return response
    @backoff.on_exception(
        backoff.expo,
        aiohttp.ClientResponseError,
        factor=3,
        base=5,
        max_tries=4,
        on_backoff=backoff_handler,
    )
    async def send_image_request(
        self, ctx, prompt, vary=None, custom_api_key=None
    ) -> tuple[File, list[Any]]:
@@ -570,15 +626,16 @@ class Model:
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
            }
-            async with aiohttp.ClientSession() as session:
+            async with aiohttp.ClientSession(raise_for_status=True) as session:
                async with session.post(
                    "https://api.openai.com/v1/images/generations",
                    json=payload,
                    headers=headers,
                ) as resp:
                    response = await resp.json()
        else:
-            async with aiohttp.ClientSession() as session:
+            async with aiohttp.ClientSession(raise_for_status=True) as session:
                data = aiohttp.FormData()
                data.add_field("n", str(self.num_images))
                data.add_field("size", self.image_size)
@@ -596,7 +653,7 @@ class Model:
                ) as resp:
                    response = await resp.json()

-        # print(response)
+        print(response)
        image_urls = []
        for result in response["data"]:
