Merge pull request #95 from Kav-K/api-backoff

api backoff
Kaveen Kumarasinghe 1 year ago committed by GitHub
commit fc1a5c5c32

@@ -8,9 +8,10 @@
[![GitHub license](https://img.shields.io/github/license/Kav-K/GPT3Discord)](https://github.com/Kav-K/GPT3Discord/blob/master/LICENSE)
[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com)
# Overview
A robust, all-in-one GPT3 interface for Discord. Chat just like ChatGPT right inside Discord! Generate beautiful AI art using DALL-E 2! Automatically moderate your server using AI! A thorough integration with permanent conversation memory, automatic request retry, fault tolerance and reliability for servers of any scale, and much more.
SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the bot here, it is for setup support ONLY)
# Screenshots
<p align="center">
@@ -22,6 +23,9 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
</p>
# Recent Notable Updates
- **Automatic retry on API errors** - The bot will automatically retry API requests if they fail due to an issue with OpenAI's APIs. This is becoming increasingly important as those APIs come under heavy load (see the sketch after this list).
- **Allow each individual user to enter their own API Key!** - Each request that a user makes will be made using their own API key! Check out the User-Input API Key section in this README for more details.
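For context, the retry described above is driven by the `backoff` library (see the `@backoff.on_exception(...)` decorators added to `models/openai_model.py` further down in this diff). Here is a minimal, self-contained sketch of the pattern, assuming the `backoff` and `aiohttp` packages; the function name and URL are illustrative, not part of the bot:

```python
import aiohttp
import backoff


def backoff_handler(details):
    # Called by `backoff` before each retry sleep; `details` carries
    # the computed wait time and the attempt count.
    print(f"Backing off {details['wait']:0.1f}s after {details['tries']} tries")


# Retry on HTTP errors with exponential backoff: waits are capped at
# factor * base**n seconds (3s, 15s, 75s), jittered by default, and the
# last ClientResponseError is re-raised after max_tries attempts.
@backoff.on_exception(
    backoff.expo,
    aiohttp.ClientResponseError,
    factor=3,
    base=5,
    max_tries=4,
    on_backoff=backoff_handler,
)
async def fetch_json(url):
    # raise_for_status=True converts HTTP 4xx/5xx responses into
    # aiohttp.ClientResponseError, which triggers the retry above.
    async with aiohttp.ClientSession(raise_for_status=True) as session:
        async with session.get(url) as resp:
            return await resp.json()
```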
@@ -34,9 +38,6 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
- **AI-BASED SERVER MODERATION** - GPT3Discord now has a built-in AI-based moderation system that can automatically detect and remove toxic messages from your server. This is a great way to keep your server safe and clean, and it's completely automatic and **free**! Check out the commands section to learn how to enable it!
- **AUTOMATIC CHAT SUMMARIZATION!** - When the context limit of a conversation is reached, the bot will use GPT3 itself to summarize the conversation to reduce the token count and keep conversing with you. This lets you chat for a long time! (A sketch of this idea follows this list.)
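A hypothetical sketch of this summarize-on-overflow idea; the helper names and token limit are illustrative assumptions, not the bot's actual internals:

```python
# Hypothetical sketch: fold the transcript into a model-written summary
# once it grows past the token limit, then keep chatting on top of it.
async def add_turn(history, turn, count_tokens, summarize, token_limit=4000):
    history.append(turn)
    if count_tokens("\n".join(history)) > token_limit:
        summary = await summarize("\n".join(history))
        history[:] = [f"Summary of prior conversation: {summary}"]
    return history
```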
# Features
- **Directly prompt GPT3 with `/gpt ask <prompt>`**
@@ -50,6 +51,8 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can NOT use the
- **Automatic AI-Based Server Moderation** - Moderate your server automatically with AI!
- **Auto-retry on API errors** - Automatically resend failed requests to OpenAI's APIs!
- Automatically re-send your prompt and update the response in place if you edit your original prompt!
- Async and fault tolerant, **can handle hundreds of users at once**, if the upstream API permits!

@@ -4,6 +4,7 @@ import tempfile
import traceback
from io import BytesIO
import aiohttp
import discord
from PIL import Image
from pycord.multicog import add_to_group
@@ -60,18 +61,21 @@ class DrawDallEService(discord.Cog, name="DrawDallEService"):
vary=vary if not draw_from_optimizer else None,
custom_api_key=custom_api_key,
)
except ValueError as e:
(
await ctx.channel.send(
f"Error: {e}. Please try again with a different prompt."
)
if not from_context
else await ctx.respond(
f"Error: {e}. Please try again with a different prompt."
)
# Error catching for API errors
except aiohttp.ClientResponseError as e:
message = f"The API returned an invalid response: **{e.status}: {e.message}**"
await ctx.channel.send(message) if not from_context else await ctx.respond(
message
)
return
except ValueError as e:
message = f"Error: {e}. Please try again with a different prompt."
await ctx.channel.send(message) if not from_context else await ctx.respond(message)
return
# Start building an embed to send to the user with the results of the image generation
embed = discord.Embed(
title="Image Generation Results"

@@ -9,6 +9,8 @@ from pathlib import Path
import aiofiles
import json
import aiohttp
import discord
from pycord.multicog import add_to_group
@@ -826,6 +828,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
response_text = response_text.replace("<|endofstatement|>", "")
return response_text
def remove_awaiting(self, author_id, channel_id, from_g_command):
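    # Clear the "awaiting response" locks so the user/thread can prompt again.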
if author_id in self.awaiting_responses:
self.awaiting_responses.remove(author_id)
if not from_g_command:
if channel_id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(channel_id)
async def mention_to_username(self, ctx, message):
if not discord.utils.raw_mentions(message):
return message
@@ -1148,31 +1157,33 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if ctx.channel.id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(ctx.channel.id)
# Error catching for AIOHTTP Errors
except aiohttp.ClientResponseError as e:
message = f"The API returned an invalid response: **{e.status}: {e.message}**"
if from_context:
await ctx.send_followup(message)
else:
await ctx.reply(message)
self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command)
# Error catching for OpenAI model value errors
except ValueError as e:
if from_context:
await ctx.send_followup(e)
else:
await ctx.reply(e)
if ctx.author.id in self.awaiting_responses:
self.awaiting_responses.remove(ctx.author.id)
if not from_g_command:
if ctx.channel.id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(ctx.channel.id)
self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command)
# General catch case for everything
except Exception:
message = "Something went wrong, please try again later. This may be due to upstream issues on the API, or rate limiting."
await ctx.send_followup(message) if from_context else await ctx.reply(
message
)
if ctx.author.id in self.awaiting_responses:
self.awaiting_responses.remove(ctx.author.id)
if not from_g_command:
if ctx.channel.id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(ctx.channel.id)
self.remove_awaiting(ctx.author.id, ctx.channel.id, from_g_command)
traceback.print_exc()
try:
@@ -1786,6 +1797,15 @@ class SetupModal(discord.ui.Modal):
ephemeral=True,
delete_after=10,
)
except aiohttp.ClientResponseError as e:
await interaction.response.send_message(
f"The API returned an invalid response: **{e.status}: {e.message}**",
ephemeral=True,
delete_after=30,
)
return
except Exception as e:
await interaction.response.send_message(
f"Your API key looks invalid, the API returned: {e}. Please check that your API key is correct before proceeding",

@@ -24,7 +24,7 @@ from models.openai_model import Model
from models.usage_service_model import UsageService
from models.env_service_model import EnvService
__version__ = "5.3.2"
__version__ = "5.4"
"""
The pinecone service is used to store and retrieve conversation embeddings.

@@ -8,11 +8,13 @@ import uuid
from typing import Tuple, List, Any
import aiohttp
import backoff
import discord
# An enum of two modes, TOP_P or TEMPERATURE
import requests
from PIL import Image
from aiohttp import RequestInfo
from discord import File
@@ -341,6 +343,10 @@ class Model:
)
self._prompt_min_length = value
def backoff_handler(details):
    # Called by `backoff` before each retry sleep; logs the wait time, the
    # attempt number, and the HTTP status/message that triggered the retry.
    print(
        f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
        f"{details['exception'].status}: {details['exception'].message}"
    )
async def valid_text_request(self, response):
try:
tokens_used = int(response["usage"]["total_tokens"])
@@ -351,8 +357,9 @@
+ str(response["error"]["message"])
)
@backoff.on_exception(backoff.expo, aiohttp.ClientResponseError, factor=3, base=5, max_tries=4, on_backoff=backoff_handler)
async def send_embedding_request(self, text, custom_api_key=None):
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
payload = {
"model": Models.EMBEDDINGS,
"input": text,
@@ -368,14 +375,15 @@
try:
return response["data"][0]["embedding"]
except Exception as e:
except Exception:
print(response)
traceback.print_exc()
return
@backoff.on_exception(backoff.expo, aiohttp.ClientResponseError, factor=3, base=5, max_tries=6, on_backoff=backoff_handler)
async def send_moderations_request(self, text):
# Use aiohttp to send the above request:
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key}",
@@ -388,6 +396,7 @@
) as response:
return await response.json()
@backoff.on_exception(backoff.expo, aiohttp.ClientResponseError, factor=3, base=5, max_tries=4, on_backoff=backoff_handler)
async def send_summary_request(self, prompt, custom_api_key=None):
"""
Sends a summary request to the OpenAI API
@@ -403,7 +412,7 @@
tokens = self.usage_service.count_tokens(summary_request_text)
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
payload = {
"model": Models.DAVINCI,
"prompt": summary_request_text,
@@ -429,6 +438,7 @@
return response
@backoff.on_exception(backoff.expo, aiohttp.ClientResponseError, factor=3, base=5, max_tries=4, on_backoff=backoff_handler)
async def send_request(
self,
prompt,
@@ -457,7 +467,7 @@
f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
)
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
payload = {
"model": self.model,
"prompt": prompt,
@@ -511,6 +521,7 @@
return response
@backoff.on_exception(backoff.expo, aiohttp.ClientResponseError, factor=3, base=5, max_tries=4, on_backoff=backoff_handler)
async def send_image_request(
self, ctx, prompt, vary=None, custom_api_key=None
) -> tuple[File, list[Any]]:
@@ -533,15 +544,16 @@
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
}
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
async with session.post(
"https://api.openai.com/v1/images/generations",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
else:
async with aiohttp.ClientSession() as session:
async with aiohttp.ClientSession(raise_for_status=True) as session:
data = aiohttp.FormData()
data.add_field("n", str(self.num_images))
data.add_field("size", self.image_size)
@@ -559,7 +571,7 @@
) as resp:
response = await resp.json()
# print(response)
print(response)
image_urls = []
for result in response["data"]:
