Merge branch 'Kav-K:main' into main

Cooper Lees 2 years ago committed by GitHub
commit 57e863167d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,13 @@
# These are supported funding model platforms
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: https://paypal.me/kaveenkk9

@ -10,11 +10,16 @@
- **AUTOMATIC CHAT SUMMARIZATION!** - When the context limit of a conversation is reached, the bot will use GPT3 itself to summarize the conversation to reduce the tokens, and continue conversing with you, this allows you to chat for a long time!
- **DALL-E Image Generation**
- **PERMANENT MEMORY FOR CONVERSATIONS COMING SOON USING EMBEDDINGS!**
- **Image prompt optimizer overhauled** - The optimizer works much better now, and makes beautiful image prompts that work even with Midjourney, SD, etc!
- **REDO ON EDIT** - When you edit a prompt, it will automatically be resent to GPT3 and the response updated!
- **Fully async!** - The bot will never be blocked when processing someone else's request, allowing for use in large servers with multiple messages per second!
- **Fully async and fault tolerant - REVAMPED** - The bot will never be blocked when processing someone else's request, allowing for use in large servers with multiple messages per second!
- No need for the OpenAI and Asgiref libraries anymore!
# Features
- **Directly prompt GPT3 with `!g <prompt>`**
@ -72,6 +77,8 @@ First, you want to get a server, for this guide, I will be using DigitalOcean as
Instructions on how to set up a server from start to finish are available on DigitalOcean's website, directly from the community, here: https://www.digitalocean.com/community/tutorials/how-to-set-up-an-ubuntu-20-04-server-on-a-digitalocean-droplet. Ignore the part about setting up an "ssh key", and just use a password instead.
**Please sign up for a DigitalOcean account using my referral link if you'd like to support me https://m.do.co/c/e31eff1231a4**
After you set up the server, the DigitalOcean GUI will give you an IP address; copy this IP address. Afterwards, you will need to SSH into the server. This can be done using a program such as "PuTTY", or by using your command line, if it's supported. To log in to the server, your username will be "root", your password will be the password that you defined earlier when setting up the droplet, and the IP address will be the IP address you copied after the droplet finished being created.
To connect with ssh, run the following command in terminal:

@ -96,7 +96,9 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"):
message = await response_message.edit(
embed=embed,
file=file,
view=SaveView(image_urls, self, self.converser_cog),
)
await message.edit(
view=SaveView(image_urls, self, self.converser_cog, message)
)
else: # Varying case
if not draw_from_optimizer:
@ -104,8 +106,13 @@ class DrawDallEService(commands.Cog, name="DrawDallEService"):
content="Image variation completed!",
embed=embed,
file=file,
view=SaveView(image_urls, self, self.converser_cog, True),
)
await result_message.edit(
view=SaveView(
image_urls, self, self.converser_cog, result_message, True
)
)
redo_users[message.author.id] = RedoUser(
prompt, message, result_message
)
@ -214,7 +221,7 @@ class SaveView(discord.ui.View):
self, image_urls, cog, converser_cog, message, no_retry=False, only_save=None
):
super().__init__(
timeout=10 if not only_save else None
timeout=3600 if not only_save else None
) # 10 minute timeout for Retry, Save
self.image_urls = image_urls
self.cog = cog

@ -51,13 +51,14 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
"that'll be all",
]
self.last_used = {}
self.GLOBAL_COOLDOWN_TIME = 1
self.GLOBAL_COOLDOWN_TIME = 0.25
self.usage_service = usage_service
self.model = model
self.summarize = self.model.summarize_conversations
self.deletion_queue = deletion_queue
self.users_to_interactions = defaultdict(list)
self.redo_users = {}
self.awaiting_responses = []
try:
conversation_file_path = data_path / "conversation_starter_pretext.txt"
@ -199,6 +200,16 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
value="Change the parameter of the model named by <model parameter> to new value <value>",
inline=False,
)
embed.add_field(
name="!draw <image prompt>",
value="Use DALL-E2 to draw an image based on a text prompt",
inline=False,
)
embed.add_field(
name="!imgoptimize <image prompt>",
value="Optimize an image prompt for use with DALL-E2, Midjourney, SD, etc.",
inline=False,
)
embed.add_field(name="!g", value="See this help text", inline=False)
await message.channel.send(embed=embed)
@ -442,6 +453,9 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
)
self.check_conversing(message)
# We got a response, we can allow the user to type again
self.awaiting_responses.remove(message.author.id)
# If the response text is > 3500 characters, paginate and send
debug_message = self.generate_debug_message(prompt, response)
@ -462,9 +476,6 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
self.redo_users[message.author.id].add_interaction(
response_message.id
)
print(
f"Added the interaction {response_message.id} to the redo user {message.author.id}"
)
original_message[message.author.id] = message.id
else:
# We have response_text available, this is the original message that we want to edit
@ -606,13 +617,15 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
if prompt == "converse" or prompt == "converse nothread":
# If the user is already conversating, don't let them start another conversation
if message.author.id in self.conversating_users:
await message.reply(
message = await message.reply(
"You are already conversating with GPT3. End the conversation with !g end or just say 'end' in a supported channel"
)
await self.deletion_queue(message)
return
# If the user is not already conversating, start a conversation with GPT3
self.conversating_users[message.author.id] = User(message.author.id)
# Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
self.conversating_users[message.author.id].history.append(
self.CONVERSATION_STARTER_TEXT
@ -657,6 +670,28 @@ class GPT3ComCon(commands.Cog, name="GPT3ComCon"):
# history to the prompt. We can do this by checking if the user is in the conversating_users dictionary, and if they are,
# we can append their history to the prompt.
if message.author.id in self.conversating_users:
# Since this is async, we don't want to allow the user to send another prompt while a conversation
# prompt is processing, that'll mess up the conversation history!
if message.author.id in self.awaiting_responses:
message = await message.reply(
"You are already waiting for a response from GPT3. Please wait for it to respond before sending another message."
)
# get the current date, add 10 seconds to it, and then turn it into a timestamp.
# we need to use our deletion service because this isn't an interaction, it's a regular message.
deletion_time = datetime.datetime.now() + datetime.timedelta(
seconds=10
)
deletion_time = deletion_time.timestamp()
deletion_message = Deletion(message, deletion_time)
await self.deletion_queue.put(deletion_message)
return
self.awaiting_responses.append(message.author.id)
self.conversating_users[message.author.id].history.append(
"\nHuman: " + prompt + "<|endofstatement|>\n"
)

@ -2,6 +2,8 @@ import asyncio
import traceback
from datetime import datetime
import discord
class Deletion:
def __init__(self, message, timestamp):
@ -26,7 +28,11 @@ class Deletion:
# Check if the current timestamp is greater than the deletion timestamp
if datetime.now().timestamp() > deletion.timestamp:
# If the deletion timestamp has passed, delete the message
await deletion.message.delete_original_response()
# check if deletion.message is of type discord.Message
if isinstance(deletion.message, discord.Message):
await deletion.message.delete()
else:
await deletion.message.delete_original_response()
else:
await deletion_queue.put(deletion)

@ -4,14 +4,13 @@ import tempfile
import uuid
from typing import Tuple, List, Any
import aiohttp
import discord
import openai
# An enum of two modes, TOP_P or TEMPERATURE
import requests
from PIL import Image
from discord import File
from asgiref.sync import sync_to_async
class Mode:
@ -72,7 +71,7 @@ class Model:
"model_max_tokens",
]
openai.api_key = os.getenv("OPENAI_TOKEN")
self.openai_key = os.getenv("OPENAI_TOKEN")
# Use the @property and @setter decorators for all the self fields to provide value checking
@property
@ -305,22 +304,31 @@ class Model:
tokens = self.usage_service.count_tokens(summary_request_text)
response = await sync_to_async(openai.Completion.create)(
model=Models.DAVINCI,
prompt=summary_request_text,
temperature=0.5,
top_p=1,
max_tokens=self.max_tokens - tokens,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
best_of=self.best_of,
)
print(response["choices"][0]["text"])
tokens_used = int(response["usage"]["total_tokens"])
self.usage_service.update_usage(tokens_used)
return response
async with aiohttp.ClientSession() as session:
payload = {
"model": Models.DAVINCI,
"prompt": summary_request_text,
"temperature": 0.5,
"top_p": 1,
"max_tokens": self.max_tokens - tokens,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"best_of": self.best_of,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key}",
}
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
print(response["choices"][0]["text"])
tokens_used = int(response["usage"]["total_tokens"])
self.usage_service.update_usage(tokens_used)
return response
async def send_request(
self,
@ -347,31 +355,36 @@ class Model:
print("The prompt about to be sent is " + prompt)
response = await sync_to_async(openai.Completion.create)(
model=Models.DAVINCI
if any(role.name in self.DAVINCI_ROLES for role in message.author.roles)
else self.model, # Davinci override for admin users
prompt=prompt,
temperature=self.temp if not temp_override else temp_override,
top_p=self.top_p if not top_p_override else top_p_override,
max_tokens=self.max_tokens - tokens
if not max_tokens_override
else max_tokens_override,
presence_penalty=self.presence_penalty
if not presence_penalty_override
else presence_penalty_override,
frequency_penalty=self.frequency_penalty
if not frequency_penalty_override
else frequency_penalty_override,
best_of=self.best_of if not best_of_override else best_of_override,
)
# print(response.__dict__)
# Parse the total tokens used for this request and response pair from the response
tokens_used = int(response["usage"]["total_tokens"])
self.usage_service.update_usage(tokens_used)
return response
async with aiohttp.ClientSession() as session:
payload = {
"model": Models.DAVINCI
if any(role.name in self.DAVINCI_ROLES for role in message.author.roles)
else self.model,
"prompt": prompt,
"temperature": self.temp if not temp_override else temp_override,
"top_p": self.top_p if not top_p_override else top_p_override,
"max_tokens": self.max_tokens - tokens
if not max_tokens_override
else max_tokens_override,
"presence_penalty": self.presence_penalty
if not presence_penalty_override
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if not frequency_penalty_override
else frequency_penalty_override,
"best_of": self.best_of if not best_of_override else best_of_override,
}
headers = {"Authorization": f"Bearer {self.openai_key}"}
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
print(response)
# Parse the total tokens used for this request and response pair from the response
tokens_used = int(response["usage"]["total_tokens"])
self.usage_service.update_usage(tokens_used)
return response
async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]:
# Validate that all the parameters are in a good state before we send the request
@ -385,20 +398,41 @@ class Model:
# print("The prompt about to be sent is " + prompt)
self.usage_service.update_usage_image(self.image_size)
response = None
if not vary:
response = await sync_to_async(openai.Image.create)(
prompt=prompt,
n=self.num_images,
size=self.image_size,
)
payload = {"prompt": prompt, "n": self.num_images, "size": self.image_size}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key}",
}
async with aiohttp.ClientSession() as session:
async with session.post(
"https://api.openai.com/v1/images/generations",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
else:
response = await sync_to_async(openai.Image.create_variation)(
image=open(vary, "rb"),
n=self.num_images,
size=self.image_size,
)
print(response.__dict__)
async with aiohttp.ClientSession() as session:
data = aiohttp.FormData()
data.add_field("n", str(self.num_images))
data.add_field("size", self.image_size)
with open(vary, "rb") as f:
data.add_field(
"image", f, filename="file.png", content_type="image/png"
)
async with session.post(
"https://api.openai.com/v1/images/variations",
headers={
"Authorization": "Bearer " + self.openai_key,
},
data=data,
) as resp:
response = await resp.json()
print(response)
image_urls = []
for result in response["data"]:

@ -1,7 +1,5 @@
py-cord==2.3.2
openai==0.25.0
Pillow==9.3.0
python-dotenv==0.21.0
requests==2.28.1
transformers==4.25.1
asgiref==3.6.0
transformers==4.25.1
Loading…
Cancel
Save