ShareGPT Integration

Kaveen Kumarasinghe 1 year ago
parent dba1f934a5
commit 48b31d8d05

@ -9,6 +9,7 @@ import aiofiles
import json
import discord
from discord import ClientUser
from models.deepl_model import TranslationModel
from models.embed_statics_model import EmbedStatics
@ -20,6 +21,7 @@ from models.user_model import Thread, EmbeddedConversationItem
from collections import defaultdict
from sqlitedict import SqliteDict
from services.sharegpt_service import ShareGPTService
from services.text_service import SetupModal, TextService
original_message = {}
@ -99,11 +101,15 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
self.awaiting_responses = []
self.awaiting_thread_responses = []
self.conversation_threads = {}
self.full_conversation_history = defaultdict(list)
self.summarize = self.model.summarize_conversations
# Pinecone data
self.pinecone_service = pinecone_service
# Sharing service
self.sharegpt_service = ShareGPTService()
try:
conversation_file_path = EnvService.find_shared_file(
"conversation_starter_pretext.txt"
@ -242,6 +248,14 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
"This is not a conversation thread that you own!",
delete_after=5,
)
return
if normalized_user_id in self.awaiting_responses:
await ctx.reply(embed=discord.Embed(
title=f"Please wait for a response before ending the conversation.",
color=0x808080,
))
return
except Exception:
traceback.print_exc()
@ -277,7 +291,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
delete_after=10,
)
await ctx.channel.send(embed=EmbedStatics.generate_end_embed())
await ctx.channel.send(embed=EmbedStatics.generate_end_embed(), view=ShareView(self, ctx.channel.id))
# Close all conversation threads for the user
# If at conversation limit then fetch the owner and close the thread for them
@ -621,7 +635,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
def cleanse_response(self, response_text):
"""Cleans history tokens from response"""
response_text = response_text.replace("GPTie:\n", "")
response_text = response_text.replace("<yourname>:", "")
response_text = response_text.replace("You:", "")
response_text = response_text.replace(BOT_NAME.replace(" ", ""), "")
response_text = response_text.replace(BOT_NAME, "")
response_text = response_text.replace("<|endofstatement|>", "")
@ -1273,3 +1288,46 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
presence_penalty=None,
from_other_action=from_other_action,
)
class ShareView(discord.ui.View):
    """View attached to the end-of-conversation embed, offering a ShareGPT button."""

    def __init__(self, converser_cog, conversation_id):
        # The share option stays clickable for one hour after the conversation ends.
        super().__init__(timeout=3600)  # 1 hour interval to share the conversation.
        self.converser_cog = converser_cog
        self.conversation_id = conversation_id
        self.add_item(ShareButton(converser_cog, conversation_id))

    async def on_timeout(self):
        # Drop the share button once the hour is up.
        # NOTE(review): this only clears the view's items in memory; the message
        # is not edited here, so the rendered button may remain visible — confirm
        # this is the intended behavior.
        self.clear_items()
class ShareButton(discord.ui.Button["ShareView"]):
    """Button that uploads the conversation history to ShareGPT and replies with the link."""

    def __init__(self, converser_cog, conversation_id):
        super().__init__(
            style=discord.ButtonStyle.green,
            label="Share Conversation",
            custom_id="share_conversation",
        )
        self.converser_cog = converser_cog
        self.conversation_id = conversation_id

    async def callback(self, interaction: discord.Interaction):
        """Share the conversation and send the resulting URL (or an error embed) to the user."""
        try:
            bot_user = self.converser_cog.bot.user
            # Prefer the bot's custom avatar; fall back to the default avatar.
            avatar_url = (
                bot_user.avatar.url if bot_user.avatar else bot_user.default_avatar.url
            )
            # Renamed from `id` to avoid shadowing the builtin.
            share_id = await self.converser_cog.sharegpt_service.format_and_share(
                self.converser_cog.full_conversation_history[self.conversation_id],
                avatar_url,
            )
            url = f"https://shareg.pt/{share_id}"
            await interaction.response.send_message(
                embed=EmbedStatics.get_conversation_shared_embed(url)
            )
        except ValueError as e:
            # Raised by the ShareGPT service on a non-200 API response.
            traceback.print_exc()
            await interaction.response.send_message(
                embed=EmbedStatics.get_conversation_share_failed_embed(
                    "The ShareGPT API returned an error: " + str(e)
                ),
                ephemeral=True,
                delete_after=15,
            )
        except Exception as e:
            # Any other failure (network error, malformed history, ...).
            traceback.print_exc()
            await interaction.response.send_message(
                embed=EmbedStatics.get_conversation_share_failed_embed(str(e)),
                ephemeral=True,
                delete_after=15,
            )

@ -19,7 +19,9 @@ The conversations are in this format:
<username>: [MESSAGE 1] <|endofstatement|>
<yourname>: [RESPONSE TO MESSAGE 1] <|endofstatement|>
Here is an example conversation:
The placeholder <username> is replaced with the username of the user you are talking to, and <yourname> is replaced with your name in an actual conversation.
Here is an example conversation with a user named Kaveen:
Kaveen: hey! how's it going today? <|endofstatement|>
<yourname>: it's going great, im just chilling watching some youtube videos, hbu?
@ -29,6 +31,6 @@ Kaveen: I'm making a discord bot <|endofstatement|>
<yourname>: oh cool, what language are you using? <|endofstatement|>
...
There can be an arbitrary amount of newlines between chat entries. <username> can be any name, pay attention to who's talking. The text "<|endofstatement|>" is used to separate chat entries and make it easier for you to understand the context
There can be an arbitrary amount of newlines between chat entries. <username> can be any name, pay attention to who's talking. The text "<|endofstatement|>" is used to separate chat entries and make it easier for you to understand the context.
You speak in a fun, casual, and friendly tone, not worrying about capitalizations and using slang like "lol", "lmao", and etc, like you're talking to a friend.

@ -31,7 +31,7 @@ from services.environment_service import EnvService
from models.openai_model import Model
__version__ = "10.7.5"
__version__ = "10.8.0"
PID_FILE = Path("bot.pid")

@ -244,3 +244,29 @@ class EmbedStatics:
url="https://i.imgur.com/txHhNzL.png"
)
return embed
@staticmethod
def get_conversation_shared_embed(url):
    """Return the success embed pointing the user at their shared conversation."""
    share_embed = discord.Embed(
        title="Conversation Shared",
        description=f"You can access your shared conversation at: {url}",
        color=discord.Color.blurple(),
    )
    # Thumbnail: share-success icon (https://i.imgur.com/8OIZc1A.png).
    share_embed.set_thumbnail(url="https://i.imgur.com/8OIZc1A.png")
    return share_embed
@staticmethod
def get_conversation_share_failed_embed(message):
    """Return an error embed explaining why conversation sharing failed.

    Args:
        message: Human-readable failure reason appended to the description.
    """
    failure_embed = discord.Embed(
        title="Conversation Sharing",
        # Was `f"Conversation sharing failed: "+message` — an f-string with no
        # placeholder plus concatenation; folded into a single f-string.
        description=f"Conversation sharing failed: {message}",
        color=discord.Color.red(),
    )
    # Thumbnail: failure icon (https://i.imgur.com/VLJ32x7.png).
    failure_embed.set_thumbnail(url="https://i.imgur.com/VLJ32x7.png")
    return failure_embed

@ -66,3 +66,14 @@ class PineconeService:
# Sort the relevant phrases based on the timestamp
relevant_phrases.sort(key=lambda x: x[1])
return relevant_phrases
def get_all_conversation_items(self, conversation_id: int):
    """Return the ids of all indexed items belonging to *conversation_id*.

    Queries with a zero vector purely as a vehicle for the metadata filter —
    the similarity ranking itself is irrelevant here; up to 1000 matches are
    returned.
    """
    response = self.index.query(
        vector=[0] * 1536,
        top_k=1000, filter={"conversation_id": conversation_id}
    )
    # Collect only the match ids (strings).
    phrases = [match["id"] for match in response["matches"]]
    # Sort on timestamp
    # NOTE(review): `phrases` holds plain id strings, so key=lambda x: x[1]
    # sorts by each id's SECOND CHARACTER, not by any timestamp. Elsewhere in
    # this service the same key is applied to (text, timestamp) tuples —
    # confirm the intended sort key for bare ids.
    phrases.sort(key=lambda x: x[1])
    return phrases

@ -0,0 +1,43 @@
import json
import aiohttp
class ShareGPTService:
    """Formats a conversation history and uploads it to sharegpt.com."""

    # Avatar used when the caller supplies none.
    DEFAULT_AVATAR_URL = "https://i.imgur.com/SpuAF0v.png"

    def __init__(self):
        # ShareGPT's public conversation-upload endpoint.
        self.API_URL = "https://sharegpt.com/api/conversations"

    def format_conversation(self, conversation_history, avatar_url=None):
        """Serialize *conversation_history* into ShareGPT's JSON payload.

        Payload shape (note the key is 'value', not 'text'):
        { 'avatarUrl': <url>,
          'items': [ {'from': 'human', 'value': <text>},
                     {'from': 'gpt',   'value': <text>}, ... ] }

        The history is a flat list of alternating human/bot snippets,
        starting with the human.

        Args:
            conversation_history: Alternating list of message strings.
            avatar_url: Bot avatar URL; None falls back to DEFAULT_AVATAR_URL.
                (Previously ``format_and_share`` passed None straight through,
                clobbering the default — now None is normalized here.)

        Returns:
            The JSON-encoded payload as a str.
        """
        if avatar_url is None:
            avatar_url = self.DEFAULT_AVATAR_URL
        conversation = {"avatarUrl": avatar_url, "items": []}
        # Even indices are the human's turns, odd indices are the bot's.
        for index, snippet in enumerate(conversation_history):
            speaker = "human" if index % 2 == 0 else "gpt"
            conversation["items"].append({"from": speaker, "value": snippet})
        return json.dumps(conversation)

    async def format_and_share(self, conversation_history, avatar_url=None):
        """Upload the conversation to ShareGPT and return its share id.

        Raises:
            ValueError: If ShareGPT responds with a non-200 status.
        """
        payload = self.format_conversation(conversation_history, avatar_url)
        headers = {"Content-Type": "application/json"}
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.API_URL, data=payload, headers=headers
            ) as response:
                if response.status == 200:
                    response_json = await response.json()
                    return response_json["id"]
                raise ValueError(
                    f"ShareGPT returned an invalid response: {await response.text()}"
                )

@ -1,6 +1,7 @@
import datetime
import re
import traceback
from collections import defaultdict
import aiohttp
import discord
@ -17,7 +18,6 @@ from services.moderations_service import Moderation
BOT_NAME = EnvService.get_custom_bot_name()
PRE_MODERATE = EnvService.get_premoderate()
class TextService:
def __init__(self):
pass
@ -66,11 +66,11 @@ class TextService:
redo_request (bool, optional): If we're redoing a previous prompt. Defaults to False.
from_action (bool, optional): If the function is being called from a message action. Defaults to False.
"""
new_prompt = (
prompt + "\n" + BOT_NAME
new_prompt, _new_prompt_clean = (
prompt #+ "\n" + BOT_NAME
if not from_ask_command and not from_edit_command and not redo_request
else prompt
)
), prompt
stop = f"{ctx.author.display_name if user is None else user.display_name}:"
@ -109,6 +109,7 @@ class TextService:
)
new_prompt = f"\n{user_displayname}: {new_prompt} <|endofstatement|>\n"
# new_prompt = new_prompt.encode("ascii", "ignore").decode()
new_prompt = unidecode.unidecode(new_prompt)
@ -142,6 +143,7 @@ class TextService:
timestamp,
custom_api_key=custom_api_key,
)
# Print all phrases
embedding_prompt_less_author = await converser_cog.model.send_embedding_request(
prompt_less_author, custom_api_key=custom_api_key
@ -231,6 +233,7 @@ class TextService:
"give me one moment!"
)
await converser_cog.summarize_conversation(ctx, new_prompt)
# Check again if the prompt is about to go past the token limit
@ -337,11 +340,17 @@ class TextService:
):
conversation_id = ctx.channel.id
# A cleaner version for the convo history
response_text_clean = (
str(response_text)
)
# Create an embedding and timestamp for the prompt
response_text = (
"\n" + BOT_NAME + str(response_text) + "<|endofstatement|>\n"
)
# response_text = response_text.encode("ascii", "ignore").decode()
response_text = unidecode.unidecode(response_text)
@ -367,6 +376,10 @@ class TextService:
# Cleanse again
response_text = converser_cog.cleanse_response(response_text)
converser_cog.full_conversation_history[ctx.channel.id].append(
response_text
)
# escape any other mentions like @here or @everyone
response_text = discord.utils.escape_mentions(response_text)
@ -660,6 +673,7 @@ class TextService:
)
thinking_embed.set_footer(text="This may take a few seconds.")
thinking_message = await message.reply(embed=thinking_embed)
converser_cog.full_conversation_history[message.channel.id].append(prompt)
await TextService.encapsulated_send(
converser_cog,

Loading…
Cancel
Save