Kaveen Kumarasinghe 2 years ago
commit bb22d66f48

@ -587,7 +587,6 @@ class Commands(discord.Cog, name="Commands"):
async def paraphrase_action(self, ctx, message: discord.Message):
    """Forward the paraphrase message-command to the converser cog for handling."""
    converser = self.converser_cog
    await converser.paraphrase_action(ctx, message)
# Search slash commands
@discord.slash_command(
name="search",
@ -598,5 +597,4 @@ class Commands(discord.Cog, name="Commands"):
@discord.guild_only()
async def search(self, ctx: discord.ApplicationContext, query: str):
await ctx.respond("Not implemented yet")
#await self.search_cog.search_command(ctx, query)
# await self.search_cog.search_command(ctx, query)

@ -10,6 +10,7 @@ from services.environment_service import EnvService
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
class SearchService(discord.Cog, name="SearchService"):
"""Cog containing translation commands and retrieval of translation services"""
@ -24,14 +25,8 @@ class SearchService(discord.Cog, name="SearchService"):
self.model = Search(gpt_model, pinecone_service)
# Make a mapping of all the country codes and their full country names:
async def search_command(self, ctx, query):
    """Command handler for the /search command.

    Defers the interaction (the web search + embedding round-trips exceed
    Discord's 3-second acknowledgement window), runs the query through the
    Search model, then sends a placeholder acknowledgement.

    NOTE(review): the model's answer is discarded and a literal "ok" is sent
    instead — presumably a work-in-progress stub; confirm before relying on
    this command's output.
    """
    await ctx.defer()
    await self.model.search(query)
    await ctx.respond("ok")

@ -1037,9 +1037,7 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
async def ask_gpt_action(self, ctx, message: discord.Message):
"""Message command. Return the message"""
prompt = await self.mention_to_username(ctx, message.content)
await self.ask_command(
ctx, prompt, None, None, None, None, from_action=prompt
)
await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)
async def paraphrase_action(self, ctx, message: discord.Message):
"""Message command. paraphrase the current message content"""
@ -1049,6 +1047,4 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Construct the paraphrase prompt
prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \n\nParaphrased:"
await self.ask_command(
ctx, prompt, None, None, None, None, from_action=prompt
)
await self.ask_command(ctx, prompt, None, None, None, None, from_action=prompt)

@ -168,7 +168,10 @@ async def main():
bot.add_cog(TranslationService(bot, TranslationModel()))
print("The translation service is enabled.")
if EnvService.get_google_search_api_key() and EnvService.get_google_search_engine_id():
if (
EnvService.get_google_search_api_key()
and EnvService.get_google_search_engine_id()
):
bot.add_cog(SearchService(bot, model, pinecone_search_service))
print("The Search service is enabled.")
@ -184,7 +187,7 @@ async def main():
bot.get_cog("ImgPromptOptimizer"),
bot.get_cog("ModerationsService"),
bot.get_cog("TranslationService"),
bot.get_cog("SearchService")
bot.get_cog("SearchService"),
)
)

@ -8,7 +8,6 @@ from services.usage_service import UsageService
class Search:
    """Holds the GPT model and Pinecone service used to answer search queries."""

    def __init__(self, gpt_model, pinecone_service):
        # Model used for embedding requests and completion requests.
        self.model = gpt_model
        # Vector store used to upsert chunk embeddings and query for
        # nearest neighbours of the query embedding.
        self.pinecone_service = pinecone_service
@ -58,13 +57,17 @@ class Search:
# Create an embedding for the chunk
embedding = await self.model.send_embedding_request(chunk)
# Upsert the embedding for the conversation ID
self.pinecone_service.upsert_conversation_embedding(self.model, conversation_id, chunk,0)
self.pinecone_service.upsert_conversation_embedding(
self.model, conversation_id, chunk, 0
)
print("Finished creating embeddings for the text")
# Now that we have all the embeddings for the search, we can embed the query and then
# query pinecone for the top 5 results
query_embedding = await self.model.send_embedding_request(query)
results = self.pinecone_service.get_n_similar(conversation_id, query_embedding, n=3)
results = self.pinecone_service.get_n_similar(
conversation_id, query_embedding, n=3
)
# Get only the first elements of each result
results = [result[0] for result in results]
@ -72,6 +75,8 @@ class Search:
GPT_QUERY = f"This is a search query. I want to know the answer to the query: {query}. Here are some results from the web: {[str(result) for result in results]}. \n\n Answer:"
# Generate the answer
# Use the tokenizer to determine token amount of the query
await self.model.send_request(GPT_QUERY, UsageService.count_tokens_static(GPT_QUERY))
await self.model.send_request(
GPT_QUERY, UsageService.count_tokens_static(GPT_QUERY)
)
print(texts)

@ -325,4 +325,3 @@ class EnvService:
return google_search_engine_id
except Exception:
return None

@ -64,5 +64,3 @@ class UsageService:
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
res = tokenizer(text)["input_ids"]
return len(res)

Loading…
Cancel
Save