bump gpt index

Kaveen Kumarasinghe 1 year ago
parent f33939f4c0
commit 8c81d233f2

@ -32,7 +32,7 @@ from services.environment_service import EnvService
from models.openai_model import Model
__version__ = "10.9.4"
__version__ = "10.9.5"
PID_FILE = Path("bot.pid")

@ -629,7 +629,7 @@ class Index_handler:
)
total_usage_price = await self.usage_service.get_price(
llm_predictor_mock.last_token_usage,
chatgpt=False, # TODO Enable again when tree indexes are fixed
chatgpt=True, # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
embedding_model_mock.last_token_usage, embeddings=True
)
@ -639,24 +639,20 @@ class Index_handler:
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
llm_predictor_temp_non_cgpt = LLMPredictor(
llm=OpenAI(model_name="text-davinci-003")
) # TODO Get rid of this
tree_index = await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex,
documents=documents,
llm_predictor=llm_predictor_temp_non_cgpt,
llm_predictor=llm_predictor,
embed_model=embedding_model,
use_async=True,
),
)
await self.usage_service.update_usage(
llm_predictor_temp_non_cgpt.last_token_usage, chatgpt=False
) # Todo set to false
llm_predictor.last_token_usage, chatgpt=True
)
await self.usage_service.update_usage(
embedding_model.last_token_usage, embeddings=True
)

@ -98,6 +98,17 @@ class Search:
return embed
def build_search_determining_price_embed(self, refined_query):
    """Build the progress embed shown while the index price is being pre-determined.

    Args:
        refined_query: The refined search query text, displayed back to the user
            inside inline code markup.

    Returns:
        A ``discord.Embed`` titled "Searching the web..." with the refined query
        and a "Pre-determining index price..." status line, blurple-colored, with
        the standard search thumbnail attached.
    """
    # Assemble the description text up front; it must match the original
    # string byte-for-byte: query on its own line inside backticks, then
    # the pre-determination status line.
    status_description = (
        "Refined query:\n"
        f"`{refined_query}`"
        "\nPre-determining index price..."
    )
    progress_embed = discord.Embed(
        title="Searching the web...",
        description=status_description,
        color=discord.Color.blurple(),
    )
    progress_embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
    return progress_embed
def build_search_webpages_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
@ -357,10 +368,15 @@ class Search:
else:
llm_predictor_deep = ChatGPTLLMPredictor()
# # Try a mock call first
# llm_predictor_mock = MockLLMPredictor(4096)
# embed_model_mock = MockEmbedding(embed_dim=1536)
# Try a mock call first
llm_predictor_mock = MockLLMPredictor(4096)
embed_model_mock = MockEmbedding(embed_dim=1536)
# if ctx:
# await self.try_edit(
# in_progress_message, self.build_search_determining_price_embed(query_refined_text)
# )
#
# await self.loop.run_in_executor(
# None,
# partial(
@ -385,7 +401,7 @@ class Search:
# total_usage_price
# )
# )
# TODO Add back the mock when fixed!
# # TODO Add back the mock when fixed!
index = await self.loop.run_in_executor(
None,

@ -32,7 +32,7 @@ dependencies = [
"backoff==2.2.1",
"flask==2.2.3",
"beautifulsoup4==4.11.1",
"gpt-index==0.4.17",
"gpt-index==0.4.18",
"PyPDF2==3.0.1",
"youtube_transcript_api==0.5.0",
"sentencepiece==0.1.97",

@ -12,7 +12,7 @@ sqlitedict==2.1.0
backoff==2.2.1
flask==2.2.3
beautifulsoup4==4.11.1
gpt-index==0.4.17
gpt-index==0.4.18
PyPDF2==3.0.1
youtube_transcript_api==0.5.0
sentencepiece==0.1.97

@ -12,7 +12,7 @@ sqlitedict==2.1.0
backoff==2.2.1
flask==2.2.3
beautifulsoup4==4.11.1
gpt-index==0.4.17
gpt-index==0.4.18
PyPDF2==3.0.1
youtube_transcript_api==0.5.0
sentencepiece==0.1.97

Loading…
Cancel
Save