@@ -19,7 +19,7 @@ from gpt_index import (
     PromptHelper,
     LLMPredictor,
     OpenAIEmbedding,
-    SimpleDirectoryReader,
+    SimpleDirectoryReader, GPTTreeIndex, MockLLMPredictor, MockEmbedding,
 )
 from gpt_index.indices.knowledge_graph import GPTKnowledgeGraphIndex
 from gpt_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
@@ -28,6 +28,7 @@ from langchain import OpenAI
 from services.environment_service import EnvService, app_root_path
 from services.usage_service import UsageService
 
+MAX_SEARCH_PRICE = EnvService.get_max_search_price()
 
 class Search:
     def __init__(self, gpt_model, usage_service):
@@ -130,13 +131,12 @@ class Search:
                     f.write(data)
                     f.close()
                 else:
-                    return "An error occurred while downloading the PDF."
+                    raise ValueError("Could not download PDF")
         # Get the file path of this tempfile.NamedTemporaryFile
         # Save this temp file to an actual file that we can put into something else to read it
         documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
         for document in documents:
             document.extra_info = {"URL": url}
-        print("Loaded the PDF document data")
 
         # Delete the temporary file
         return documents
@@ -155,11 +155,7 @@ class Search:
                     [item["link"] for item in data["items"]],
                 )
             else:
-                print(
-                    "The Google Search API returned an error: "
-                    + str(response.status)
-                )
-                return ["An error occurred while searching.", None]
+                raise ValueError("Error while retrieving links")
 
     async def try_edit(self, message, embed):
         try:
@@ -246,19 +242,17 @@ class Search:
             pdf = False
             try:
                 async with aiohttp.ClientSession() as session:
-                    async with session.get(link, timeout=2) as response:
+                    async with session.get(link, timeout=1) as response:
                         # Add another entry to links from all_links if the link is not already in it to compensate for the failed request
                         if response.status not in [200, 203, 202, 204]:
                             for link2 in all_links:
                                 if link2 not in links:
-                                    print("Found a replacement link")
                                     links.append(link2)
                                     break
                             continue
                         # Follow redirects
                         elif response.status in [301, 302, 303, 307, 308]:
                             try:
-                                print("Adding redirect")
                                 links.append(response.url)
                                 continue
                             except:
@@ -266,7 +260,6 @@ class Search:
                         else:
                             # Detect if the link is a PDF, if it is, we load it differently
                             if response.headers["Content-Type"] == "application/pdf":
-                                print("Found a PDF at the link " + link)
                                 pdf = True
 
             except:
@@ -275,7 +268,6 @@ class Search:
                     # Try to add a link from all_links, this is kind of messy.
                     for link2 in all_links:
                         if link2 not in links:
-                            print("Found a replacement link")
                             links.append(link2)
                             break
                 except:
@@ -307,9 +299,18 @@ class Search:
         )
 
         if not deep:
+            embed_model_mock = MockEmbedding(embed_dim=1536)
+            await self.loop.run_in_executor(
+                None,
+                partial(GPTSimpleVectorIndex, documents, embed_model=embed_model_mock),
+            )
+            total_usage_price = await self.usage_service.get_price(embed_model_mock.last_token_usage, True)
+            if total_usage_price > 1.00:
+                raise ValueError("Doing this search would be prohibitively expensive. Please try a narrower search scope.")
+
             index = await self.loop.run_in_executor(
                 None,
-                partial(GPTSimpleVectorIndex, documents, embed_model=embedding_model),
+                partial(GPTSimpleVectorIndex, documents, embed_model=embedding_model, use_async=True),
             )
             # save the index to disk if not a redo
             if not redo:
@@ -320,22 +321,54 @@ class Search:
                     else ctx.author.id,
                     query,
                 )
+
+            await self.usage_service.update_usage(
+                embedding_model.last_token_usage, embeddings=True
+            )
         else:
-            print("Doing a deep search")
-            llm_predictor_deep = LLMPredictor(
-                llm=OpenAI(model_name="text-davinci-002", temperature=0, max_tokens=-1)
-            )
+            llm_predictor_deep = LLMPredictor(llm=OpenAI(model_name="text-davinci-003"))
+            # Try a mock call first
+            llm_predictor_mock = MockLLMPredictor(4096)
+            embed_model_mock = MockEmbedding(embed_dim=1536)
+            await self.loop.run_in_executor(
+                None,
+                partial(
+                    GPTTreeIndex,
+                    documents,
+                    embed_model=embed_model_mock,
+                    llm_predictor=llm_predictor_mock,
+                ),
+            )
+            total_usage_price = await self.usage_service.get_price(llm_predictor_mock.last_token_usage) + await self.usage_service.get_price(embed_model_mock.last_token_usage, True)
+            if total_usage_price > MAX_SEARCH_PRICE:
+                raise ValueError("Doing this deep search would be prohibitively expensive. Please try a narrower search scope.")
+
             index = await self.loop.run_in_executor(
                 None,
                 partial(
-                    GPTKnowledgeGraphIndex,
+                    GPTTreeIndex,
                     documents,
-                    chunk_size_limit=512,
-                    max_triplets_per_chunk=2,
                     embed_model=embedding_model,
                     llm_predictor=llm_predictor_deep,
+                    use_async=True,
                 ),
             )
+
+            # llm_predictor_deep = LLMPredictor(
+            #     llm=OpenAI(model_name="text-davinci-002", temperature=0, max_tokens=-1)
+            # )
+            # index = await self.loop.run_in_executor(
+            #     None,
+            #     partial(
+            #         GPTKnowledgeGraphIndex,
+            #         documents,
+            #         chunk_size_limit=512,
+            #         max_triplets_per_chunk=2,
+            #         embed_model=embedding_model,
+            #         llm_predictor=llm_predictor_deep,
+            #     ),
+            # )
+
             await self.usage_service.update_usage(
                 embedding_model.last_token_usage, embeddings=True
             )
@@ -348,10 +381,6 @@ class Search:
                 in_progress_message, self.build_search_indexed_embed(query_refined_text)
             )
 
-        await self.usage_service.update_usage(
-            embedding_model.last_token_usage, embeddings=True
-        )
-
         # Now we can search the index for a query:
         embedding_model.last_token_usage = 0
 
@@ -365,17 +394,31 @@ class Search:
                     llm_predictor=llm_predictor,
                     similarity_top_k=nodes or DEFAULT_SEARCH_NODES,
                     text_qa_template=self.qaprompt,
+                    use_async=True,
+                    response_mode="tree_summarize",
                 ),
             )
         else:
+
+            # response = await self.loop.run_in_executor(
+            #     None,
+            #     partial(
+            #         index.query,
+            #         query,
+            #         include_text=True,
+            #         embed_model=embedding_model,
+            #         llm_predictor=llm_predictor_deep,
+            #         use_async=True,
+            #     ),
+            # )
+
             response = await self.loop.run_in_executor(
                 None,
                 partial(
                     index.query,
                     query,
-                    include_text=True,
+                    child_branch_factor=2,
+                    llm_predictor=llm_predictor,
                     embed_model=embedding_model,
-                    llm_predictor=llm_predictor_deep,
-                    use_async=True,
                 ),
             )
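
The core pattern in this patch is the mock dry run: before the real index is built, the same construction is executed against MockLLMPredictor and MockEmbedding, which record token counts without making any OpenAI calls, and the search is aborted when the projected price exceeds the limit. Below is a minimal self-contained sketch of that gate, assuming the gpt_index 0.x API used above; the pricing constants and the estimate_deep_search_price helper are hypothetical stand-ins for the project's UsageService.get_price and are not part of gpt_index.

```python
from gpt_index import GPTTreeIndex, MockEmbedding, MockLLMPredictor

# Hypothetical per-1K-token prices; the real values live in UsageService.
PROMPT_PRICE_PER_1K = 0.02
EMBED_PRICE_PER_1K = 0.0004


def estimate_deep_search_price(documents) -> float:
    """Estimate the dollar cost of a deep search without calling OpenAI."""
    llm_predictor_mock = MockLLMPredictor(4096)
    embed_model_mock = MockEmbedding(embed_dim=1536)

    # Same construction as the real deep-search index; the mock models
    # intercept every LLM and embedding call and only count tokens.
    GPTTreeIndex(
        documents,
        embed_model=embed_model_mock,
        llm_predictor=llm_predictor_mock,
    )

    llm_cost = llm_predictor_mock.last_token_usage / 1000 * PROMPT_PRICE_PER_1K
    embed_cost = embed_model_mock.last_token_usage / 1000 * EMBED_PRICE_PER_1K
    return llm_cost + embed_cost
```

The patch applies this gate twice: an embedding-only estimate (MockEmbedding with GPTSimpleVectorIndex) ahead of the regular search, and the combined LLM-plus-embedding estimate ahead of the GPTTreeIndex deep search, raising ValueError whenever the estimate exceeds the allowed price.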