import pinecone


class PineconeService:
    """Thin wrapper around a Pinecone index that stores conversation
    text keyed by the text itself, with conversation metadata."""

    def __init__(self, index: pinecone.Index):
        self.index = index

    def upsert_basic(self, text, embeddings):
        # The raw text doubles as the vector ID.
        self.index.upsert([(text, embeddings)])

    def get_all_for_conversation(self, conversation_id: int):
        # NOTE: Pinecone's query API expects a `vector` (or `id`) argument;
        # callers that only need the metadata filter can pass a zero vector
        # of the index's dimension.
        response = self.index.query(
            top_k=100, filter={"conversation_id": conversation_id}
        )
        return response

    async def upsert_conversation_embedding(
        self, model, conversation_id: int, text, timestamp, custom_api_key=None
    ):
        # If the text is longer than 500 characters, split it into multiple entries.
        first_embedding = None
        if len(text) > 500:
            # Split the text into 500-character chunks
            chunks = [text[i : i + 500] for i in range(0, len(text), 500)]
            for chunk in chunks:
                # Create an embedding for each chunk
                embedding = await model.send_embedding_request(
                    chunk, custom_api_key=custom_api_key
                )
                # Remember the first chunk's embedding so it can be returned
                if not first_embedding:
                    first_embedding = embedding
                # Metadata belongs inside the (id, values, metadata) tuple;
                # upsert() takes no separate `metadata` keyword.
                self.index.upsert(
                    [
                        (
                            chunk,
                            embedding,
                            {
                                "conversation_id": conversation_id,
                                "timestamp": timestamp,
                            },
                        )
                    ]
                )
            return first_embedding
        else:
            embedding = await model.send_embedding_request(
                text, custom_api_key=custom_api_key
            )
            self.index.upsert(
                [
                    (
                        text,
                        embedding,
                        {"conversation_id": conversation_id, "timestamp": timestamp},
                    )
                ]
            )
            return embedding

    def get_n_similar(self, conversation_id: int, embedding, n=10):
        response = self.index.query(
            vector=embedding,
            top_k=n,
            include_metadata=True,
            filter={"conversation_id": conversation_id},
        )
        # Pair each match's ID (the stored text) with its timestamp
        relevant_phrases = [
            (match["id"], match["metadata"]["timestamp"])
            for match in response["matches"]
        ]
        # Sort the relevant phrases chronologically by timestamp
        relevant_phrases.sort(key=lambda x: x[1])
        return relevant_phrases
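
# ---------------------------------------------------------------------------
# Usage sketch (not part of the service): a minimal, hypothetical example of
# how PineconeService might be wired up. The index name, API key, embedding
# dimension, and the `model` object with its async `send_embedding_request`
# method are all assumptions for illustration, not defined in this file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    # Hypothetical async embedding model satisfying the interface the
    # service expects: send_embedding_request(text, custom_api_key=None).
    class FakeModel:
        async def send_embedding_request(self, text, custom_api_key=None):
            # Return a fixed-size dummy vector; a real model would call an
            # embeddings API here.
            return [0.0] * 1536

    async def demo():
        # Assumes the classic pinecone-client initialization; the key,
        # environment, and index name are placeholders.
        pinecone.init(api_key="YOUR_API_KEY", environment="us-east1-gcp")
        service = PineconeService(pinecone.Index("conversation-embeddings"))

        model = FakeModel()
        embedding = await service.upsert_conversation_embedding(
            model, conversation_id=1, text="hello world", timestamp=1700000000
        )
        # Retrieve the stored phrases most similar to the new embedding.
        print(service.get_n_similar(1, embedding, n=5))

    asyncio.run(demo())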