Kaveen Kumarasinghe 1 year ago
commit 2bdd9baced

@ -45,7 +45,7 @@ SUPPORT SERVER FOR BOT SETUP: https://discord.gg/WvAHXDMS7Q (You can try out the
# Recent Notable Updates
- **ChatGPT API Integration** - The ChatGPT API has been released and our bot is now fully integrated with it! Change the model to one of the ChatGPT turbo models with `/system settings`, or include the model as a param in your `/gpt converse`, `/gpt ask`, etc requests! The two currently available ChatGPT models are `gpt-3.5-turbo` and `gpt-3.5-turbo-0301`
- **ChatGPT API Integration** - The ChatGPT API has been released and our bot is now fully integrated with it! Change the model to one of the ChatGPT turbo models with `/system settings`, or include the model as a param in your `/gpt converse`, `/gpt ask`, etc requests! The two currently available ChatGPT models are `gpt-3.5-turbo` and `gpt-3.5-turbo-0301`. This change is very experimental, so we're looking for your feedback and input on what you think of the new model's performance, especially for search and indexing functionality.
- **AI-Assisted Google Search** - Use GPT3 to browse the internet: you can search the internet for a query, and GPT3 will look at the top websites for you automatically and formulate an answer to your query! You can also ask follow-up questions; this is kinda like BingGPT, but much better lol!
<p align="center"/>

@ -615,7 +615,8 @@ class Index_handler:
),
)
total_usage_price = await self.usage_service.get_price(
llm_predictor_mock.last_token_usage, chatgpt=False, # TODO Enable again when tree indexes are fixed
llm_predictor_mock.last_token_usage,
chatgpt=False, # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
embedding_model_mock.last_token_usage, embeddings=True
)
@ -625,7 +626,9 @@ class Index_handler:
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
llm_predictor_temp_non_cgpt = LLMPredictor(llm=OpenAI(model_name="text-davinci-003")) # TODO Get rid of this
llm_predictor_temp_non_cgpt = LLMPredictor(
llm=OpenAI(model_name="text-davinci-003")
) # TODO Get rid of this
tree_index = await self.loop.run_in_executor(
None,
@ -638,7 +641,9 @@ class Index_handler:
),
)
await self.usage_service.update_usage(llm_predictor_temp_non_cgpt.last_token_usage, chatgpt=False) # Todo set to false
await self.usage_service.update_usage(
llm_predictor_temp_non_cgpt.last_token_usage, chatgpt=False
) # Todo set to false
await self.usage_service.update_usage(
embedding_model.last_token_usage, embeddings=True
)
@ -748,7 +753,6 @@ class Index_handler:
)
try:
embedding_model = OpenAIEmbedding()
embedding_model.last_token_usage = 0
response = await self.loop.run_in_executor(
@ -766,7 +770,9 @@ class Index_handler:
),
)
print("The last token usage was ", llm_predictor.last_token_usage)
await self.usage_service.update_usage(llm_predictor.last_token_usage, chatgpt=True)
await self.usage_service.update_usage(
llm_predictor.last_token_usage, chatgpt=True
)
await self.usage_service.update_usage(
embedding_model.last_token_usage, embeddings=True
)

@ -400,15 +400,18 @@ class Search:
)
total_usage_price = await self.usage_service.get_price(
llm_predictor_deep.last_token_usage, chatgpt=True,
llm_predictor_deep.last_token_usage,
chatgpt=True,
) + await self.usage_service.get_price(
embedding_model.last_token_usage, embeddings=True)
embedding_model.last_token_usage, embeddings=True
)
await self.usage_service.update_usage(
embedding_model.last_token_usage, embeddings=True
)
await self.usage_service.update_usage(
llm_predictor_deep.last_token_usage, chatgpt=True,
llm_predictor_deep.last_token_usage,
chatgpt=True,
)
price += total_usage_price
@ -451,7 +454,7 @@ class Search:
partial(
index.query,
query,
embedding_mode='hybrid',
embedding_mode="hybrid",
llm_predictor=llm_predictor,
include_text=True,
embed_model=embedding_model,
@ -461,7 +464,9 @@ class Search:
),
)
await self.usage_service.update_usage(llm_predictor.last_token_usage, chatgpt=True)
await self.usage_service.update_usage(
llm_predictor.last_token_usage, chatgpt=True
)
await self.usage_service.update_usage(
embedding_model.last_token_usage, embeddings=True
)

Loading…
Cancel
Save