diff --git a/models/index_model.py b/models/index_model.py
index c6d38ea..c1fcbe7 100644
--- a/models/index_model.py
+++ b/models/index_model.py
@@ -37,7 +37,8 @@ from llama_index import (
     OpenAIEmbedding,
     GithubRepositoryReader,
     MockEmbedding,
-    download_loader, LLMPredictor,
+    download_loader,
+    LLMPredictor,
 )
 
 from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
@@ -165,7 +166,13 @@ class Index_handler:
     def __init__(self, bot, usage_service):
         self.bot = bot
         self.openai_key = os.getenv("OPENAI_TOKEN")
-        self.llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=self.openai_key))
+        self.llm_predictor = LLMPredictor(
+            llm=OpenAIChat(
+                temperature=0,
+                model_name="gpt-3.5-turbo",
+                openai_api_key=self.openai_key,
+            )
+        )
         self.index_storage = defaultdict(IndexData)
         self.loop = asyncio.get_running_loop()
         self.usage_service = usage_service
diff --git a/models/search_model.py b/models/search_model.py
index 6e34a69..2c9b2a9 100644
--- a/models/search_model.py
+++ b/models/search_model.py
@@ -331,7 +331,9 @@ class Search:
 
         embedding_model = OpenAIEmbedding()
 
-        llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo"))
+        llm_predictor = LLMPredictor(
+            llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo")
+        )
 
         if not deep:
             embed_model_mock = MockEmbedding(embed_dim=1536)
@@ -371,7 +373,9 @@ class Search:
             )
             price += total_usage_price
         else:
-            llm_predictor_deep = LLMPredictor(llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo"))
+            llm_predictor_deep = LLMPredictor(
+                llm=OpenAIChat(temperature=0, model_name="gpt-3.5-turbo")
+            )
 
             # Try a mock call first
             llm_predictor_mock = MockLLMPredictor(4096)
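
Note: the change is formatting-only rewrapping in black's style; each rewrapped
LLMPredictor construction is behaviorally identical to its one-line original.
A minimal sketch of the resulting call shape (the LLMPredictor import is shown
in the first hunk; the OpenAIChat import path is assumed to be langchain's, as
it is not visible in this diff):

    import os

    from llama_index import LLMPredictor
    from langchain.llms import OpenAIChat  # assumed import path; not shown in this diff

    # Identical call to the pre-change one-liner, wrapped black-style:
    # one keyword argument per line, trailing commas, and the closing
    # parentheses dedented to the indent of the opening line.
    llm_predictor = LLMPredictor(
        llm=OpenAIChat(
            temperature=0,
            model_name="gpt-3.5-turbo",
            openai_api_key=os.getenv("OPENAI_TOKEN"),
        )
    )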