Merge pull request #237 from Kav-K/request-testing

Request testing
Lu Yao 2 years ago committed by GitHub
commit 3baa3f0029
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

3
.gitignore vendored

@ -11,4 +11,5 @@ usage.txt
/dalleimages /dalleimages
/indexes /indexes
/audiotemp /audiotemp
/pickles /pickles
.idea

@ -33,7 +33,7 @@ from services.environment_service import EnvService
from models.openai_model import Model from models.openai_model import Model
__version__ = "11.1.4" __version__ = "11.1.5"
PID_FILE = Path("bot.pid") PID_FILE = Path("bot.pid")

@ -633,10 +633,14 @@ class Model:
else: else:
await self.usage_service.update_usage(tokens_used) await self.usage_service.update_usage(tokens_used)
except Exception as e: except Exception as e:
raise ValueError( traceback.print_exc()
"The API returned an invalid response: " if 'error' in response:
+ str(response["error"]["message"]) raise ValueError(
) from e "The API returned an invalid response: "
+ str(response["error"]["message"])
) from e
else:
raise ValueError("The API returned an invalid response") from e
@backoff.on_exception( @backoff.on_exception(
backoff.expo, backoff.expo,
@ -1010,13 +1014,11 @@ class Model:
stop=None, stop=None,
custom_api_key=None, custom_api_key=None,
is_chatgpt_request=False, is_chatgpt_request=False,
) -> (
Tuple[dict, bool]
): # The response, and a boolean indicating whether or not the context limit was reached. ): # The response, and a boolean indicating whether or not the context limit was reached.
# Validate that all the parameters are in a good state before we send the request # Validate that all the parameters are in a good state before we send the request
if not max_tokens_override: if not max_tokens_override:
if model: if model and model not in Models.GPT4_MODELS and model not in Models.CHATGPT_MODELS:
max_tokens_override = Models.get_max_tokens(model) - tokens max_tokens_override = Models.get_max_tokens(model) - tokens
print(f"The prompt about to be sent is {prompt}") print(f"The prompt about to be sent is {prompt}")
@ -1057,7 +1059,7 @@ class Model:
headers=headers, headers=headers,
) as resp: ) as resp:
response = await resp.json() response = await resp.json()
# print(f"Payload -> {payload}") print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response # Parse the total tokens used for this request and response pair from the response
await self.valid_text_request( await self.valid_text_request(
response, model=self.model if model is None else model response, model=self.model if model is None else model

@ -41,7 +41,9 @@ dependencies = [
"langchain==0.0.115", "langchain==0.0.115",
"unidecode==1.3.6", "unidecode==1.3.6",
"tqdm==4.64.1", "tqdm==4.64.1",
"docx2txt==0.8" "docx2txt==0.8",
"pytest-asyncio==0.21.0",
"pytest~=7.2.2"
] ]
dynamic = ["version"] dynamic = ["version"]

@ -24,3 +24,5 @@ openai-whisper
unidecode==1.3.6 unidecode==1.3.6
tqdm==4.64.1 tqdm==4.64.1
docx2txt==0.8 docx2txt==0.8
pytest-asyncio==0.21.0
pytest~=7.2.2

@ -21,4 +21,6 @@ python-pptx==0.6.21
langchain==0.0.115 langchain==0.0.115
unidecode==1.3.6 unidecode==1.3.6
tqdm==4.64.1 tqdm==4.64.1
docx2txt==0.8 docx2txt==0.8
pytest-asyncio==0.21.0
pytest~=7.2.2

@ -0,0 +1,51 @@
from pathlib import Path
import pytest
from models.openai_model import Model
from transformers import GPT2TokenizerFast
from services.usage_service import UsageService
# Non-ChatGPT -> TODO: make generic test and loop through text models
@pytest.mark.asyncio
async def test_send_req():
    """Smoke-test a plain (non-chat) completion request end to end.

    Sends a simple factual question through Model.send_request and
    checks that the expected answer appears in the completion text.
    """
    service = UsageService(Path("../tests"))
    bot_model = Model(service)
    prompt = 'how many hours are in a day?'
    # Count prompt tokens the same way the bot does, via the GPT-2 tokenizer.
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    prompt_tokens = len(tokenizer(prompt)["input_ids"])
    response = await bot_model.send_request(prompt, prompt_tokens)
    assert '24' in response['choices'][0]['text']
# ChatGPT version
@pytest.mark.asyncio
async def test_send_req_gpt():
    """Smoke-test a ChatGPT (gpt-3.5-turbo) chat-completion request.

    No token count is supplied (None); the chat path computes its own.
    """
    service = UsageService(Path("../tests"))
    bot_model = Model(service)
    question = 'how many hours are in a day?'
    reply = await bot_model.send_request(
        question, None, is_chatgpt_request=True, model="gpt-3.5-turbo"
    )
    assert '24' in reply['choices'][0]['message']['content']
# GPT4 version
@pytest.mark.asyncio
async def test_send_req_gpt4():
    """Smoke-test a GPT-4 chat-completion request.

    Mirrors the gpt-3.5-turbo test but targets the "gpt-4" model.
    """
    service = UsageService(Path("../tests"))
    bot_model = Model(service)
    question = 'how many hours are in a day?'
    reply = await bot_model.send_request(
        question, None, is_chatgpt_request=True, model="gpt-4"
    )
    assert '24' in reply['choices'][0]['message']['content']
# Edit request -> currently broken due to endpoint
# @pytest.mark.asyncio
# async def test_send_edit_req():
# usage_service = UsageService(Path("../tests"))
# model = Model(usage_service)
# text = 'how many hours are in a day?'
# res = await model.send_edit_request(text, codex=True)
# assert '24' in res['choices'][0]['text']
Loading…
Cancel
Save