From d78a7d2fad7eecde26c022206daef8b3548d2699 Mon Sep 17 00:00:00 2001
From: Lu Yao Chen
Date: Thu, 23 Mar 2023 14:34:25 -0700
Subject: [PATCH] chatgpt send request and fix logic for max tokens

---
 models/openai_model.py |  2 +-
 tests/test_requests.py | 12 +++++++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/models/openai_model.py b/models/openai_model.py
index b8cfced..659e4fc 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -1018,7 +1018,7 @@ class Model:
 
         # Validate that all the parameters are in a good state before we send the request
         if not max_tokens_override:
-            if model:
+            if model and model not in Models.GPT4_MODELS and model not in Models.CHATGPT_MODELS:
                 max_tokens_override = Models.get_max_tokens(model) - tokens
 
         print(f"The prompt about to be sent is {prompt}")
diff --git a/tests/test_requests.py b/tests/test_requests.py
index 6226bba..14ef128 100644
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -16,6 +16,16 @@ async def test_send_req():
     model = Model(usage_service)
     prompt = 'how many hours are in a day?'
     tokens = len(GPT2TokenizerFast.from_pretrained("gpt2")(prompt)["input_ids"])
-    # tokens = 60
     res = await model.send_request(prompt, tokens)
     assert '24' in res['choices'][0]['text']
+
+
+# ChatGPT version
+@pytest.mark.asyncio
+async def test_send_req_gpt():
+
+    usage_service = UsageService(Path("../tests"))
+    model = Model(usage_service)
+    prompt = 'how many hours are in a day?'
+    res = await model.send_request(prompt, None, is_chatgpt_request=True, model="gpt-3.5-turbo")
+    assert '24' in res['choices'][0]['message']['content']
\ No newline at end of file
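
A note for reviewers (not part of the patch): the guard added in
models/openai_model.py skips the computed max_tokens_override for GPT-4 and
ChatGPT models, and the new test reads the reply from
choices[0]['message']['content'] rather than choices[0]['text'], which is the
standard shape difference between OpenAI chat completions and classic
completions. The sketch below restates both points outside the diff context.
resolve_max_tokens and extract_text are hypothetical helper names chosen for
illustration; only Models.GPT4_MODELS, Models.CHATGPT_MODELS, and
Models.get_max_tokens come from the patch itself.

    def resolve_max_tokens(models, model, tokens, max_tokens_override=None):
        """Mirror the patched guard: only classic completion models get a
        computed override; chat models manage token limits differently."""
        if not max_tokens_override:
            if model and model not in models.GPT4_MODELS and model not in models.CHATGPT_MODELS:
                # Leave room for the prompt's own tokens within the model limit.
                max_tokens_override = models.get_max_tokens(model) - tokens
        return max_tokens_override

    def extract_text(response, is_chatgpt_request):
        """Chat completions nest output under message.content; classic
        completions expose it directly as text."""
        choice = response["choices"][0]
        if is_chatgpt_request:
            return choice["message"]["content"]
        return choice["text"]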