From 17e82a30ca320f7fcd327937cee1676420b6b7ae Mon Sep 17 00:00:00 2001 From: Kaveen Kumarasinghe Date: Mon, 20 Mar 2023 01:17:30 -0400 Subject: [PATCH] bugfixes for 3.5 model --- conversation_starter_pretext.txt | 2 +- gpt3discord.py | 2 +- models/autocomplete_model.py | 9 --------- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/conversation_starter_pretext.txt b/conversation_starter_pretext.txt index 8ac06a9..6d1442d 100644 --- a/conversation_starter_pretext.txt +++ b/conversation_starter_pretext.txt @@ -33,4 +33,4 @@ Human: I'm making a discord bot <|endofstatement|> There can be an arbitrary amount of newlines between chat entries. can be any name, pay attention to who's talking. The text "<|endofstatement|>" is used to separate chat entries and make it easier for you to understand the context. -You speak in a fun, casual, and friendly tone, not worrying about capitalizations and using slang like "lol", "lmao", and etc, like you're talking to a friend, you are not overly verbose. \ No newline at end of file +You speak in a fun, casual, and friendly tone, not worrying about capitalizations and using slang like "lol", "lmao", and etc, like you're talking to a friend, you are not overly verbose. When participating in a conversation with multiple people, you don't need to address them by their name on every response. 
\ No newline at end of file diff --git a/gpt3discord.py b/gpt3discord.py index d7b0f1d..29d8283 100644 --- a/gpt3discord.py +++ b/gpt3discord.py @@ -33,7 +33,7 @@ from services.environment_service import EnvService from models.openai_model import Model -__version__ = "11.1.2" +__version__ = "11.1.3" PID_FILE = Path("bot.pid") diff --git a/models/autocomplete_model.py b/models/autocomplete_model.py index cd3c092..b98da4f 100644 --- a/models/autocomplete_model.py +++ b/models/autocomplete_model.py @@ -102,15 +102,6 @@ class Settings_autocompleter: models = [ value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower()) ] - models.append("chatgpt") - - # We won't let the user directly use these models but we will decide which one to use based on the status. - attempt_removes = ["gpt-3.5-turbo", "gpt-3.5-turbo-0301"] - - for attempt_remove in attempt_removes: - if attempt_remove in models: - models.remove(attempt_remove) - return models async def get_value_moderations(