Remove codex from edit and code models

Fix edit command
Fix token counting for edit and gpt-4 in converse
Add newlines manually with ask when using turbo and gpt-4
Rene Teigen 1 year ago
parent 580f02557b
commit ab4b4b0971

@ -105,7 +105,7 @@ These commands are grouped, so each group has a prefix but you can easily tab co
`/gpt ask <prompt> <temp> <top_p> <frequency penalty> <presence penalty>` Ask the GPT3 Davinci 003 model a question. Optional overrides available
`/gpt edit <instruction> <input> <temp> <top_p> <codex>` Use the bot to edit text using the given instructions for how to do it, currently an alpha openai feature so results might vary. Codex uses a model trained on code. Editing is currently free
`/gpt edit <instruction> <input> <temp> <top_p>` Use the bot to edit text using the given instructions for how to do it, currently an alpha openai feature so results might vary. Editing is currently free
`/gpt converse <opener> <opener_file> <private> <minimal>` - Start a conversation with the bot, like ChatGPT. Also use the option `use_threads:False` to start a conversation in a full discord channel!

@ -413,9 +413,6 @@ class Commands(discord.Cog, name="Commands"):
min_value=0,
max_value=1,
)
@discord.option(
name="codex", description="Enable codex version", required=False, default=False
)
@discord.guild_only()
async def edit(
self,
@ -425,10 +422,9 @@ class Commands(discord.Cog, name="Commands"):
private: bool,
temperature: float,
top_p: float,
codex: bool,
):
await self.converser_cog.edit_command(
ctx, instruction, text, private, temperature, top_p, codex
ctx, instruction, text, private, temperature, top_p
)
@add_to_group("gpt")

@ -141,7 +141,6 @@ class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
ctx=ctx,
response=response_message,
instruction=None,
codex=False,
paginator=None,
)
self.converser_cog.redo_users[user.id].add_interaction(response_message.id)

@ -47,7 +47,7 @@ class SearchService(discord.Cog, name="SearchService"):
async def paginate_embed(
self, response_text, user: discord.Member, original_link=None
):
"""Given a response text make embed pages and return a list of the pages. Codex makes it a codeblock in the embed"""
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
@ -59,9 +59,9 @@ class SearchService(discord.Cog, name="SearchService"):
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"Search Results"
title="Search Results"
if not original_link
else f"Follow-up results",
else "Follow-up results",
description=chunk,
url=original_link,
)

@ -500,14 +500,8 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
response_message = await ctx.channel.send(chunk)
return response_message
async def paginate_embed(self, response_text, codex, prompt=None, instruction=None):
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages. Codex makes it a codeblock in the embed"""
if codex: # clean codex input
response_text = response_text.replace("```", "")
response_text = response_text.replace(f"***Prompt: {prompt}***\n", "")
response_text = response_text.replace(
f"***Instruction: {instruction}***\n\n", ""
)
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
@ -520,15 +514,13 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if not first:
page = discord.Embed(
title=f"Page {count}",
description=chunk
if not codex
else f"***Prompt:{prompt}***\n***Instruction:{instruction:}***\n```python\n{chunk}\n```",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk if not codex else f"```python\n{chunk}\n```",
description=chunk,
)
pages.append(page)
@ -945,7 +937,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
private: bool,
temperature: float,
top_p: float,
codex: bool,
):
"""Command handler. Requests and returns a generation with no extras to the edit endpoint
@ -955,7 +946,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
text (str): The text that should be modified
temperature (float): Sets the temperature override
top_p (float): Sets the top p override
codex (bool): Enables the codex edit model
"""
user = ctx.user
@ -991,7 +981,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
overrides=overrides,
instruction=instruction,
from_edit_command=True,
codex=codex,
custom_api_key=user_api_key,
)

@ -201,7 +201,7 @@ class Index_handler:
return False
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages. Codex makes it a codeblock in the embed"""
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]

@ -54,16 +54,11 @@ class Models:
BABBAGE = "text-babbage-001"
ADA = "text-ada-001"
# Code models
CODE_DAVINCI = "code-davinci-002"
CODE_CUSHMAN = "code-cushman-001"
# Embedding models
EMBEDDINGS = "text-embedding-ada-002"
# Edit models
EDIT = "text-davinci-edit-001"
CODE_EDIT = "code-davinci-edit-001"
# ChatGPT Models
TURBO = "gpt-3.5-turbo"
@ -79,8 +74,6 @@ class Models:
CURIE,
BABBAGE,
ADA,
CODE_DAVINCI,
CODE_CUSHMAN,
TURBO,
TURBO_DEV,
GPT4,
@ -88,7 +81,7 @@ class Models:
]
CHATGPT_MODELS = [TURBO, TURBO_DEV]
GPT4_MODELS = [GPT4, GPT4_32]
EDIT_MODELS = [EDIT, CODE_EDIT]
EDIT_MODELS = [EDIT]
DEFAULT = DAVINCI
LOW_USAGE_MODEL = CURIE
@ -99,8 +92,6 @@ class Models:
"text-curie-001": 2024,
"text-babbage-001": 2024,
"text-ada-001": 2024,
"code-davinci-002": 7900,
"code-cushman-001": 2024,
TURBO: 4096,
TURBO_DEV: 4096,
GPT4: 8192,
@ -630,6 +621,8 @@ class Model:
completion_tokens=int(response["usage"]["completion_tokens"]),
gpt4=True,
)
if model and model in Models.EDIT_MODELS:
pass
else:
await self.usage_service.update_usage(tokens_used)
except Exception as e:
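This hunk is the "fix token counting for edit" part of the commit: usage is only recorded when the responding model is not one of the (currently free) edit models. A minimal sketch of the guard, with an assumed simplified signature (the real check lives inside the model's response-validation path and also handles GPT-4 pricing separately):

```python
# Rough sketch of the usage-tracking guard added above (helper name and
# simplified signature are assumptions, not the actual method).
async def track_usage(self, response, model=None):
    tokens_used = int(response["usage"]["total_tokens"])
    if model and model in Models.EDIT_MODELS:
        return  # edits are currently free, so don't bill these tokens
    await self.usage_service.update_usage(tokens_used)
```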
@ -686,17 +679,16 @@ class Model:
text=None,
temp_override=None,
top_p_override=None,
codex=False,
custom_api_key=None,
):
print(
f"The text about to be edited is [{text}] with instructions [{instruction}] codex [{codex}]"
f"The text about to be edited is [{text}] with instructions [{instruction}]"
)
print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override}")
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.EDIT if codex is False else Models.CODE_EDIT,
"model": Models.EDIT,
"input": "" if text is None else text,
"instruction": instruction,
"temperature": self.temp if temp_override is None else temp_override,
@ -710,7 +702,7 @@ class Model:
"https://api.openai.com/v1/edits", json=payload, headers=headers
) as resp:
response = await resp.json()
await self.valid_text_request(response)
await self.valid_text_request(response, model=Models.EDIT)
return response
@backoff.on_exception(
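An illustrative call to the simplified method (argument values are made up; keyword names are assumed from the payload keys above). With the codex flag removed, every edit request is served by text-davinci-edit-001:

```python
# Hypothetical usage sketch of the trimmed send_edit_request signature.
response = await model.send_edit_request(
    instruction="Fix the grammar",
    text="there going to the store tomorow",
    temp_override=0.7,
)
edited_text = response["choices"][0]["text"]
```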
@ -943,7 +935,7 @@ class Model:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(response)
await self.valid_text_request(response, model=self.model if model is None else model)
print(f"Response -> {response}")
return response
@ -1063,7 +1055,7 @@ class Model:
headers=headers,
) as resp:
response = await resp.json()
print(f"Payload -> {payload}")
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(
response, model=self.model if model is None else model

@ -5,13 +5,12 @@ history, message count, and the id of the user in order to track them.
class RedoUser:
def __init__(self, prompt, instruction, message, ctx, response, codex, paginator):
def __init__(self, prompt, instruction, message, ctx, response, paginator):
self.prompt = prompt
self.instruction = instruction
self.message = message
self.ctx = ctx
self.response = response
self.codex = codex
self.paginator = paginator
self.interactions = []
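Call sites now construct RedoUser without the removed flag. An illustrative construction with the trimmed signature (values mirror the updated call sites in ImageService and TextService later in this commit):

```python
# Sketch only: field values are placeholders from the surrounding call sites.
converser_cog.redo_users[ctx.author.id] = RedoUser(
    prompt=prompt,
    instruction=None,
    message=ctx,
    ctx=ctx,
    response=response_message,
    paginator=None,
)
```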

@ -122,7 +122,6 @@ class ImageService:
ctx=ctx,
response=result_message,
instruction=None,
codex=False,
paginator=None,
)
@ -184,7 +183,6 @@ class ImageService:
ctx=ctx,
response=result_message,
instruction=None,
codex=False,
paginator=None,
)

@ -34,7 +34,6 @@ class TextService:
instruction=None,
from_ask_command=False,
from_edit_command=False,
codex=False,
model=None,
user=None,
custom_api_key=None,
@ -59,7 +58,6 @@ class TextService:
instruction (str, optional): Instruction for use with the edit endpoint. Defaults to None.
from_ask_command (bool, optional): Called from the ask command. Defaults to False.
from_edit_command (bool, optional): Called from the edit command. Defaults to False.
codex (bool, optional): Pass along that we want to use a codex model. Defaults to False.
model (str, optional): Which model to generate output with. Defaults to None.
user (discord.User, optional): An user object that can be used to set the stop. Defaults to None.
custom_api_key (str, optional): per-user api key. Defaults to None.
@ -317,7 +315,6 @@ class TextService:
instruction=instruction,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
codex=codex,
custom_api_key=custom_api_key,
)
else:
@ -338,7 +335,7 @@ class TextService:
response_text = (
converser_cog.cleanse_response(str(response["choices"][0]["text"]))
if not is_chatgpt_request and not is_chatgpt_conversation
if not is_chatgpt_request and not is_chatgpt_conversation or from_edit_command
else converser_cog.cleanse_response(
str(response["choices"][0]["message"]["content"])
)
@ -349,14 +346,13 @@ class TextService:
elif from_other_action:
response_text = f"***{from_other_action}*** {response_text}"
elif from_ask_command or from_ask_action:
response_model = response["model"]
if response_model in Models.GPT4_MODELS or response_model in Models.CHATGPT_MODELS:
response_text = f"\n\n{response_text}"
response_text = f"***{prompt}***{response_text}"
elif from_edit_command:
if codex:
response_text = response_text.strip()
response_text = f"***Prompt:\n `{prompt}`***\n***Instruction:\n `{instruction}`***\n\n```\n{response_text}\n```"
else:
response_text = response_text.strip()
response_text = f"***Prompt:\n `{prompt}`***\n***Instruction:\n `{instruction}`***\n\n{response_text}\n"
response_text = response_text.strip()
response_text = f"***Prompt:***\n {prompt}\n\n***Instruction:***\n {instruction}\n\n***Response:***\n {response_text}"
# If gpt3 tries writing a user mention try to replace it with their name
response_text = await converser_cog.mention_to_username(ctx, response_text)
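This branch is the "add newlines manually with ask" item from the commit message: for chat-style models (turbo, gpt-4) the bot now inserts a blank line before appending the answer to the bolded prompt, presumably because those responses lack the leading newlines that davinci completions include. Roughly, with illustrative values:

```python
# Illustrative result of the ask formatting for a chat-model response.
prompt = "how many hours are in a day?"
answer = "There are 24 hours in a day."
formatted = f"***{prompt}***\n\n{answer}"
# Non-chat completions keep the old f"***{prompt}***{answer}" form unchanged.
```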
@ -437,7 +433,7 @@ class TextService:
)
else:
embed_pages = await converser_cog.paginate_embed(
response_text, codex, prompt, instruction
response_text
)
view = ConversationView(
ctx,
@ -500,7 +496,6 @@ class TextService:
ctx=ctx,
message=ctx,
response=response_message,
codex=codex,
paginator=paginator,
)
converser_cog.redo_users[ctx.author.id].add_interaction(
@ -512,7 +507,7 @@ class TextService:
paginator = converser_cog.redo_users.get(ctx.author.id).paginator
if isinstance(paginator, pages.Paginator):
embed_pages = await converser_cog.paginate_embed(
response_text, codex, prompt, instruction
response_text
)
view = ConversationView(
ctx,
@ -950,7 +945,6 @@ class RedoButton(discord.ui.Button["ConversationView"]):
instruction = self.converser_cog.redo_users[user_id].instruction
ctx = self.converser_cog.redo_users[user_id].ctx
response_message = self.converser_cog.redo_users[user_id].response
codex = self.converser_cog.redo_users[user_id].codex
await interaction.response.send_message(
"Retrying your original request...", ephemeral=True, delete_after=15
@ -965,7 +959,7 @@ class RedoButton(discord.ui.Button["ConversationView"]):
ctx=ctx,
model=self.model,
response_message=response_message,
codex=codex,
custom_api_key=self.custom_api_key,
redo_request=True,
from_ask_command=self.from_ask_command,

@ -46,5 +46,5 @@ async def test_send_req_gpt4():
# usage_service = UsageService(Path("../tests"))
# model = Model(usage_service)
# text = 'how many hours are in a day?'
# res = await model.send_edit_request(text, codex=True)
# res = await model.send_edit_request(text)
# assert '24' in res['choices'][0]['text']
