Format Python code with psf/black push

github-actions 1 year ago
parent 20eea7f6b8
commit 62ad5350d7

@@ -311,7 +311,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if isinstance(ctx.channel, discord.Thread):
thread = True
if (
conversation_limit
): # if we reach the conversation limit we want to close from the channel it was maxed out in
@@ -1083,8 +1082,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
embed = discord.Embed(title=embed_title, color=0x808080)
await ctx.respond(embed=embed)
self.conversation_threads[target.id] = Thread(target.id)
self.conversation_threads[target.id].model = (
self.model.model if not model else model

@@ -620,14 +620,16 @@ class Model:
f"{details['exception'].args[0]}"
)
async def valid_text_request(self,response, model=None):
async def valid_text_request(self, response, model=None):
try:
tokens_used = int(response["usage"]["total_tokens"])
if model and model in Models.GPT4_MODELS:
await self.usage_service.update_usage(tokens_used,
prompt_tokens=int(response["usage"]["prompt_tokens"]),
completion_tokens=int(response["usage"]["completion_tokens"]),
gpt4=True)
await self.usage_service.update_usage(
tokens_used,
prompt_tokens=int(response["usage"]["prompt_tokens"]),
completion_tokens=int(response["usage"]["completion_tokens"]),
gpt4=True,
)
else:
await self.usage_service.update_usage(tokens_used)
except Exception as e:
@@ -781,7 +783,6 @@ class Model:
return response
@backoff.on_exception(
backoff.expo,
ValueError,
@@ -887,13 +888,15 @@ class Model:
)
else:
try:
print("In first block The message text is ->" + message.text)
if message.text.strip().lower().startswith("this conversation has some context from earlier"):
if (
message.text.strip()
.lower()
.startswith("this conversation has some context from earlier")
):
print("Hit the exception clause")
raise Exception("This is a context message")
username = re.search(r"(?<=\n)(.*?)(?=:)", message.text).group()
username_clean = self.cleanse_username(username)
text = message.text.replace(f"{username}:", "")
@@ -905,9 +908,7 @@ class Model:
except Exception:
print("In second block The message text is ->" + message.text)
text = message.text.replace("<|endofstatement|>", "")
messages.append(
{"role": "system", "content": text}
)
messages.append({"role": "system", "content": text})
print(f"Messages -> {messages}")
async with aiohttp.ClientSession(raise_for_status=False) as session:
@@ -1058,7 +1059,9 @@ class Model:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(response, model=self.model if model is None else model)
await self.valid_text_request(
response, model=self.model if model is None else model
)
print(f"Response -> {response}")
return response
@@ -1092,7 +1095,9 @@ class Model:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(response, model=self.model if model is None else model)
await self.valid_text_request(
response, model=self.model if model is None else model
)
print(f"Response -> {response}")
return response

@@ -261,7 +261,9 @@ class TextService:
)
await converser_cog.end_conversation(ctx)
converser_cog.remove_awaiting(ctx.author.id, ctx.channel.id, False, False)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, False, False
)
return
else:
await ctx.reply("The conversation context limit has been reached.")

@@ -14,7 +14,15 @@ class UsageService:
f.close()
self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
async def get_price(self, tokens_used, prompt_tokens=None, completion_tokens=None, embeddings=False, chatgpt=False, gpt4=False):
async def get_price(
self,
tokens_used,
prompt_tokens=None,
completion_tokens=None,
embeddings=False,
chatgpt=False,
gpt4=False,
):
tokens_used = int(tokens_used)
if chatgpt:
price = (tokens_used / 1000) * 0.002
@@ -23,23 +31,27 @@ class UsageService:
price = (prompt_tokens / 1000) * 0.03 + (completion_tokens / 1000) * 0.06
return price
elif not embeddings:
price = (
tokens_used / 1000
) * 0.02
price = (tokens_used / 1000) * 0.02
else:
price = (tokens_used / 1000) * 0.0004
return price
async def update_usage(self, tokens_used, prompt_tokens=None, completion_tokens=None, embeddings=False, chatgpt=False, gpt4=False):
async def update_usage(
self,
tokens_used,
prompt_tokens=None,
completion_tokens=None,
embeddings=False,
chatgpt=False,
gpt4=False,
):
tokens_used = int(tokens_used)
if chatgpt:
price = (tokens_used / 1000) * 0.002
elif gpt4:
price = (prompt_tokens / 1000) * 0.03 + (completion_tokens / 1000) * 0.06
elif not embeddings:
price = (
tokens_used / 1000
) * 0.02
price = (tokens_used / 1000) * 0.02
else:
price = (tokens_used / 1000) * 0.0004
usage = await self.get_usage()

Loading…
Cancel
Save