Kaveen Kumarasinghe 1 year ago
commit fccda65f43

@@ -16,6 +16,7 @@ USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
class RedoSearchUser:
def __init__(self, ctx, query, search_scope, nodes):
self.ctx = ctx

@@ -556,7 +556,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
if message.author == self.bot.user:
return
# Moderations service is done here.
if (
hasattr(message, "guild")
@@ -580,7 +579,9 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Language check
if FORCE_ENGLISH and len(message.content.split(" ")) > 3:
if not await Moderation.force_english_and_respond(message.content, self.LANGUAGE_DETECT_STARTER_TEXT, message):
if not await Moderation.force_english_and_respond(
message.content, self.LANGUAGE_DETECT_STARTER_TEXT, message
):
await message.delete()
return
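
The check above only fires when `FORCE_ENGLISH` is enabled and the message is longer than three whitespace-separated words; shorter messages skip the language detector entirely. A minimal sketch of that gate in isolation (the standalone helper is illustrative; only the flag and the word-count rule come from the hunk itself):

```python
# Illustrative sketch of the gating condition above; only FORCE_ENGLISH and the
# "> 3 words" rule come from the diff, the helper function is hypothetical.
def needs_language_check(content: str, force_english: bool) -> bool:
    # Messages of three words or fewer are never sent to the language detector.
    return force_english and len(content.split(" ")) > 3


assert needs_language_check("bonjour tout le monde mes amis", True)
assert not needs_language_check("ok thanks", True)
assert not needs_language_check("this message is long enough but the flag is off", False)
```
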
@@ -1025,7 +1026,6 @@ class GPT3ComCon(discord.Cog, name="GPT3ComCon"):
# Append the starter text for gpt3 to the user's history so it gets concatenated with the prompt later
if minimal or opener_file or opener:
self.conversation_threads[thread.id].history.append(
EmbeddedConversationItem(self.CONVERSATION_STARTER_TEXT_MINIMAL, 0)
)

@@ -88,9 +88,13 @@ sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt install python3.9
sudo apt install python3.9-distutils # If this doesn't work, try sudo apt install python3-distutils
# Install Pip for python3.9
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.pypython3.9 get-pip.py
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3.9 get-pip.py
# Install project dependencies
python3.9 -m pip install --ignore-installed PyYAMLpython3.9 -m pip install torch==1.9.1+cpu torchvision==0.10.1+cpu -f https://download.pytorch.org/whl/torch_stable.htmlpython3.9 -m pip install -r requirements.txtpython3.9 -m pip install .
python3.9 -m pip install --ignore-installed PyYAML
python3.9 -m pip install torch==1.9.1+cpu torchvision==0.10.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
python3.9 -m pip install -r requirements.txt
python3.9 -m pip install .
# Copy the sample.env file into a regular .env file. `DEBUG_GUILD` and the ID for `ALLOWED_GUILDS` can be found by right-clicking your server and choosing "Copy ID". Similarly, `DEBUG_CHANNEL` can be found by right-clicking your debug channel.
cp sample.env .env
# The command below is used to edit the .env file and to put in your API keys. You can right click within the
@@ -127,24 +131,24 @@ python3.9 gpt3discord.py
To use Docker, you can run the following command after [installing Docker](https://docs.docker.com/get-docker/):
- Make a .env file to mount to `/opt/gpt3discord/etc/environment` in docker
- `env_file` in the command should be replaced with where you have your .env file stored on your machine
The parts enclosed in [ ] are optional; read below for information
- Add `DATA_DIR=/data` to your env file -> `usage.txt` is saved here
- Add `SHARE_DIR=/data/share` to your env file -> this is where `conversation starters, optimizer pretext and the 'openers' folder` is alternatively loaded from for persistence
- Make sure the path on the left side of the colon in the paths below is a valid path on your machine
```shell
docker run -d --name gpt3discord env_file:/opt/gpt3discord/etc/environment [-v /containers/gpt3discord:/data] [-v /containers/gpt3discord/share:/data/share] ghcr.io/kav-k/gpt3discord:main
docker run -d --name gpt3discord -v env_file:/opt/gpt3discord/etc/environment -v /containers/gpt3discord:/data -v /containers/gpt3discord/share:/data/share ghcr.io/kav-k/gpt3discord:main
```
If you wish to build your own image, run the following commands instead:
```shell
# build the image
docker build -t gpt3discord .
# run it
docker run -d --name gpt3discord env_file:/opt/gpt3discord/etc/environment [-v /containers/gpt3discord:/data] [-v /containers/gpt3discord/share:/data/share] gpt3discord
docker run -d --name gpt3discord -v env_file:/opt/gpt3discord/etc/environment -v /containers/gpt3discord:/data -v /containers/gpt3discord/share:/data/share gpt3discord
```
- Optional: Make a data and share directory then mount it to docker to keep persistent data
- Add `-v DATA_DIR=/data` to command -> `usage.txt` is saved here
- Add `-v SHARE_DIR=/data/share` to command -> this is where `conversation starters, optimizer pretext and the 'openers' folder` is alternatively loaded from
- If `SHARE_DIR` is not included it'll load only from the files added during the docker image build
Make sure the `env_file` path is correct and that the `DATA_DIR` and `SHARE_DIR` paths exist on your machine if used.
Make sure all the paths are correct.
#### Docker Compose

@@ -205,7 +205,7 @@ class Model:
)
print("Building language detector")
#self.detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
# self.detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
print("Language detector built")
def reset_settings(self):
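
For context, the commented-out line above would build an offline detector from the `lingua` language-detection library. A hedged sketch of what enabling it looks like (the `detect_language_of` call is my reading of that library's API, not something this commit exercises):

```python
# Sketch only: what the commented-out detector line would do if re-enabled.
# Assumes the lingua-language-detector package; verify the API against its docs.
from lingua import Language, LanguageDetectorBuilder

detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
print(detector.detect_language_of("languages are awesome"))  # e.g. Language.ENGLISH
```
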
@@ -781,7 +781,9 @@ class Model:
prompt = f"{pretext}{text}\nOutput:"
max_tokens = Models.get_max_tokens(Models.DAVINCI) - self.usage_service.count_tokens(prompt)
max_tokens = Models.get_max_tokens(
Models.DAVINCI
) - self.usage_service.count_tokens(prompt)
print(f"Language detection request for {text}")
@@ -791,11 +793,9 @@ class Model:
"prompt": prompt,
"temperature": 0,
"top_p": 1,
"max_tokens": max_tokens
}
headers = {
"Authorization": f"Bearer {self.openai_key}"
"max_tokens": max_tokens,
}
headers = {"Authorization": f"Bearer {self.openai_key}"}
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
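Together with the `max_tokens` budget computed in the previous hunk (model window minus the tokens already used by the prompt), the payload above is a plain Completions call made over `aiohttp`. A self-contained sketch of that request, with the model name and key source filled in as assumptions (the real code takes them from `Models.DAVINCI` and `self.openai_key`):

```python
# Standalone sketch of the language-detection request shown in this hunk.
# Payload keys, headers, URL, and response parsing mirror the diff; the
# "text-davinci-003" model name and OPENAI_TOKEN env var are stand-ins.
import asyncio
import os

import aiohttp


async def send_language_detect_request(prompt: str, max_tokens: int) -> str:
    payload = {
        "model": "text-davinci-003",  # assumption; the cog resolves this elsewhere
        "prompt": prompt,
        "temperature": 0,
        "top_p": 1,
        "max_tokens": max_tokens,
    }
    headers = {"Authorization": f"Bearer {os.environ['OPENAI_TOKEN']}"}
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "https://api.openai.com/v1/completions", json=payload, headers=headers
        ) as resp:
            response = await resp.json()
            # The caller reads the detector's verdict from the first choice,
            # exactly as the Moderation hunk below does.
            return response["choices"][0]["text"]


if __name__ == "__main__":
    verdict = asyncio.run(
        send_language_detect_request("Is the following text English?\nOutput:", 10)
    )
    print(verdict)
```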

@@ -139,7 +139,7 @@ class Moderation:
@staticmethod
async def force_english_and_respond(text, pretext, ctx):
response = await model.send_language_detect_request(text, pretext)
response_text = response['choices'][0]['text']
response_text = response["choices"][0]["text"]
if "false" in response_text.lower().strip():
if isinstance(ctx, discord.Message):
@@ -159,7 +159,14 @@ class Moderation:
response = await Moderation.simple_moderate(text)
print(response)
flagged = True if Moderation.determine_moderation_result(text, response, pre_mod_set, pre_mod_set) == ModerationResult.DELETE else False
flagged = (
True
if Moderation.determine_moderation_result(
text, response, pre_mod_set, pre_mod_set
)
== ModerationResult.DELETE
else False
)
if flagged:
if isinstance(ctx, discord.Message):
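Since the enum comparison on the right already evaluates to a boolean, the `True if ... else False` wrapper above is purely stylistic; an equivalent, more direct form would be (same project names, just a sketch):

```python
# Equivalent form of the expression reformatted above; relies on the same
# Moderation helpers and ModerationResult enum defined elsewhere in this module.
flagged = (
    Moderation.determine_moderation_result(text, response, pre_mod_set, pre_mod_set)
    == ModerationResult.DELETE
)
```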

@@ -263,7 +263,6 @@ class TextService:
await converser_cog.end_conversation(ctx)
return
# Send the request to the model
if from_edit_command:
response = await converser_cog.model.send_edit_request(
@@ -538,7 +537,9 @@ class TextService:
if conversing:
# Pre-moderation check
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(message.content, message):
if await Moderation.simple_moderate_and_respond(
message.content, message
):
await message.delete()
return
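This hunk is the caller side of the pre-moderation flow: when `PRE_MODERATE` is set, incoming text is run through `Moderation.simple_moderate_and_respond` (whose flagging logic appears in the `Moderation` hunk above) before any model call, and the message is deleted if it comes back flagged. A compressed sketch of that gate (wrapping it in a standalone coroutine is purely for illustration; `PRE_MODERATE` and `simple_moderate_and_respond` are the project's own names):

```python
# Illustrative wrapper around the pre-moderation gate shown in this hunk.
# PRE_MODERATE and Moderation.simple_moderate_and_respond come from the project;
# returning a bool is an addition made only for this example.
async def delete_if_flagged(message) -> bool:
    if PRE_MODERATE and await Moderation.simple_moderate_and_respond(
        message.content, message
    ):
        await message.delete()
        return True
    return False
```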
