Introduce unit test framework

pytest is now utilized for unit tests. For now, I wrote a single unit test for the
anime markdown parser; more will come.
pull/5/head
Robert Dailey 3 years ago
parent 2133bfceb9
commit 7a5969874a
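
For context, the enabling change here is that `parse_markdown` now accepts the guide markdown as a parameter instead of fetching it over the network itself, which is what lets the new test feed it a local file. A minimal sketch of the new call shape (the module path and the stub logger mirror the new test file below; any object with `info`/`debug` methods should work):

```python
import trash.guide.anime as anime

class StubLogger:
    # parse_markdown only logs; a no-op stub keeps the sketch self-contained
    def info(self, msg): pass
    def debug(self, msg): pass

# Any guide-formatted markdown string works; it no longer has to come from the network.
markdown = "### Profile 1\nThe score is [100]\n```\nterm1\n```\n"
profiles = anime.parse_markdown(StubLogger(), markdown)

profile = next(iter(profiles.values()))
print(profile.preferred.get(100))  # expected: ['term1']
```

The production path keeps its old behavior by composing the two pieces, as seen further down in the diff: `anime.parse_markdown(logger, anime.get_trash_anime_markdown())`.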

@@ -2,13 +2,22 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
<excludeFolder url="file://$MODULE_DIR$/venv" />
<excludeFolder url="file://$MODULE_DIR$/.vscode" />
<excludeFolder url="file://$MODULE_DIR$/sonarr_api_examples" />
</content>
<orderEntry type="jdk" jdkName="Python 3.9 (TrashUpdater)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PackageRequirementsSettings">
<option name="requirementsPath" value="" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
<component name="TestRunnerService">
<option name="PROJECT_TEST_RUNNER" value="pytest" />
</component>
</module>

@@ -0,0 +1,5 @@
<component name="ProjectCodeStyleConfiguration">
<state>
<option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
</state>
</component>

@@ -71,3 +71,11 @@ In addition to the above limitations, the following items are planned for the future
* Implement some sort of guide versioning (e.g. to avoid updating a release profile if the guide did
not change).
* Unit Testing
## Development / Contributing
### Prerequisites
Some additional packages are required to run the unit tests. All can be installed via `pip`:
* `pytest`
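
Once installed, the suite can be run by invoking `pytest` from the repository root. As a minimal sketch, the same thing can be done in-process via pytest's public API (the `-q` flag is purely illustrative, not something this commit requires):

```python
# Minimal sketch: equivalent to running `pytest -q` from the repository root.
import sys
import pytest

sys.exit(pytest.main(["-q"]))  # pytest.main returns the suite's exit code
```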

@@ -5,16 +5,16 @@ import requests
from ..profile_data import ProfileData
Filter = Enum('FilterType', 'Preferred Required Ignored')
TermCategory = Enum('TermCategory', 'Preferred Required Ignored')
header_regex = re.compile(r'^(#+)\s([\w\s\d]+)\s*$')
score_regex = re.compile(r'score.*?\[(-?[\d]+)\]', re.IGNORECASE)
# included_preferred_regex = re.compile(r'include preferred', re.IGNORECASE)
# not_regex = re.compile(r'not', re.IGNORECASE)
filter_regexes = (
(Filter.Required, re.compile(r'must contain', re.IGNORECASE)),
(Filter.Ignored, re.compile(r'must not contain', re.IGNORECASE)),
(Filter.Preferred, re.compile(r'preferred', re.IGNORECASE)),
category_regex = (
(TermCategory.Required, re.compile(r'must contain', re.IGNORECASE)),
(TermCategory.Ignored, re.compile(r'must not contain', re.IGNORECASE)),
(TermCategory.Preferred, re.compile(r'preferred', re.IGNORECASE)),
)
# --------------------------------------------------------------------------------------------------
@@ -24,23 +24,21 @@ def get_trash_anime_markdown():
return response.content.decode('utf8')
# --------------------------------------------------------------------------------------------------
def parse_filter(line):
for rx in filter_regexes:
def parse_category(line):
for rx in category_regex:
if rx[1].search(line):
return rx[0]
return None
# --------------------------------------------------------------------------------------------------
def parse_markdown(logger):
class state:
results = defaultdict(ProfileData)
profile_name = None
score = None
filter = None
bracket_depth = 0
def parse_markdown(logger, markdown_content):
results = defaultdict(ProfileData)
profile_name = None
score = None
category = None
bracket_depth = 0
markdown_content = get_trash_anime_markdown()
for line in markdown_content.splitlines():
# Header processing
if match := header_regex.search(line):
@@ -49,41 +47,42 @@ def parse_markdown(logger):
# Profile name (always reset previous state here)
if header_depth == 3:
state.score = None
state.filter = Filter.Preferred
state.profile_name = header_text
score = None
category = TermCategory.Preferred
profile_name = header_text
logger.debug(f'New Profile: {header_text}')
# Filter type for provided regexes
elif header_depth == 4:
state.filter = parse_filter(header_text)
if state.filter:
logger.debug(f' Filter Set: {state.filter}')
category = parse_category(header_text)
if category:
logger.debug(f' Category Set: {category}')
# Lines we always look for
elif line.startswith('```'):
state.bracket_depth = 1 - state.bracket_depth
bracket_depth = 1 - bracket_depth
# Filter-based line processing
elif state.profile_name:
profile = state.results[state.profile_name]
# Category-based line processing
elif profile_name:
profile = results[profile_name]
lower_line = line.lower()
if 'include preferred' in lower_line:
profile.include_preferred_when_renaming = 'not' not in lower_line
logger.debug(f' Include preferred found: {profile.include_preferred_when_renaming}, {lower_line}')
elif state.filter == Filter.Preferred:
elif category == TermCategory.Preferred:
if match := score_regex.search(line):
# bracket_depth = 0
state.score = int(match.group(1))
elif state.bracket_depth:
if state.score is not None:
logger.debug(f' [Preferred] Score: {state.score}, Term: {line}')
profile.preferred[state.score].append(line)
elif state.filter == Filter.Ignored:
if state.bracket_depth:
# Sometimes a comma is present at the end of these regexes, because when it's
# pasted into Sonarr it acts as a delimiter. However, when using them with the
# API we do not need them.
profile.ignored.append(line.rstrip(','))
score = int(match.group(1))
elif bracket_depth:
if score is not None:
logger.debug(f' [Preferred] Score: {score}, Term: {line}')
profile.preferred[score].append(line)
elif category == TermCategory.Ignored and bracket_depth:
# Sometimes a comma is present at the end of these regexes, because when it's
# pasted into Sonarr it acts as a delimiter. However, when using them with the
# API we do not need them.
profile.ignored.append(line.rstrip(','))
elif category == TermCategory.Required and bracket_depth:
profile.required.append(line.rstrip(','))
logger.debug('\n\n')
return state.results
return results

@@ -0,0 +1,22 @@
### Profile 1
The score is [100]
```
term1
```
This is another Score that should not be used [200]
#### Must not contain
```
term2
term3
```
#### Must contain
```
term4
```

@@ -0,0 +1,28 @@
import trash.guide.anime as anime
from pathlib import Path
data_files = Path(__file__).parent / 'data'
class TestLogger:
def info(self, msg): pass
def debug(self, msg): pass
def test_parse_markdown_complete_doc():
md_file = data_files / 'test_parse_markdown_complete_doc.md'
with open(md_file) as file:
test_markdown = file.read()
results = anime.parse_markdown(TestLogger(), test_markdown)
assert len(results) == 1
profile = next(iter(results.values()))
assert len(profile.ignored) == 2
assert sorted(profile.ignored) == sorted(['term2', 'term3'])
assert len(profile.required) == 1
assert profile.required == ['term4']
assert len(profile.preferred) == 1
assert profile.preferred.get(100) == ['term1']

@@ -1,9 +1,4 @@
import requests
# import re
# import json
# from collections import defaultdict
# from dataclasses import dataclass
# from enum import Enum
from packaging import version # pip install packaging
from app import guide
@@ -12,64 +7,63 @@ from app.guide import anime, utils
from app.cmd import setup_and_parse_args
from app.logger import Logger
####################################################################################################
if __name__ == '__main__':
try:
args = setup_and_parse_args()
logger = Logger(args)
sonarr = Sonarr(args, logger)
try:
args = setup_and_parse_args()
logger = Logger(args)
sonarr = Sonarr(args, logger)
profiles = anime.parse_markdown(logger, anime.get_trash_anime_markdown())
profiles = anime.parse_markdown(logger)
# A few false-positive profiles are added sometimes. We filter these out by checking if they
# actually have meaningful data attached to them, such as preferred terms. If they are mostly empty,
# we remove them here.
utils.filter_profiles(profiles)
# A few false-positive profiles are added sometimes. We filter these out by checking if they
# actually have meaningful data attached to them, such as preferred terms. If they are mostly empty,
# we remove them here.
utils.filter_profiles(profiles)
if args.preview:
utils.print_terms_and_scores(profiles)
exit(0)
if args.preview:
utils.print_terms_and_scores(profiles)
exit(0)
# Since this script requires a specific version of v3 Sonarr that implements name support for
# release profiles, we perform that version check here and bail out if it does not meet a minimum
# required version.
minimum_version = version.parse('3.0.4.1098')
version = sonarr.get_version()
if version < minimum_version:
print(f'ERROR: Your Sonarr version ({version}) does not meet the minimum required version of {minimum_version} to use this script.')
exit(1)
# Since this script requires a specific version of v3 Sonarr that implements name support for
# release profiles, we perform that version check here and bail out if it does not meet a minimum
# required version.
minimum_version = version.parse('3.0.4.1098')
version = sonarr.get_version()
if version < minimum_version:
print(f'ERROR: Your Sonarr version ({version}) does not meet the minimum required version of {minimum_version} to use this script.')
exit(1)
# If tags were provided, ensure they exist. Tags that do not exist are added first, so that we
# may specify them with the release profile request payload.
tag_ids = []
if args.tags:
tags = sonarr.get_tags()
tags = sonarr.create_missing_tags(tags, args.tags[:])
logger.debug(f'Tags JSON: {tags}')
# If tags were provided, ensure they exist. Tags that do not exist are added first, so that we
# may specify them with the release profile request payload.
tag_ids = []
if args.tags:
tags = sonarr.get_tags()
tags = sonarr.create_missing_tags(tags, args.tags[:])
logger.debug(f'Tags JSON: {tags}')
# Get a list of IDs that we can pass along with the request to update/create release
# profiles
tag_ids = [t['id'] for t in tags if t['label'] in args.tags]
logger.debug(f'Tag IDs: {tag_ids}')
# Get a list of IDs that we can pass along with the request to update/create release
# profiles
tag_ids = [t['id'] for t in tags if t['label'] in args.tags]
logger.debug(f'Tag IDs: {tag_ids}')
# Obtain all of the existing release profiles first. If any were previously created by our script
# here, we favor replacing those instead of creating new ones, which would just be mostly duplicates
# (but with some differences, since there have likely been updates since the last run).
existing_profiles = sonarr.get_release_profiles()
# Obtain all of the existing release profiles first. If any were previously created by our script
# here, we favor replacing those instead of creating new ones, which would just be mostly duplicates
# (but with some differences, since there have likely been updates since the last run).
existing_profiles = sonarr.get_release_profiles()
for name, profile in profiles.items():
new_profile_name = f'[Trash] Anime - {name}'
profile_to_update = guide.utils.find_existing_profile(new_profile_name, existing_profiles)
for name, profile in profiles.items():
new_profile_name = f'[Trash] Anime - {name}'
profile_to_update = guide.utils.find_existing_profile(new_profile_name, existing_profiles)
if profile_to_update:
print(f'Updating existing profile: {new_profile_name}')
sonarr.update_existing_profile(profile_to_update, profile, tag_ids)
else:
print(f'Creating new profile: {new_profile_name}')
sonarr.create_release_profile(new_profile_name, profile, tag_ids)
if profile_to_update:
print(f'Updating existing profile: {new_profile_name}')
sonarr.update_existing_profile(profile_to_update, profile, tag_ids)
else:
print(f'Creating new profile: {new_profile_name}')
sonarr.create_release_profile(new_profile_name, profile, tag_ids)
except requests.exceptions.HTTPError as e:
print(e)
if error_msg := Sonarr.get_error_message(e.response):
print(f'Response Message: {error_msg}')
exit(1)
except requests.exceptions.HTTPError as e:
print(e)
if error_msg := Sonarr.get_error_message(e.response):
print(f'Response Message: {error_msg}')
exit(1)
