* Significant refactoring of parsing logic to generalize it between anime and web-dl
* Rework exception type hierarchy for Server code
parent
7a0c3ba26d
commit
cb44ab36cb
@ -0,0 +1 @@
|
||||
from . import server, sonarr
|
@ -1,21 +1,29 @@
|
||||
import json
|
||||
import requests
|
||||
|
||||
from app.trash_error import TrashError
|
||||
|
||||
class TrashHttpError(TrashError):
    """Raised when an HTTP request to the server comes back with an error status.

    The originating response object is kept on ``self.response`` so callers can
    inspect the status code and body.
    """

    def __init__(self, response):
        # Populate Exception.args so str(exc) and logging produce a useful
        # message instead of an empty string (the original stored the response
        # without ever calling super().__init__).
        super().__init__(f'HTTP {response.status_code} returned by server')
        self.response = response
|
||||
class Server:
    """Thin HTTP client wrapper around the ``requests`` library.

    ``exception_strategy`` is a callable (normally an exception class such as
    TrashHttpError) invoked with the failed response and raised when the
    server returns an error status.
    """

    # Map of supported HTTP verbs to the matching `requests` function.
    # Shared across instances; declared once at class level.
    dispatch = {
        'put': requests.put,
        'get': requests.get,
        'post': requests.post,
    }

    def __init__(self, base_uri, apikey, exception_strategy):
        self.base_uri = base_uri
        self.apikey = apikey
        self.exception_strategy = exception_strategy

    def build_uri(self, endpoint):
        # NOTE(review): plain concatenation — assumes `endpoint` already ends
        # with the query prefix for the API key (e.g. '...?apikey='); confirm
        # against callers.
        return self.base_uri + endpoint + self.apikey

    def request(self, method, endpoint, data=None):
        """Issue `method` against `endpoint` and return the decoded JSON body.

        Raises ``self.exception_strategy(response)`` for any 4xx/5xx status,
        and ValueError for an HTTP method not in ``dispatch`` (previously an
        unknown method produced an opaque ``NoneType is not callable`` error).
        """
        try:
            func = Server.dispatch[method]
        except KeyError:
            raise ValueError(f'Unsupported HTTP method: {method}') from None

        r = func(self.build_uri(endpoint), json.dumps(data))
        if 400 <= r.status_code < 600:
            raise self.exception_strategy(r)
        return json.loads(r.content)
|
@ -0,0 +1 @@
|
||||
from . import profile, quality, utils
|
@ -1,88 +0,0 @@
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
import requests
|
||||
|
||||
from ..profile_data import ProfileData
|
||||
|
||||
# The three term lists a release-profile entry can belong to, mirroring the
# three lists Sonarr exposes on a release profile.
TermCategory = Enum('TermCategory', 'Preferred Required Ignored')


# Matches a markdown header line: group(1) is the run of '#' (header depth),
# group(2) is the header text.
header_regex = re.compile(r'^(#+)\s([\w\s\d]+)\s*$')
# Extracts a numeric score written like '[100]' or '[-10]' after the word 'score'.
score_regex = re.compile(r'score.*?\[(-?[\d]+)\]', re.IGNORECASE)
# Identifies headers that begin a new release-profile section.
header_release_profile_regex = re.compile(r'release profile', re.IGNORECASE)
# not_regex = re.compile(r'not', re.IGNORECASE)
# (category, pattern) pairs used by parse_category() to map header/sentence
# text onto a TermCategory; the first pattern that matches wins.
category_regex = (
    (TermCategory.Required, re.compile(r'must contain', re.IGNORECASE)),
    (TermCategory.Ignored, re.compile(r'must not contain', re.IGNORECASE)),
    (TermCategory.Preferred, re.compile(r'preferred', re.IGNORECASE)),
)
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def get_trash_anime_markdown():
    """Download the TRaSH anime release-profile guide page and return it as text."""
    trash_anime_markdown_url = 'https://raw.githubusercontent.com/TRaSH-/Guides/master/docs/Sonarr/V3/Sonarr-Release-Profile-RegEx-Anime.md'
    return requests.get(trash_anime_markdown_url).content.decode('utf8')
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_category(line):
    """Return the TermCategory whose pattern matches `line`, or None if none do."""
    return next(
        (category for category, pattern in category_regex if pattern.search(line)),
        None,
    )
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_markdown(args, logger, markdown_content):
    """Parse the guide markdown into a mapping of profile name -> ProfileData.

    Walks the document line by line, tracking the current profile (from
    'Release Profile' headers), the current term category, the current score
    and whether we are inside a ``` code fence; term lines found inside a
    fence are appended to the matching list on the active ProfileData.
    """
    results = defaultdict(ProfileData)
    profile_name = None
    score = None
    category = None
    bracket_depth = 0  # 0 = outside a ``` fence, 1 = inside one

    for line in markdown_content.splitlines():
        # Header processing
        if match := header_regex.search(line):
            header_text = match.group(2)

            # Profile name (always reset previous state here)
            if header_release_profile_regex.search(header_text):
                score = None
                category = TermCategory.Preferred
                profile_name = header_text
                logger.debug(f'New Profile: {header_text}')
            # Filter type for provided regexes
            elif category := parse_category(header_text):
                logger.debug(f' Category Set: {category}')

        # Lines we always look for
        elif line.startswith('```'):
            # Toggle between 0 and 1 on every fence marker.
            bracket_depth = 1 - bracket_depth

        # Category-based line processing
        elif profile_name:
            profile = results[profile_name]
            lower_line = line.lower()
            if 'include preferred' in lower_line:
                # 'not' in the sentence inverts the checkbox value.
                profile.include_preferred_when_renaming = 'not' not in lower_line
                logger.debug(f' Include preferred found: {profile.include_preferred_when_renaming}, {lower_line}')
            elif category == TermCategory.Preferred:
                if match := score_regex.search(line):
                    # bracket_depth = 0
                    score = int(match.group(1))
                elif bracket_depth:
                    # Only capture terms once a score has been seen.
                    if score is not None:
                        logger.debug(f' [Preferred] Score: {score}, Term: {line}')
                        if args.strict_negative_scores and score < 0:
                            profile.ignored.append(line)
                        else:
                            profile.preferred[score].append(line)
            elif category == TermCategory.Ignored and bracket_depth:
                # Sometimes a comma is present at the end of these regexes, because when it's
                # pasted into Sonarr it acts as a delimiter. However, when using them with the
                # API we do not need them.
                profile.ignored.append(line.rstrip(','))
            elif category == TermCategory.Required and bracket_depth:
                profile.required.append(line.rstrip(','))

    logger.debug('\n\n')
    return results
|
@ -0,0 +1,170 @@
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
import requests
|
||||
|
||||
from ..profile_data import ProfileData
|
||||
|
||||
# This defines general information specific to guide types. Used across different modules as needed.
types = {
    'sonarr:anime': {
        'cmd_help': 'The anime release profile for Sonarr v3',
        'markdown_doc_name': 'Sonarr-Release-Profile-RegEx-Anime',
        'profile_typename': 'Anime'
    },
    'sonarr:web-dl': {
        'cmd_help': 'The WEB-DL release profile for Sonarr v3',
        'markdown_doc_name': 'Sonarr-Release-Profile-RegEx',
        'profile_typename': 'WEB-DL'
    }
}


# The three term lists a release-profile entry can belong to, mirroring the
# three lists Sonarr exposes on a release profile.
TermCategory = Enum('TermCategory', 'Preferred Required Ignored')


# Matches a markdown header line: group(1) is the run of '#' (header depth),
# group(2) is the header text.
header_regex = re.compile(r'^(#+)\s([\w\s\d]+)\s*$')
# Extracts a numeric score written like '[100]' or '[-10]' after the word 'score'.
score_regex = re.compile(r'score.*?\[(-?[\d]+)\]', re.IGNORECASE)
# Identifies headers that begin a new release-profile section.
header_release_profile_regex = re.compile(r'release profile', re.IGNORECASE)
# (category, pattern) pairs used by parse_category() to map header/sentence
# text onto a TermCategory; the first pattern that matches wins.
category_regex = (
    (TermCategory.Required, re.compile(r'must contain', re.IGNORECASE)),
    (TermCategory.Ignored, re.compile(r'must not contain', re.IGNORECASE)),
    (TermCategory.Preferred, re.compile(r'preferred', re.IGNORECASE)),
)
|
||||
|
||||
class ParserState:
    """Mutable cursor state carried across lines while parsing guide markdown."""

    def __init__(self):
        self.profile_name = None
        self.score = None
        self.current_category = TermCategory.Preferred
        self.bracket_depth = 0
        self.current_header_depth = -1

    def reset(self):
        """Restore every field to its initial value."""
        self.__init__()

    def is_valid(self):
        """True when enough state is known to safely capture fenced terms."""
        if self.profile_name is None or self.current_category is None:
            return False
        # Preferred terms additionally need a score to file them under.
        return self.current_category != TermCategory.Preferred or self.score is not None
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def get_markdown(page):
    """Fetch the named guide page from the TRaSH Guides repository as text."""
    url = f'https://raw.githubusercontent.com/TRaSH-/Guides/master/docs/Sonarr/V3/{page}.md'
    response = requests.get(url)
    return response.content.decode('utf8')
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_category(line):
    """Return the TermCategory whose pattern matches `line`, or None if none do."""
    for category, pattern in category_regex:
        if pattern.search(line):
            return category
    return None
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_markdown_outside_fence(args, logger, line, state, results):
    """Process one markdown line that sits outside a ``` code fence.

    Updates `state` (active profile, category, score, header depth) and the
    `include_preferred_when_renaming` flag on the active profile in `results`.
    """
    # Header processing
    if match := header_regex.search(line):
        header_depth = len(match.group(1))
        header_text = match.group(2)
        logger.debug(f'> Parsing Header [Text: {header_text}] [Depth: {header_depth}]')

        # Profile name (always reset previous state here)
        if header_release_profile_regex.search(header_text):
            state.reset()
            state.profile_name = header_text
            # BUGFIX: remember the depth of the profile's own header so the
            # branch below can distinguish nested sub-headers from sibling or
            # shallower headers. Previously current_header_depth was left at
            # its initial -1 forever, so the reset branch was dead code.
            state.current_header_depth = header_depth
            logger.debug(f' - New Profile [Text: {header_text}]')
            return

        # A header at the same depth or shallower than the profile header
        # ends the profile's section.
        elif header_depth <= state.current_header_depth:
            logger.debug(' - !! Non-nested, non-profile header found; resetting all state')
            state.reset()
            return

    # Until we find a header that defines a profile, we don't care about anything under it.
    if not state.profile_name:
        return

    # Check if we are enabling the "Include Preferred when Renaming" checkbox
    profile = results[state.profile_name]
    lower_line = line.lower()
    if 'include preferred' in lower_line:
        # The word 'not' in the sentence inverts the checkbox value.
        profile.include_preferred_when_renaming = 'not' not in lower_line
        logger.debug(f' - "Include Preferred" found [Value: {profile.include_preferred_when_renaming}] [Line: {line}]')
        return

    # Either we have a nested header or normal line at this point
    # We need to check if we're defining a new category.
    if category := parse_category(line):
        state.current_category = category
        logger.debug(f' - Category Set [Name: {category}] [Line: {line}]')
        # DO NOT RETURN HERE!
        # The category and score are sometimes in the same sentence (line); continue processing the line!!

    # Check this line for a score value. We do this even if our category may not be set to 'Preferred' yet.
    if match := score_regex.search(line):
        state.score = int(match.group(1))
        logger.debug(f' - Score [Value: {state.score}]')
        return
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_markdown_inside_fence(args, logger, line, state, results):
    """Capture one fenced term line into the active profile's matching list."""
    profile = results[state.profile_name]

    if state.current_category == TermCategory.Preferred:
        logger.debug(' + Capture Term '
                     f'[Category: {state.current_category}] '
                     f'[Score: {state.score}] '
                     f'[Strict: {args.strict_negative_scores}] '
                     f'[Term: {line}]')
        # With strict negative scores, negatively-scored terms become ignored
        # terms instead of preferred terms with a negative score.
        strict_negative = args.strict_negative_scores and state.score < 0
        target = profile.ignored if strict_negative else profile.preferred[state.score]
        target.append(line)
        return

    # Sometimes a comma is present at the end of these regexes, because when it's
    # pasted into Sonarr it acts as a delimiter. However, when using them with the
    # API we do not need them.
    line = line.rstrip(',')

    if state.current_category == TermCategory.Ignored:
        profile.ignored.append(line)
        logger.debug(f' + Capture Term [Category: {state.current_category}] [Term: {line}]')
    elif state.current_category == TermCategory.Required:
        profile.required.append(line)
        logger.debug(f' + Capture Term [Category: {state.current_category}] [Term: {line}]')
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def parse_markdown(args, logger, markdown_content):
    """Parse guide markdown into a mapping of profile name -> ProfileData.

    Delegates each line to parse_markdown_outside_fence() or
    parse_markdown_inside_fence() depending on whether we are inside a
    ``` code fence, tracking everything else in a ParserState.
    """
    results = defaultdict(ProfileData)
    state = ParserState()

    for line in markdown_content.splitlines():
        # Always check if we're starting a fenced code block. Whether we are inside one or not greatly affects
        # the logic we use.
        if line.startswith('```'):
            # Toggle between 0 (outside) and 1 (inside) on each fence marker.
            state.bracket_depth = 1 - state.bracket_depth
            continue

        # Not inside brackets
        if state.bracket_depth == 0:
            parse_markdown_outside_fence(args, logger, line, state, results)
        # Inside brackets
        elif state.bracket_depth == 1:
            # Refuse to capture terms until a profile/category/score context
            # has been established; otherwise terms would be misfiled.
            if not state.is_valid():
                logger.debug(' - !! Inside bracket with invalid state; skipping! '
                             f'[Profile Name: {state.profile_name}] '
                             f'[Category: {state.current_category}] '
                             f'[Score: {state.score}] '
                             f'[Line: {line}] '
                             )
            else:
                parse_markdown_inside_fence(args, logger, line, state, results)

    logger.debug('\n')
    return results
|
@ -0,0 +1,97 @@
|
||||
import re
|
||||
|
||||
from app import guide
|
||||
from app.guide.profile import types as profile_types
|
||||
from app.api.sonarr import Sonarr
|
||||
from app.trash_error import TrashError
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def process_profile(args, logger):
    """Download, parse and push release profiles for the guide type in `args.type`.

    With `args.preview` set, prints the parsed terms and exits without
    contacting Sonarr. Otherwise ensures requested tags exist, then creates
    or updates one release profile in Sonarr per parsed guide profile.
    """
    page = profile_types.get(args.type).get('markdown_doc_name')
    logger.debug(f'Using markdown page: {page}')
    profiles = guide.profile.parse_markdown(args, logger, guide.profile.get_markdown(page))

    # A few false-positive profiles are added sometimes. We filter these out by checking if they
    # actually have meaningful data attached to them, such as preferred terms. If they are mostly empty,
    # we remove them here.
    guide.utils.filter_profiles(profiles)

    if args.preview:
        guide.utils.print_terms_and_scores(profiles)
        exit(0)

    sonarr = Sonarr(args, logger)

    # If tags were provided, ensure they exist. Tags that do not exist are added first, so that we
    # may specify them with the release profile request payload.
    tag_ids = []
    if args.tags:
        tags = sonarr.get_tags()
        # Pass a copy of args.tags; presumably create_missing_tags mutates the
        # list it is given — TODO confirm against its implementation.
        tags = sonarr.create_missing_tags(tags, args.tags[:])
        logger.debug(f'Tags JSON: {tags}')

        # Get a list of IDs that we can pass along with the request to update/create release
        # profiles
        tag_ids = [t['id'] for t in tags if t['label'] in args.tags]
        logger.debug(f'Tag IDs: {tag_ids}')

    # Obtain all of the existing release profiles first. If any were previously created by our script
    # here, we favor replacing those instead of creating new ones, which would just be mostly duplicates
    # (but with some differences, since there have likely been updates since the last run).
    existing_profiles = sonarr.get_release_profiles()

    for name, profile in profiles.items():
        type_for_name = profile_types.get(args.type).get('profile_typename')
        new_profile_name = f'[Trash] {type_for_name} - {name}'
        profile_to_update = guide.utils.find_existing_profile(new_profile_name, existing_profiles)

        if profile_to_update:
            logger.info(f'Updating existing profile: {new_profile_name}')
            sonarr.update_existing_profile(profile_to_update, profile, tag_ids)
        else:
            logger.info(f'Creating new profile: {new_profile_name}')
            sonarr.create_release_profile(new_profile_name, profile, tag_ids)
|
||||
|
||||
# --------------------------------------------------------------------------------------------------
|
||||
def process_quality(args, logger):
    """Parse the guide's quality definitions and push the selected one to Sonarr.

    For the synthetic 'sonarr:hybrid' type, merges the anime and non-anime
    definitions: 720/1080 qualities take min/max bounds across both lists,
    everything else uses the non-anime values. With `args.preview` set,
    prints the definition and exits without contacting Sonarr.
    """
    guide_definitions = guide.quality.parse_markdown(logger, guide.quality.get_markdown())

    if args.type == 'sonarr:hybrid':
        hybrid_quality_regex = re.compile(r'720|1080')
        anime = guide_definitions.get('sonarr:anime')
        nonanime = guide_definitions.get('sonarr:non-anime')
        if len(anime) != len(nonanime):
            raise TrashError('For some reason the anime and non-anime quality definitions are not the same length')

        logger.info(
            'Notice: Hybrid only functions on 720/1080 qualities and uses non-anime values for the rest (e.g. 2160)')

        # Index anime qualities by name once instead of a linear scan per
        # non-anime entry (the original was O(n^2) and never broke out of the
        # inner loop; a dict keeps the same last-match-wins behavior).
        anime_by_name = {entry[0]: entry for entry in anime}

        hybrid = []
        for left in nonanime:
            if not hybrid_quality_regex.search(left[0]):
                logger.debug('Ignored Quality: ' + left[0])
                hybrid.append(left)
            else:
                right = anime_by_name.get(left[0])
                if right is None:
                    raise TrashError(f'Could not find matching anime quality for non-anime quality named: {left[0]}')

                # Take the widest bounds across both guides: (name, min, max).
                hybrid.append((left[0], min(left[1], right[1]), max(left[2], right[2])))

        guide_definitions['sonarr:hybrid'] = hybrid

    selected_definition = guide_definitions.get(args.type)

    if args.preview:
        guide.utils.quality_preview(selected_definition)
        exit(0)

    print(f'Updating quality definition using {args.type}')
    sonarr = Sonarr(args, logger)
    definition = sonarr.get_quality_definition()
    sonarr.update_quality_definition(definition, selected_definition)
|
@ -0,0 +1,2 @@
|
||||
class TrashError(Exception):
    """Root of the application's exception hierarchy; all app errors derive from it."""
|
@ -0,0 +1,16 @@
|
||||
import pytest
|
||||
|
||||
from app.trash_error import TrashError
|
||||
from app.logic import sonarr as logic
|
||||
from app import cmd
|
||||
from tests.mock_logger import MockLogger
|
||||
|
||||
class TestSonarrLogic:
    # Shared stub logger so processing code can log without real output.
    logger = MockLogger()

    @staticmethod
    def test_throw_without_required_arguments():
        # Only --base-uri is supplied; process_profile must abort with a
        # TrashError (presumably because the API key is missing — confirm
        # against cmd's argument requirements).
        with pytest.raises(TrashError):
            args = cmd.setup_and_parse_args(['profile', 'sonarr:anime', '--base-uri', 'value'])
            logic.process_profile(args, TestSonarrLogic.logger)
|
||||
|
@ -1,19 +0,0 @@
|
||||
import pytest
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from app import cmd
|
||||
from tests.mock_logger import MockLogger
|
||||
|
||||
sys.path.insert(0, Path(__name__).parent.parent)
|
||||
import trash
|
||||
|
||||
class TestEntrypoint:
    # Shared stub logger so processing code can log without real output.
    logger = MockLogger()

    @staticmethod
    def test_throw_without_required_arguments():
        # Only --base-uri is supplied; processing must abort with a
        # ValueError (presumably because the API key is missing — confirm
        # against cmd's argument requirements).
        with pytest.raises(ValueError):
            args = cmd.setup_and_parse_args(['profile', 'sonarr:anime', '--base-uri', 'value'])
            trash.process_sonarr_profile(args, TestEntrypoint.logger)
|
||||
|
Loading…
Reference in new issue