Merge development into master

pull/1370/head
github-actions[bot] 4 years ago committed by GitHub
commit 38a7ba522e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -2,8 +2,6 @@ name: release_beta_to_dev
on:
push:
branches: [ development ]
pull_request:
branches: [ development ]
jobs:
Release:

@ -1 +1 @@
0.9.2
0.9.3

@ -297,19 +297,31 @@ class SystemReleases(Resource):
try:
with io.open(os.path.join(args.config_dir, 'config', 'releases.txt'), 'r', encoding='UTF-8') as f:
releases = json.loads(f.read())
releases = releases[:5]
for i, release in enumerate(releases):
filtered_releases = []
for release in releases:
if settings.general.branch == 'master' and not release['prerelease']:
filtered_releases.append(release)
elif settings.general.branch != 'master' and any(not x['prerelease'] for x in filtered_releases):
continue
elif settings.general.branch != 'master':
filtered_releases.append(release)
if settings.general.branch == 'master':
filtered_releases = filtered_releases[:5]
for i, release in enumerate(filtered_releases):
body = release['body'].replace('- ', '').split('\n')[1:]
releases[i] = {"body": body,
"name": release['name'],
"date": release['date'][:10],
"prerelease": release['prerelease'],
"current": True if release['name'].lstrip('v') == os.environ["BAZARR_VERSION"] else False}
filtered_releases[i] = {"body": body,
"name": release['name'],
"date": release['date'][:10],
"prerelease": release['prerelease'],
"current": True if release['name'].lstrip('v') == os.environ["BAZARR_VERSION"]
else False}
except Exception as e:
logging.exception(
'BAZARR cannot parse releases caching file: ' + os.path.join(args.config_dir, 'config', 'releases.txt'))
return jsonify(data=releases)
return jsonify(data=filtered_releases)
class Series(Resource):
@ -856,7 +868,7 @@ class EpisodesTools(Resource):
def get(self):
episodeid = request.args.get('episodeid')
episode_ext_subs = database.execute("SELECT path, subtitles FROM table_episodes WHERE sonarrEpisodeId=?",
episode_ext_subs = database.execute("SELECT path, subtitles, season FROM table_episodes WHERE sonarrEpisodeId=?",
(episodeid,), only_one=True)
try:
all_subs = ast.literal_eval(episode_ext_subs['subtitles'])
@ -875,6 +887,7 @@ class EpisodesTools(Resource):
episode_external_subtitles.append({'language': subs[0],
'path': path_mappings.path_replace(subs[1]),
'filename': os.path.basename(subs[1]),
'season' : episode_ext_subs['season'],
'videopath': path_mappings.path_replace(episode_ext_subs['path'])})
return jsonify(data=episode_external_subtitles)
@ -1912,6 +1925,46 @@ class SyncSubtitles(Resource):
return '', 200
class SyncAllSubtitles(Resource):
    @authenticate
    def post(self):
        """Batch-sync all matching external subtitles for one series.

        Form parameters:
            language:  subtitle language to sync (e.g. 'en'); also passed to subsync.
            mediaType: must be 'series' (only series batch-sync is implemented here).
            show:      sonarrSeriesId of the series to process (required).
            season:    optional season number; when given, only that season is synced.

        Returns an empty body with HTTP 200 on success, or 400 when the required
        parameters are missing/unsupported (previously this path raised a
        NameError because 'ret' was only bound inside the series branch).
        """
        language = request.form.get('language')
        media_type = request.form.get('mediaType')
        season = request.form.get('season')
        show = request.form.get('show')

        # Guard clause: anything other than a series with a show id is unsupported.
        if media_type != 'series' or not show:
            return '', 400

        if season:
            episode_metadata = database.execute(
                "SELECT sonarrSeriesId, sonarrEpisodeId, path, subtitles FROM table_episodes"
                " WHERE sonarrSeriesId = ?1 AND season = ?2", (show, season,))
        else:
            episode_metadata = database.execute(
                "SELECT sonarrSeriesId, sonarrEpisodeId, path, subtitles FROM table_episodes"
                " WHERE sonarrSeriesId = ?1", (show,))

        # Match subtitles against the requested language (was hard-coded to 'en',
        # silently ignoring the 'language' parameter). Subtitle tags may carry a
        # modifier suffix such as 'en:forced', so compare the base code only.
        target_lang = (language or 'en').split(':')[0]

        for episode in episode_metadata:
            if not episode['subtitles']:
                continue
            # 'subtitles' is stored as a Python-literal string; decode it once.
            episode.update({"subtitles": ast.literal_eval(episode['subtitles'])})
            for subs in episode['subtitles']:
                lang = subs[0].split(':')
                if lang[0] != target_lang:
                    continue
                video_path = path_mappings.path_replace(episode['path'])
                subtitles_path = path_mappings.path_replace(subs[1])
                sonarr_series_id = episode['sonarrSeriesId']
                sonarr_episode_id = episode['sonarrEpisodeId']
                if video_path and subtitles_path and language and media_type and \
                        sonarr_series_id and sonarr_episode_id:
                    # Lazy %-args keep formatting cost off the no-log path.
                    logging.info('Batch-syncing subtitle %s for series %s episode %s',
                                 subtitles_path, sonarr_series_id, sonarr_episode_id)
                    subsync.sync(video_path=video_path, srt_path=subtitles_path,
                                 srt_lang=language, media_type=media_type,
                                 sonarr_series_id=sonarr_series_id,
                                 sonarr_episode_id=sonarr_episode_id)
        logging.info('Finished batch-sync')
        return '', 200
class SubMods(Resource):
@authenticate
@ -2078,9 +2131,11 @@ api.add_resource(BlacklistMovieSubtitlesRemove, '/blacklist_movie_subtitles_remo
api.add_resource(BlacklistMovieSubtitlesRemoveAll, '/blacklist_movie_subtitles_remove_all')
api.add_resource(SyncSubtitles, '/sync_subtitles')
api.add_resource(SyncAllSubtitles, '/sync_all_subtitles')
api.add_resource(SubMods, '/sub_mods')
api.add_resource(SubTranslate, '/sub_translate')
api.add_resource(BrowseBazarrFS, '/browse_bazarr_filesystem')
api.add_resource(BrowseSonarrFS, '/browse_sonarr_filesystem')
api.add_resource(BrowseRadarrFS, '/browse_radarr_filesystem')

@ -1,138 +1,162 @@
[
{
"system": "Linux",
"machine": "aarch64",
"directory": "unrar",
"name": "unrar",
"checksum": "07a6371cc7db8493352739ce26b19ea1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/aarch64/unrar/unrar"
},
{
"system": "Linux",
"machine": "armv5tel",
"directory": "unrar",
"name": "unrar",
"checksum": "07a6371cc7db8493352739ce26b19ea1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/armv5tel/unrar/unrar"
},
{
"system": "Linux",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "43910b9df223a772402830461a80e9a1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/ffmpeg/ffmpeg"
},
{
"system": "Linux",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "24ccbd651630c562fbaf17e30a18ce38",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/ffmpeg/ffprobe"
},
{
"system": "Linux",
"machine": "i386",
"directory": "unrar",
"name": "unrar",
"checksum": "f03ae1d4abf871b95142a7248a6cfa3a",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/unrar/unrar"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "9fd68f7b80cd1177b94a71455b288131",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/ffmpeg/ffmpeg"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "29fdc34d9c7ad07c8f75e83d8fb5965b",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/ffmpeg/ffprobe"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "unrar",
"name": "unrar",
"checksum": "2a63e80d50c039e1fba01de7dcfa6432",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/unrar/unrar"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "af1723314ba5ca9c70ef35dc8b5c2260",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/ffmpeg/ffmpeg"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "d81468cebd6630450d2f5a17720b4504",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/ffmpeg/ffprobe"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "unrar",
"name": "unrar",
"checksum": "47de1fed0a5d4f7bac4d8f7557926398",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/unrar/unrar"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "af1723314ba5ca9c70ef35dc8b5c2260",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffmpeg"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "d81468cebd6630450d2f5a17720b4504",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffprobe"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "unrar",
"name": "unrar",
"checksum": "8d5b3d5d6be2c14b74b02767430ade9c",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/unrar/unrar"
},
{
"system": "Windows",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg.exe",
"checksum": "43f89a5172c377e05ebb4c26a498a366",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/ffmpeg/ffmpeg.exe"
},
{
"system": "Windows",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe.exe",
"checksum": "a0d88aa85624070a8ab65ed99c214bea",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/ffmpeg/ffprobe.exe"
},
{
"system": "Windows",
"machine": "i386",
"directory": "unrar",
"name": "unrar.exe",
"checksum": "4611a5b3f70a8d6c40776e0bfa3b3f36",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/unrar/UnRAR.exe"
}
]
{
"system": "Linux",
"machine": "aarch64",
"directory": "unrar",
"name": "unrar",
"checksum": "07a6371cc7db8493352739ce26b19ea1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/aarch64/unrar/unrar"
},
{
"system": "Linux",
"machine": "armv5tel",
"directory": "unrar",
"name": "unrar",
"checksum": "07a6371cc7db8493352739ce26b19ea1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/armv5tel/unrar/unrar"
},
{
"system": "Linux",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "43910b9df223a772402830461a80e9a1",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/ffmpeg/ffmpeg"
},
{
"system": "Linux",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "24ccbd651630c562fbaf17e30a18ce38",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/ffmpeg/ffprobe"
},
{
"system": "Linux",
"machine": "i386",
"directory": "unrar",
"name": "unrar",
"checksum": "f03ae1d4abf871b95142a7248a6cfa3a",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/i386/unrar/unrar"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "9fd68f7b80cd1177b94a71455b288131",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/ffmpeg/ffmpeg"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "29fdc34d9c7ad07c8f75e83d8fb5965b",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/ffmpeg/ffprobe"
},
{
"system": "Linux",
"machine": "x86_64",
"directory": "unrar",
"name": "unrar",
"checksum": "2a63e80d50c039e1fba01de7dcfa6432",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Linux/x86_64/unrar/unrar"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "af1723314ba5ca9c70ef35dc8b5c2260",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/ffmpeg/ffmpeg"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "d81468cebd6630450d2f5a17720b4504",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/ffmpeg/ffprobe"
},
{
"system": "MacOSX",
"machine": "i386",
"directory": "unrar",
"name": "unrar",
"checksum": "47de1fed0a5d4f7bac4d8f7557926398",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/i386/unrar/unrar"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "af1723314ba5ca9c70ef35dc8b5c2260",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffmpeg"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "d81468cebd6630450d2f5a17720b4504",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffprobe"
},
{
"system": "MacOSX",
"machine": "x86_64",
"directory": "unrar",
"name": "unrar",
"checksum": "8d5b3d5d6be2c14b74b02767430ade9c",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/unrar/unrar"
},
{
"system": "MacOSX",
"machine": "arm64",
"directory": "ffmpeg",
"name": "ffmpeg",
"checksum": "af1723314ba5ca9c70ef35dc8b5c2260",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffmpeg"
},
{
"system": "MacOSX",
"machine": "arm64",
"directory": "ffmpeg",
"name": "ffprobe",
"checksum": "d81468cebd6630450d2f5a17720b4504",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/ffmpeg/ffprobe"
},
{
"system": "MacOSX",
"machine": "arm64",
"directory": "unrar",
"name": "unrar",
"checksum": "8d5b3d5d6be2c14b74b02767430ade9c",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/MacOSX/x86_64/unrar/unrar"
},
{
"system": "Windows",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffmpeg.exe",
"checksum": "43f89a5172c377e05ebb4c26a498a366",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/ffmpeg/ffmpeg.exe"
},
{
"system": "Windows",
"machine": "i386",
"directory": "ffmpeg",
"name": "ffprobe.exe",
"checksum": "a0d88aa85624070a8ab65ed99c214bea",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/ffmpeg/ffprobe.exe"
},
{
"system": "Windows",
"machine": "i386",
"directory": "unrar",
"name": "unrar.exe",
"checksum": "4611a5b3f70a8d6c40776e0bfa3b3f36",
"url": "https://github.com/morpheus65535/bazarr-binaries/raw/master/bin/Windows/i386/unrar/UnRAR.exe"
}
]

@ -13,7 +13,7 @@ from config import settings
def check_releases():
releases = []
url_releases = 'https://api.github.com/repos/morpheus65535/Bazarr/releases'
url_releases = 'https://api.github.com/repos/morpheus65535/Bazarr/releases?per_page=100'
try:
logging.debug('BAZARR getting releases from Github: {}'.format(url_releases))
r = requests.get(url_releases, allow_redirects=True)

@ -3,6 +3,8 @@
import hashlib
import os
from urllib.parse import quote_plus
from subliminal.cache import region
from simpleconfigparser import simpleconfigparser
@ -372,8 +374,8 @@ def configure_captcha_func():
def configure_proxy_func():
if settings.proxy.type != 'None':
if settings.proxy.username != '' and settings.proxy.password != '':
proxy = settings.proxy.type + '://' + settings.proxy.username + ':' + settings.proxy.password + '@' + \
settings.proxy.url + ':' + settings.proxy.port
proxy = settings.proxy.type + '://' + quote_plus(settings.proxy.username) + ':' + \
quote_plus(settings.proxy.password) + '@' + settings.proxy.url + ':' + settings.proxy.port
else:
proxy = settings.proxy.type + '://' + settings.proxy.url + ':' + settings.proxy.port
os.environ['HTTP_PROXY'] = str(proxy)

@ -105,7 +105,7 @@ def get_providers():
logging.info("Using %s again after %s, (disabled because: %s)", provider, throttle_desc, reason)
del tp[provider]
set_throttled_providers(str(tp))
# if forced only is enabled: # FIXME: prepared for forced-only implementation to remove providers that don't support forced-only subtitles
# for provider in providers_list:
# if provider in PROVIDERS_FORCED_OFF:
@ -298,4 +298,12 @@ def set_throttled_providers(data):
handle.write(data)
tp = eval(str(get_throttled_providers()))
try:
tp = eval(str(get_throttled_providers()))
if not isinstance(tp, dict):
raise ValueError('tp should be a dict')
except Exception:
logging.error("Invalid content in throttled_providers.dat. Resetting")
# set empty content in throttled_providers.dat
set_throttled_providers('')
tp = eval(str(get_throttled_providers()))

@ -10,6 +10,7 @@ import pickle
import codecs
import re
import subliminal
import copy
from datetime import datetime, timedelta
from subzero.language import Language
from subzero.video import parse_video
@ -68,7 +69,7 @@ def get_video(path, title, sceneName, providers=None, media_type="movie"):
refine_from_db(original_path, video)
refine_from_ffprobe(original_path, video)
logging.debug('BAZARR is using these video object properties: %s', vars(video))
logging.debug('BAZARR is using these video object properties: %s', vars(copy.deepcopy(video)))
return video
except Exception as e:

@ -4,14 +4,12 @@
Entry point module
"""
# pragma: no cover
from __future__ import print_function
import json
import logging
import os
import sys
import six
from collections import OrderedDict
from rebulk.__version__ import __version__ as __rebulk_version__
from guessit import api
@ -20,12 +18,6 @@ from guessit.jsonutils import GuessitEncoder
from guessit.options import argument_parser, parse_options, load_config, merge_options
try:
from collections import OrderedDict
except ImportError: # pragma: no-cover
from ordereddict import OrderedDict # pylint:disable=import-error
def guess_filename(filename, options):
"""
Guess a single filename using given options
@ -48,6 +40,7 @@ def guess_filename(filename, options):
if options.get('json'):
print(json.dumps(guess, cls=GuessitEncoder, ensure_ascii=False))
elif options.get('yaml'):
# pylint:disable=import-outside-toplevel
import yaml
from guessit import yamlutils
@ -78,6 +71,7 @@ def display_properties(options):
else:
print(json.dumps(list(properties.keys()), cls=GuessitEncoder, ensure_ascii=False))
elif options.get('yaml'):
# pylint:disable=import-outside-toplevel
import yaml
from guessit import yamlutils
if options.get('values'):
@ -97,24 +91,10 @@ def display_properties(options):
print(4 * ' ' + '[!] %s' % (property_value,))
def fix_argv_encoding():
"""
Fix encoding of sys.argv on windows Python 2
"""
if six.PY2 and os.name == 'nt': # pragma: no cover
# see http://bugs.python.org/issue2128
import locale
for i, j in enumerate(sys.argv):
sys.argv[i] = j.decode(locale.getpreferredencoding())
def main(args=None): # pylint:disable=too-many-branches
"""
Main function for entry point
"""
fix_argv_encoding()
if args is None: # pragma: no cover
options = parse_options()
else:
@ -142,7 +122,7 @@ def main(args=None): # pylint:disable=too-many-branches
if options.get('yaml'):
try:
import yaml # pylint:disable=unused-variable,unused-import
import yaml # pylint:disable=unused-variable,unused-import,import-outside-toplevel
except ImportError: # pragma: no cover
del options['yaml']
print('PyYAML is not installed. \'--yaml\' option will be ignored ...', file=sys.stderr)
@ -156,10 +136,7 @@ def main(args=None): # pylint:disable=too-many-branches
for filename in options.get('filename'):
filenames.append(filename)
if options.get('input_file'):
if six.PY2:
input_file = open(options.get('input_file'), 'r')
else:
input_file = open(options.get('input_file'), 'r', encoding='utf-8')
input_file = open(options.get('input_file'), 'r', encoding='utf-8')
try:
filenames.extend([line.strip() for line in input_file.readlines()])
finally:

@ -4,4 +4,4 @@
Version module
"""
# pragma: no cover
__version__ = '3.1.1'
__version__ = '3.3.1'

@ -4,15 +4,12 @@
API functions that can be used by external software
"""
try:
from collections import OrderedDict
except ImportError: # pragma: no-cover
from ordereddict import OrderedDict # pylint:disable=import-error
from collections import OrderedDict
from pathlib import Path
import os
import traceback
import six
from rebulk.introspector import introspect
from .__version__ import __version__
@ -26,18 +23,18 @@ class GuessitException(Exception):
"""
def __init__(self, string, options):
super(GuessitException, self).__init__("An internal error has occured in guessit.\n"
"===================== Guessit Exception Report =====================\n"
"version=%s\n"
"string=%s\n"
"options=%s\n"
"--------------------------------------------------------------------\n"
"%s"
"--------------------------------------------------------------------\n"
"Please report at "
"https://github.com/guessit-io/guessit/issues.\n"
"====================================================================" %
(__version__, str(string), str(options), traceback.format_exc()))
super().__init__("An internal error has occured in guessit.\n"
"===================== Guessit Exception Report =====================\n"
"version=%s\n"
"string=%s\n"
"options=%s\n"
"--------------------------------------------------------------------\n"
"%s"
"--------------------------------------------------------------------\n"
"Please report at "
"https://github.com/guessit-io/guessit/issues.\n"
"====================================================================" %
(__version__, str(string), str(options), traceback.format_exc()))
self.string = string
self.options = options
@ -113,9 +110,7 @@ class GuessItApi(object):
return [cls._fix_encoding(item) for item in value]
if isinstance(value, dict):
return {cls._fix_encoding(k): cls._fix_encoding(v) for k, v in value.items()}
if six.PY2 and isinstance(value, six.text_type):
return value.encode('utf-8')
if six.PY3 and isinstance(value, six.binary_type):
if isinstance(value, bytes):
return value.decode('ascii')
return value
@ -175,16 +170,12 @@ class GuessItApi(object):
:return:
:rtype:
"""
try:
from pathlib import Path
if isinstance(string, Path):
try:
# Handle path-like object
string = os.fspath(string)
except AttributeError:
string = str(string)
except ImportError:
pass
if isinstance(string, Path):
try:
# Handle path-like object
string = os.fspath(string)
except AttributeError:
string = str(string)
try:
options = parse_options(options, True)
@ -194,32 +185,27 @@ class GuessItApi(object):
result_decode = False
result_encode = False
if six.PY2:
if isinstance(string, six.text_type):
string = string.encode("utf-8")
result_decode = True
elif isinstance(string, six.binary_type):
string = six.binary_type(string)
if six.PY3:
if isinstance(string, six.binary_type):
string = string.decode('ascii')
result_encode = True
elif isinstance(string, six.text_type):
string = six.text_type(string)
if isinstance(string, bytes):
string = string.decode('ascii')
result_encode = True
matches = self.rebulk.matches(string, options)
if result_decode:
for match in matches:
if isinstance(match.value, six.binary_type):
if isinstance(match.value, bytes):
match.value = match.value.decode("utf-8")
if result_encode:
for match in matches:
if isinstance(match.value, six.text_type):
if isinstance(match.value, str):
match.value = match.value.encode("ascii")
return matches.to_dict(options.get('advanced', False), options.get('single_value', False),
options.get('enforce_list', False))
except:
raise GuessitException(string, options)
matches_dict = matches.to_dict(options.get('advanced', False), options.get('single_value', False),
options.get('enforce_list', False))
output_input_string = options.get('output_input_string', False)
if output_input_string:
matches_dict['input_string'] = matches.input_string
return matches_dict
except Exception as err:
raise GuessitException(string, options) from err
def properties(self, options=None):
"""
@ -235,8 +221,8 @@ class GuessItApi(object):
options = merge_options(config, options)
unordered = introspect(self.rebulk, options).properties
ordered = OrderedDict()
for k in sorted(unordered.keys(), key=six.text_type):
ordered[k] = list(sorted(unordered[k], key=six.text_type))
for k in sorted(unordered.keys(), key=str):
ordered[k] = list(sorted(unordered[k], key=str))
if hasattr(self.rebulk, 'customize_properties'):
ordered = self.rebulk.customize_properties(ordered)
return ordered

@ -1,27 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Backports
"""
# pragma: no-cover
# pylint: skip-file
def cmp_to_key(mycmp):
    """functools.cmp_to_key backport: wrap a cmp-style function as a sort key."""
    class KeyWrapper(object):
        """Proxy whose rich comparisons delegate to the captured cmp function."""

        def __init__(self, wrapped, *args):  # pylint: disable=unused-argument
            self.obj = wrapped

        def _compare(self, other):
            # Single comparison point; each dunder just tests the sign.
            return mycmp(self.obj, other.obj)

        def __lt__(self, other):
            return self._compare(other) < 0

        def __gt__(self, other):
            return self._compare(other) > 0

        def __eq__(self, other):
            return self._compare(other) == 0

        def __le__(self, other):
            return self._compare(other) <= 0

        def __ge__(self, other):
            return self._compare(other) >= 0

        def __ne__(self, other):
            return self._compare(other) != 0

    return KeyWrapper

@ -416,6 +416,10 @@
"Animal Planet": "ANPL",
"AnimeLab": "ANLB",
"AOL": "AOL",
"AppleTV": [
"ATVP",
"ATV+"
],
"ARD": "ARD",
"BBC iPlayer": [
"iP",
@ -482,6 +486,7 @@
"HBO",
"re:HBO-?Go"
],
"HBO Max": "HMAX",
"HGTV": "HGTV",
"History": [
"HIST",
@ -490,7 +495,10 @@
"Hulu": "HULU",
"Investigation Discovery": "ID",
"IFC": "IFC",
"iTunes": "iTunes",
"iTunes": [
"iTunes",
{"pattern": "iT", "ignore_case": false}
],
"ITV": "ITV",
"Knowledge Network": "KNOW",
"Lifetime": "LIFE",
@ -537,6 +545,7 @@
"SeeSo"
],
"Shomi": "SHMI",
"Showtime": "SHO",
"Spike": "SPIK",
"Spike TV": [
"SPKE",

@ -4,10 +4,7 @@
Monkeypatch initialisation functions
"""
try:
from collections import OrderedDict
except ImportError: # pragma: no-cover
from ordereddict import OrderedDict # pylint:disable=import-error
from collections import OrderedDict
from rebulk.match import Match

@ -11,8 +11,6 @@ import shlex
from argparse import ArgumentParser
import six
def build_argument_parser():
"""
@ -68,6 +66,8 @@ def build_argument_parser():
help='Display information for filename guesses as json output')
output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None,
help='Display information for filename guesses as yaml output')
output_opts.add_argument('-i', '--output-input-string', dest='output_input_string', action='store_true',
default=False, help='Add input_string property in the output')
conf_opts = opts.add_argument_group("Configuration")
conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None,
@ -108,7 +108,7 @@ def parse_options(options=None, api=False):
:return:
:rtype:
"""
if isinstance(options, six.string_types):
if isinstance(options, str):
args = shlex.split(options)
options = vars(argument_parser.parse_args(args))
elif options is None:
@ -153,7 +153,7 @@ def load_config(options):
cwd = os.getcwd()
yaml_supported = False
try:
import yaml # pylint:disable=unused-variable,unused-import
import yaml # pylint:disable=unused-variable,unused-import,import-outside-toplevel
yaml_supported = True
except ImportError:
pass
@ -225,7 +225,7 @@ def merge_option_value(option, value, merged):
if value is not None and option != 'pristine':
if option in merged.keys() and isinstance(merged[option], list):
for val in value:
if val not in merged[option]:
if val not in merged[option] and val is not None:
merged[option].append(val)
elif option in merged.keys() and isinstance(merged[option], dict):
merged[option] = merge_options(merged[option], value)
@ -250,13 +250,13 @@ def load_config_file(filepath):
return json.load(config_file_data)
if filepath.endswith('.yaml') or filepath.endswith('.yml'):
try:
import yaml
import yaml # pylint:disable=import-outside-toplevel
with open(filepath) as config_file_data:
return yaml.load(config_file_data, yaml.SafeLoader)
except ImportError: # pragma: no cover
except ImportError as err: # pragma: no cover
raise ConfigurationException('Configuration file extension is not supported. '
'PyYAML should be installed to support "%s" file' % (
filepath,))
filepath,)) from err
try:
# Try to load input as JSON

@ -3,7 +3,7 @@
"""
Common module
"""
import re
from rebulk.remodule import re
seps = r' [](){}+*|=-_~#/\\.,;:' # list of tags/words separators
seps_no_groups = seps.replace('[](){}', '')

@ -3,10 +3,8 @@
"""
Comparators
"""
try:
from functools import cmp_to_key
except ImportError:
from ...backports import cmp_to_key
from functools import cmp_to_key
def marker_comparator_predicate(match):
@ -14,10 +12,10 @@ def marker_comparator_predicate(match):
Match predicate used in comparator
"""
return (
not match.private
and match.name not in ('proper_count', 'title')
and not (match.name == 'container' and 'extension' in match.tags)
and not (match.name == 'other' and match.value == 'Rip')
not match.private
and match.name not in ('proper_count', 'title')
and not (match.name == 'container' and 'extension' in match.tags)
and not (match.name == 'other' and match.value == 'Rip')
)

@ -3,7 +3,7 @@
"""
Expected property factory
"""
import re
from rebulk.remodule import re
from rebulk import Rebulk
from rebulk.utils import find_all

@ -3,10 +3,9 @@
"""
Quantities: Size
"""
import re
from abc import abstractmethod
import six
from rebulk.remodule import re
from ..common import seps
@ -50,7 +49,7 @@ class Quantity(object):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, six.string_types):
if isinstance(other, str):
return str(self) == other
if not isinstance(other, self.__class__):
return NotImplemented

@ -5,6 +5,7 @@ Groups markers (...), [...] and {...}
"""
from rebulk import Rebulk
from ...options import ConfigurationException
def groups(config):
"""
@ -21,6 +22,9 @@ def groups(config):
starting = config['starting']
ending = config['ending']
if len(starting) != len(ending):
raise ConfigurationException("Starting and ending groups must have the same length")
def mark_groups(input_string):
"""
Functional pattern to mark groups (...), [...] and {...}.
@ -28,7 +32,7 @@ def groups(config):
:param input_string:
:return:
"""
openings = ([], [], [])
openings = ([], ) * len(starting)
i = 0
ret = []

@ -6,8 +6,6 @@ Processors
from collections import defaultdict
import copy
import six
from rebulk import Rebulk, Rule, CustomRule, POST_PROCESS, PRE_PROCESS, AppendMatch, RemoveMatch
from .common import seps_no_groups
@ -68,7 +66,7 @@ class EquivalentHoles(Rule):
for name in matches.names:
for hole in list(holes):
for current_match in matches.named(name):
if isinstance(current_match.value, six.string_types) and \
if isinstance(current_match.value, str) and \
hole.value.lower() == current_match.value.lower():
if 'equivalent-ignore' in current_match.tags:
continue
@ -96,7 +94,7 @@ class RemoveAmbiguous(Rule):
consequence = RemoveMatch
def __init__(self, sort_function=marker_sorted, predicate=None):
super(RemoveAmbiguous, self).__init__()
super().__init__()
self.sort_function = sort_function
self.predicate = predicate
@ -131,7 +129,7 @@ class RemoveLessSpecificSeasonEpisode(RemoveAmbiguous):
keep the one tagged as 'SxxExx' or in the rightmost filepart.
"""
def __init__(self, name):
super(RemoveLessSpecificSeasonEpisode, self).__init__(
super().__init__(
sort_function=(lambda markers, matches:
marker_sorted(list(reversed(markers)), matches,
lambda match: match.name == name and 'SxxExx' in match.tags)),

@ -130,7 +130,7 @@ class AudioProfileRule(Rule):
consequence = RemoveMatch
def __init__(self, codec):
super(AudioProfileRule, self).__init__()
super().__init__()
self.codec = codec
def enabled(self, context):
@ -166,7 +166,7 @@ class DtsHDRule(AudioProfileRule):
"""
def __init__(self):
super(DtsHDRule, self).__init__('DTS-HD')
super().__init__('DTS-HD')
class DtsRule(AudioProfileRule):
@ -175,7 +175,7 @@ class DtsRule(AudioProfileRule):
"""
def __init__(self):
super(DtsRule, self).__init__('DTS')
super().__init__('DTS')
class AacRule(AudioProfileRule):
@ -184,7 +184,7 @@ class AacRule(AudioProfileRule):
"""
def __init__(self):
super(AacRule, self).__init__('AAC')
super().__init__('AAC')
class DolbyDigitalRule(AudioProfileRule):
@ -193,7 +193,7 @@ class DolbyDigitalRule(AudioProfileRule):
"""
def __init__(self):
super(DolbyDigitalRule, self).__init__('Dolby Digital')
super().__init__('Dolby Digital')
class HqConflictRule(Rule):

@ -3,7 +3,7 @@
"""
video_bit_rate and audio_bit_rate properties
"""
import re
from rebulk.remodule import re
from rebulk import Rebulk
from rebulk.rules import Rule, RemoveMatch, RenameMatch

@ -47,7 +47,7 @@ class RemoveConflictsWithEpisodeTitle(Rule):
consequence = RemoveMatch
def __init__(self, previous_names):
super(RemoveConflictsWithEpisodeTitle, self).__init__()
super().__init__()
self.previous_names = previous_names
self.next_names = ('streaming_service', 'screen_size', 'source',
'video_codec', 'audio_codec', 'other', 'container')
@ -129,7 +129,7 @@ class EpisodeTitleFromPosition(TitleBaseRule):
dependency = TitleToEpisodeTitle
def __init__(self, previous_names):
super(EpisodeTitleFromPosition, self).__init__('episode_title', ['title'])
super().__init__('episode_title', ['title'])
self.previous_names = previous_names
def hole_filter(self, hole, matches):
@ -150,12 +150,12 @@ class EpisodeTitleFromPosition(TitleBaseRule):
def should_remove(self, match, matches, filepart, hole, context):
if match.name == 'episode_details':
return False
return super(EpisodeTitleFromPosition, self).should_remove(match, matches, filepart, hole, context)
return super().should_remove(match, matches, filepart, hole, context)
def when(self, matches, context): # pylint:disable=inconsistent-return-statements
if matches.named('episode_title'):
return
return super(EpisodeTitleFromPosition, self).when(matches, context)
return super().when(matches, context)
class AlternativeTitleReplace(Rule):
@ -166,7 +166,7 @@ class AlternativeTitleReplace(Rule):
consequence = RenameMatch
def __init__(self, previous_names):
super(AlternativeTitleReplace, self).__init__()
super().__init__()
self.previous_names = previous_names
def when(self, matches, context): # pylint:disable=inconsistent-return-statements

@ -479,7 +479,7 @@ class SeePatternRange(Rule):
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators):
super(SeePatternRange, self).__init__()
super().__init__()
self.range_separators = range_separators
def when(self, matches, context):
@ -516,7 +516,7 @@ class AbstractSeparatorRange(Rule):
consequence = [RemoveMatch, AppendMatch]
def __init__(self, range_separators, property_name):
super(AbstractSeparatorRange, self).__init__()
super().__init__()
self.range_separators = range_separators
self.property_name = property_name
@ -608,7 +608,7 @@ class EpisodeNumberSeparatorRange(AbstractSeparatorRange):
"""
def __init__(self, range_separators):
super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode")
super().__init__(range_separators, "episode")
class SeasonSeparatorRange(AbstractSeparatorRange):
@ -617,7 +617,7 @@ class SeasonSeparatorRange(AbstractSeparatorRange):
"""
def __init__(self, range_separators):
super(SeasonSeparatorRange, self).__init__(range_separators, "season")
super().__init__(range_separators, "season")
class RemoveWeakIfMovie(Rule):
@ -662,7 +662,7 @@ class RemoveWeak(Rule):
consequence = RemoveMatch, AppendMatch
def __init__(self, episode_words):
super(RemoveWeak, self).__init__()
super().__init__()
self.episode_words = episode_words
def when(self, matches, context):

@ -396,7 +396,7 @@ class SubtitlePrefixLanguageRule(Rule):
def then(self, matches, when_response, context):
to_rename, to_remove = when_response
super(SubtitlePrefixLanguageRule, self).then(matches, to_remove, context)
super().then(matches, to_remove, context)
for prefix, match in to_rename:
# Remove suffix equivalent of prefix.
suffix = copy.copy(prefix)
@ -435,7 +435,7 @@ class SubtitleSuffixLanguageRule(Rule):
def then(self, matches, when_response, context):
to_rename, to_remove = when_response
super(SubtitleSuffixLanguageRule, self).then(matches, to_remove, context)
super().then(matches, to_remove, context)
for match in to_rename:
matches.remove(match)
match.name = 'subtitle_language'
@ -488,7 +488,7 @@ class RemoveInvalidLanguages(Rule):
def __init__(self, common_words):
"""Constructor."""
super(RemoveInvalidLanguages, self).__init__()
super().__init__()
self.common_words = common_words
def when(self, matches, context):

@ -86,7 +86,7 @@ def other(config): # pylint:disable=unused-argument,too-many-statements
rebulk.regex('(HD)(?P<another>Rip)', value={'other': 'HD', 'another': 'Rip'},
private_parent=True, children=True, validator={'__parent__': seps_surround}, validate_all=True)
for value in ('Screener', 'Remux', 'PAL', 'SECAM', 'NTSC', 'XXX'):
for value in ('Screener', 'Remux', 'Hybrid', 'PAL', 'SECAM', 'NTSC', 'XXX'):
rebulk.string(value, value=value)
rebulk.string('3D', value='3D', tags='has-neighbor')

@ -98,7 +98,7 @@ class DashSeparatedReleaseGroup(Rule):
def __init__(self, value_formatter):
"""Default constructor."""
super(DashSeparatedReleaseGroup, self).__init__()
super().__init__()
self.value_formatter = value_formatter
@classmethod
@ -212,7 +212,7 @@ class SceneReleaseGroup(Rule):
def __init__(self, value_formatter):
"""Default constructor."""
super(SceneReleaseGroup, self).__init__()
super().__init__()
self.value_formatter = value_formatter
@staticmethod
@ -321,7 +321,6 @@ class AnimeReleaseGroup(Rule):
for filepart in marker_sorted(matches.markers.named('path'), matches):
# pylint:disable=bad-continuation
empty_group = matches.markers.range(filepart.start,
filepart.end,
lambda marker: (marker.name == 'group'

@ -69,7 +69,7 @@ class PostProcessScreenSize(Rule):
consequence = AppendMatch
def __init__(self, standard_heights, min_ar, max_ar):
super(PostProcessScreenSize, self).__init__()
super().__init__()
self.standard_heights = standard_heights
self.min_ar = min_ar
self.max_ar = max_ar

@ -3,7 +3,7 @@
"""
size property
"""
import re
from rebulk.remodule import re
from rebulk import Rebulk

@ -3,7 +3,7 @@
"""
streaming_service property
"""
import re
from rebulk.remodule import re
from rebulk import Rebulk
from rebulk.rules import Rule, RemoveMatch
@ -25,13 +25,22 @@ def streaming_service(config): # pylint: disable=too-many-statements,unused-arg
rebulk = rebulk.string_defaults(ignore_case=True).regex_defaults(flags=re.IGNORECASE, abbreviations=[dash])
rebulk.defaults(name='streaming_service', tags=['source-prefix'])
regex_prefix = 're:'
for value, items in config.items():
patterns = items if isinstance(items, list) else [items]
for pattern in patterns:
if pattern.startswith('re:'):
rebulk.regex(pattern, value=value)
if isinstance(pattern, dict):
pattern_value = pattern.pop('pattern')
kwargs = pattern
pattern = pattern_value
else:
kwargs = {}
regex = kwargs.pop('regex', False)
if regex or pattern.startswith(regex_prefix):
rebulk.regex(pattern[len(regex_prefix):], value=value, **kwargs)
else:
rebulk.string(pattern, value=value)
rebulk.string(pattern, value=value, **kwargs)
rebulk.rules(ValidateStreamingService)

@ -53,7 +53,7 @@ class TitleBaseRule(Rule):
consequence = [AppendMatch, RemoveMatch]
def __init__(self, match_name, match_tags=None, alternative_match_name=None):
super(TitleBaseRule, self).__init__()
super().__init__()
self.match_name = match_name
self.match_tags = match_tags
self.alternative_match_name = alternative_match_name
@ -299,7 +299,7 @@ class TitleFromPosition(TitleBaseRule):
properties = {'title': [None], 'alternative_title': [None]}
def __init__(self):
super(TitleFromPosition, self).__init__('title', ['title'], 'alternative_title')
super().__init__('title', ['title'], 'alternative_title')
def enabled(self, context):
return not is_disabled(context, 'alternative_title')

@ -27,7 +27,7 @@ def website(config):
rebulk = rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
rebulk.defaults(name="website")
with resource_stream('guessit', 'tlds-alpha-by-domain.txt') as tld_file:
with resource_stream('guessit', 'data/tlds-alpha-by-domain.txt') as tld_file:
tlds = [
tld.strip().decode('utf-8')
for tld in tld_file.readlines()

@ -1752,6 +1752,7 @@
year: 2018
other:
- 3D
- Hybrid
- Proper
- Remux
proper_count: 1

@ -80,6 +80,9 @@
? Remux
: other: Remux
? Hybrid
: other: Hybrid
? 3D.2019
: other: 3D

@ -577,13 +577,13 @@
release_group: BTW
type: episode
# Streaming service: RTÉ One
# Streaming service: RTE One
? Show.Name.S10E01.576p.RTE.WEBRip.AAC2.0.H.264-RTN
: title: Show Name
season: 10
episode: 1
screen_size: 576p
streaming_service: RTÉ One
streaming_service: RTE One
source: Web
other: Rip
audio_codec: AAC
@ -818,7 +818,6 @@
episode: 0
episode_details: Pilot
episode_title: Pilot
language: zh
other:
- Proper
- Rip
@ -862,7 +861,6 @@
? What.The.Fuck.France.S01E01.Le.doublage.CNLP.WEBRip.AAC2.0.x264-TURTLE
: audio_channels: '2.0'
audio_codec: AAC
country: FR
episode: 1
episode_title: Le doublage
other: Rip
@ -870,7 +868,7 @@
season: 1
source: Web
streaming_service: Canal+
title: What The Fuck
title: What The Fuck France
type: episode
video_codec: H.264
@ -943,14 +941,13 @@
? The.Amazing.Race.Canada.S03.720p.CTV.WEBRip.AAC2.0.H.264-BTW
: audio_channels: '2.0'
audio_codec: AAC
country: CA
other: Rip
release_group: BTW
screen_size: 720p
season: 3
source: Web
streaming_service: CTV
title: The Amazing Race
title: The Amazing Race Canada
type: episode
video_codec: H.264
@ -1240,13 +1237,12 @@
? Big.Brother.Canada.S05.GLBL.WEBRip.AAC2.0.H.264-RTN
: audio_channels: '2.0'
audio_codec: AAC
country: CA
other: Rip
release_group: RTN
season: 5
source: Web
streaming_service: Global
title: Big Brother
title: Big Brother Canada
type: episode
video_codec: H.264
@ -1330,7 +1326,6 @@
? Handmade.in.Japan.S01E01.720p.iP.WEBRip.AAC2.0.H.264-SUP
: audio_channels: '2.0'
audio_codec: AAC
country: JP
episode: 1
other: Rip
release_group: SUP
@ -1338,7 +1333,7 @@
season: 1
source: Web
streaming_service: BBC iPlayer
title: Handmade in
title: Handmade in Japan
type: episode
video_codec: H.264
@ -1463,9 +1458,8 @@
? Bunsen.is.a.Beast.S01E23.Guinea.Some.Lovin.1080p.NICK.WEBRip.AAC2.0.x264-TVSmash
: audio_channels: '2.0'
audio_codec: AAC
country: GN
episode: 23
episode_title: Some Lovin
episode_title: Guinea Some Lovin
other: Rip
release_group: TVSmash
screen_size: 1080p
@ -1538,13 +1532,14 @@
episode_title: The Masquerade
other: Rip
part: 2
release_group: VP9-BTW
release_group: BTW
screen_size: 1080p
season: 2
source: Web
streaming_service: YouTube Red
title: Escape The Night
type: episode
video_codec: VP9
? Escape.The.Night.S02E02.The.Masquerade.Part.II.2160p.RED.WEBRip.AAC5.1.VP9-BTW
: audio_channels: '5.1'
@ -1553,13 +1548,14 @@
episode_title: The Masquerade
other: Rip
part: 2
release_group: VP9-BTW
release_group: BTW
screen_size: 2160p
season: 2
source: Web
streaming_service: YouTube Red
title: Escape The Night
type: episode
video_codec: VP9
? Escape.The.Night.S02E02.The.Masquerade.Part.II.720p.RED.WEBRip.AAC5.1.VP9-BTW
: audio_channels: '5.1'
@ -1568,13 +1564,14 @@
episode_title: The Masquerade
other: Rip
part: 2
release_group: VP9-BTW
release_group: BTW
screen_size: 720p
season: 2
source: Web
streaming_service: YouTube Red
title: Escape The Night
type: episode
video_codec: VP9
? The.Family.Law.S02E01.720p.SBS.WEB-DL.AAC2.0.H.264-BTN
: audio_channels: '2.0'
@ -1892,7 +1889,7 @@
season: 1
source: Web
streaming_service: Vimeo
title: '555'
# title: '555'
type: episode
video_codec: H.264

@ -3,10 +3,9 @@
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement
import json
import os
import sys
from pathlib import Path
import pytest
import six
from ..api import guessit, properties, suggested_expected, GuessitException
@ -19,25 +18,19 @@ def test_default():
def test_forced_unicode():
ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.text_type)
ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], str)
def test_forced_binary():
ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.binary_type)
assert ret and 'title' in ret and isinstance(ret['title'], bytes)
@pytest.mark.skipif(sys.version_info < (3, 4), reason="Path is not available")
def test_pathlike_object():
try:
from pathlib import Path
path = Path('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
ret = guessit(path)
assert ret and 'title' in ret
except ImportError: # pragma: no-cover
pass
path = Path('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
ret = guessit(path)
assert ret and 'title' in ret
def test_unicode_japanese():
@ -51,16 +44,8 @@ def test_unicode_japanese_options():
def test_forced_unicode_japanese_options():
ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]})
assert ret and 'title' in ret and ret['title'] == u"阿维达"
# TODO: This doesn't compile on python 3, but should be tested on python 2.
"""
if six.PY2:
def test_forced_binary_japanese_options():
ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]})
assert ret and 'title' in ret and ret['title'] == b"阿维达"
"""
ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]})
assert ret and 'title' in ret and ret['title'] == "阿维达"
def test_properties():

@ -3,12 +3,9 @@
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement
from __future__ import unicode_literals
import os
import pytest
import six
from ..api import guessit, properties, GuessitException
@ -21,13 +18,13 @@ def test_default():
def test_forced_unicode():
ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.text_type)
ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], str)
def test_forced_binary():
ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')
assert ret and 'title' in ret and isinstance(ret['title'], six.binary_type)
assert ret and 'title' in ret and isinstance(ret['title'], bytes)
def test_unicode_japanese():
@ -41,24 +38,18 @@ def test_unicode_japanese_options():
def test_forced_unicode_japanese_options():
ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]})
assert ret and 'title' in ret and ret['title'] == u"阿维达"
# TODO: This doesn't compile on python 3, but should be tested on python 2.
"""
if six.PY2:
def test_forced_binary_japanese_options():
ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]})
assert ret and 'title' in ret and ret['title'] == b"阿维达"
"""
ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]})
assert ret and 'title' in ret and ret['title'] == "阿维达"
def test_ensure_standard_string_class():
def test_ensure_custom_string_class():
class CustomStr(str):
pass
ret = guessit(CustomStr('1080p'), options={'advanced': True})
assert ret and 'screen_size' in ret and not isinstance(ret['screen_size'].input_string, CustomStr)
ret = guessit(CustomStr('some.title.1080p.mkv'), options={'advanced': True})
assert ret and 'screen_size' in ret and isinstance(ret['screen_size'].input_string, CustomStr)
assert ret and 'title' in ret and isinstance(ret['title'].input_string, CustomStr)
assert ret and 'container' in ret and isinstance(ret['container'].input_string, CustomStr)
def test_properties():

@ -1,16 +1,25 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name
import json
import os
import sys
import pytest
from _pytest.capture import CaptureFixture
from ..__main__ import main
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Prevent output from spamming the console
@pytest.fixture(scope="function", autouse=True)
def no_stdout(monkeypatch):
with open(os.devnull, "w") as f:
monkeypatch.setattr(sys, "stdout", f)
yield
def test_main_no_args():
main([])
@ -24,7 +33,7 @@ def test_main_unicode():
def test_main_forced_unicode():
main([u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv'])
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv'])
def test_main_verbose():
@ -70,3 +79,22 @@ def test_main_help():
def test_main_version():
main(['--version'])
def test_json_output_input_string(capsys: CaptureFixture):
main(['--json', '--output-input-string', 'test.avi'])
outerr = capsys.readouterr()
data = json.loads(outerr.out)
assert 'input_string' in data
assert data['input_string'] == 'test.avi'
def test_json_no_output_input_string(capsys: CaptureFixture):
main(['--json', 'test.avi'])
outerr = capsys.readouterr()
data = json.loads(outerr.out)
assert 'input_string' not in data

@ -7,7 +7,6 @@ import os
from io import open # pylint: disable=redefined-builtin
import babelfish
import six # pylint:disable=wrong-import-order
import yaml # pylint:disable=wrong-import-order
from rebulk.remodule import re
from rebulk.utils import is_iterable
@ -53,16 +52,16 @@ class EntryResult(object):
if self.ok:
return self.string + ': OK!'
if self.warning:
return '%s%s: WARNING! (valid=%i, extra=%i)' % ('-' if self.negates else '', self.string, len(self.valid),
len(self.extra))
return '%s%s: WARNING! (valid=%i, extra=%s)' % ('-' if self.negates else '', self.string, len(self.valid),
self.extra)
if self.error:
return '%s%s: ERROR! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \
('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different),
len(self.extra), len(self.others))
return '%s%s: ERROR! (valid=%i, extra=%s, missing=%s, different=%s, others=%s)' % \
('-' if self.negates else '', self.string, len(self.valid), self.extra, self.missing,
self.different, self.others)
return '%s%s: UNKOWN! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \
('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different),
len(self.extra), len(self.others))
return '%s%s: UNKOWN! (valid=%i, extra=%s, missing=%s, different=%s, others=%s)' % \
('-' if self.negates else '', self.string, len(self.valid), self.extra, self.missing, self.different,
self.others)
@property
def details(self):
@ -110,7 +109,7 @@ def files_and_ids(predicate=None):
for filename in filenames:
name, ext = os.path.splitext(filename)
filepath = os.path.join(dirpath_rel, filename)
if ext == '.yml' and (not predicate or predicate(filepath)):
if ext in ['.yml', '.yaml'] and (not predicate or predicate(filepath)):
files.append(filepath)
ids.append(os.path.join(dirpath_rel, name))
@ -161,7 +160,7 @@ class TestYml(object):
for string, expected in data.items():
TestYml.set_default(expected, default)
string = TestYml.fix_encoding(string, expected)
string = TestYml.fix_encoding(string)
entries.append((filename, string, expected))
unique_id = self._get_unique_id(entry_set, '[' + filename + '] ' + str(string))
@ -178,17 +177,7 @@ class TestYml(object):
expected[k] = v
@classmethod
def fix_encoding(cls, string, expected):
if six.PY2:
if isinstance(string, six.text_type):
string = string.encode('utf-8')
converts = []
for k, v in expected.items():
if isinstance(v, six.text_type):
v = v.encode('utf-8')
converts.append((k, v))
for k, v in converts:
expected[k] = v
def fix_encoding(cls, string):
if not isinstance(string, str):
string = str(string)
return string

@ -4,10 +4,7 @@
Options
"""
try:
from collections import OrderedDict
except ImportError: # pragma: no-cover
from ordereddict import OrderedDict # pylint:disable=import-error
from collections import OrderedDict
import babelfish
import yaml # pylint:disable=wrong-import-order
@ -24,8 +21,8 @@ class OrderedDictYAMLLoader(yaml.SafeLoader):
def __init__(self, *args, **kwargs):
yaml.SafeLoader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()

@ -7,4 +7,4 @@ Define simple search patterns in bulk to perform advanced matching on any string
from .rebulk import Rebulk
from .rules import Rule, CustomRule, AppendMatch, RemoveMatch, RenameMatch, AppendTags, RemoveTags
from .processors import ConflictSolver, PrivateRemover, POST_PROCESS, PRE_PROCESS
from .pattern import REGEX_AVAILABLE
from .pattern import REGEX_ENABLED

@ -4,4 +4,4 @@
Version module
"""
# pragma: no cover
__version__ = '2.0.1'
__version__ = '3.0.1'

@ -7,16 +7,13 @@ from abc import ABCMeta, abstractmethod
from copy import deepcopy
from logging import getLogger
from six import add_metaclass
from .loose import set_defaults
from .pattern import RePattern, StringPattern, FunctionalPattern
log = getLogger(__name__).log
@add_metaclass(ABCMeta)
class Builder(object):
class Builder(metaclass=ABCMeta):
"""
Base builder class for patterns
"""
@ -147,7 +144,7 @@ class Builder(object):
:return:
:rtype:
"""
from .chain import Chain
from .chain import Chain # pylint:disable=import-outside-toplevel
set_defaults(self._chain_defaults, kwargs)
set_defaults(self._defaults, kwargs)
chain = Chain(self, **kwargs)

@ -125,7 +125,7 @@ class Chain(Pattern, Builder):
:rtype:
"""
# pylint: disable=too-many-locals
ret = super(Chain, self)._process_match(match, match_index, child=child)
ret = super()._process_match(match, match_index, child=child)
if ret:
return True
@ -144,7 +144,7 @@ class Chain(Pattern, Builder):
for last_match in last_matches:
match.children.remove(last_match)
match.end = match.children[-1].end if match.children else match.start
ret = super(Chain, self)._process_match(match, match_index, child=child)
ret = super()._process_match(match, match_index, child=child)
if ret:
return True

@ -6,13 +6,11 @@ Introspect rebulk object to retrieve capabilities.
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import six
from .pattern import StringPattern, RePattern, FunctionalPattern
from .utils import extend_safe
@six.add_metaclass(ABCMeta)
class Description(object):
class Description(metaclass=ABCMeta):
"""
Abstract class for a description.
"""

@ -15,7 +15,6 @@ try:
from collections import OrderedDict # pylint:disable=ungrouped-imports
except ImportError: # pragma: no cover
from ordereddict import OrderedDict # pylint:disable=import-error
import six
from .loose import ensure_list, filter_index
from .utils import is_iterable
@ -28,7 +27,7 @@ class MatchesDict(OrderedDict):
"""
def __init__(self):
super(MatchesDict, self).__init__()
super().__init__()
self.matches = defaultdict(list)
self.values_list = defaultdict(list)
@ -67,7 +66,7 @@ class _BaseMatches(MutableSequence):
def _start_dict(self):
if self.__start_dict is None:
self.__start_dict = defaultdict(_BaseMatches._base)
for start, values in itertools.groupby([m for m in self._delegate], lambda item: item.start):
for start, values in itertools.groupby(list(self._delegate), lambda item: item.start):
_BaseMatches._base_extend(self.__start_dict[start], values)
return self.__start_dict
@ -76,7 +75,7 @@ class _BaseMatches(MutableSequence):
def _end_dict(self):
if self.__end_dict is None:
self.__end_dict = defaultdict(_BaseMatches._base)
for start, values in itertools.groupby([m for m in self._delegate], lambda item: item.end):
for start, values in itertools.groupby(list(self._delegate), lambda item: item.end):
_BaseMatches._base_extend(self.__end_dict[start], values)
return self.__end_dict
@ -534,13 +533,6 @@ class _BaseMatches(MutableSequence):
ret[match.name] = value
return ret
if six.PY2: # pragma: no cover
def clear(self):
"""
Python 3 backport
"""
del self[:]
def __len__(self):
return len(self._delegate)
@ -583,11 +575,11 @@ class Matches(_BaseMatches):
def __init__(self, matches=None, input_string=None):
self.markers = Markers(input_string=input_string)
super(Matches, self).__init__(matches=matches, input_string=input_string)
super().__init__(matches=matches, input_string=input_string)
def _add_match(self, match):
assert not match.marker, "A marker match should not be added to <Matches> object"
super(Matches, self)._add_match(match)
super()._add_match(match)
class Markers(_BaseMatches):
@ -596,11 +588,11 @@ class Markers(_BaseMatches):
"""
def __init__(self, matches=None, input_string=None):
super(Markers, self).__init__(matches=None, input_string=input_string)
super().__init__(matches=None, input_string=input_string)
def _add_match(self, match):
assert match.marker, "A non-marker match should not be added to <Markers> object"
super(Markers, self)._add_match(match)
super()._add_match(match)
class Match(object):

@ -7,19 +7,16 @@ Abstract pattern class definition along with various implementations (regexp, st
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from . import debug
from .formatters import default_formatter
from .loose import call, ensure_list, ensure_dict
from .match import Match
from .remodule import re, REGEX_AVAILABLE
from .remodule import re, REGEX_ENABLED
from .utils import find_all, is_iterable, get_first_defined
from .validators import allways_true
@six.add_metaclass(ABCMeta)
class BasePattern(object):
class BasePattern(metaclass=ABCMeta):
"""
Base class for Pattern like objects
"""
@ -41,8 +38,7 @@ class BasePattern(object):
pass
@six.add_metaclass(ABCMeta)
class Pattern(BasePattern):
class Pattern(BasePattern, metaclass=ABCMeta):
"""
Definition of a particular pattern to search for.
"""
@ -396,7 +392,7 @@ class StringPattern(Pattern):
"""
def __init__(self, *patterns, **kwargs):
super(StringPattern, self).__init__(**kwargs)
super().__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)
@ -422,11 +418,11 @@ class RePattern(Pattern):
"""
def __init__(self, *patterns, **kwargs):
super(RePattern, self).__init__(**kwargs)
self.repeated_captures = REGEX_AVAILABLE
super().__init__(**kwargs)
self.repeated_captures = REGEX_ENABLED
if 'repeated_captures' in kwargs:
self.repeated_captures = kwargs.get('repeated_captures')
if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover
if self.repeated_captures and not REGEX_ENABLED: # pragma: no cover
raise NotImplementedError("repeated_capture is available only with regex module.")
self.abbreviations = kwargs.get('abbreviations', [])
self._kwargs = kwargs
@ -434,7 +430,7 @@ class RePattern(Pattern):
self._children_match_kwargs = filter_match_kwargs(kwargs, children=True)
self._patterns = []
for pattern in patterns:
if isinstance(pattern, six.string_types):
if isinstance(pattern, str):
if self.abbreviations and pattern:
for key, replacement in self.abbreviations:
pattern = pattern.replace(key, replacement)
@ -494,7 +490,7 @@ class FunctionalPattern(Pattern):
"""
def __init__(self, *patterns, **kwargs):
super(FunctionalPattern, self).__init__(**kwargs)
super().__init__(**kwargs)
self._patterns = patterns
self._kwargs = kwargs
self._match_kwargs = filter_match_kwargs(kwargs)

@ -53,7 +53,7 @@ class Rebulk(Builder):
:return:
:rtype:
"""
super(Rebulk, self).__init__()
super().__init__()
if not callable(disabled):
self.disabled = lambda context: disabled
else:

@ -5,13 +5,17 @@ Uniform re module
"""
# pylint: disable-all
import os
import logging
REGEX_AVAILABLE = False
if os.environ.get('REGEX_DISABLED') in ["1", "true", "True", "Y"]:
import re
else:
log = logging.getLogger(__name__).log
REGEX_ENABLED = False
if os.environ.get('REBULK_REGEX_ENABLED') in ["1", "true", "True", "Y"]:
try:
import regex as re
REGEX_AVAILABLE = True
REGEX_ENABLED = True
except ImportError:
log.warning('regex module is not available. Unset REBULK_REGEX_ENABLED environment variable, or install regex module to enabled it.')
import re
else:
import re

@ -8,7 +8,6 @@ import inspect
from itertools import groupby
from logging import getLogger
import six
from .utils import is_iterable
from .toposort import toposort
@ -18,8 +17,7 @@ from . import debug
log = getLogger(__name__).log
@six.add_metaclass(ABCMeta)
class Consequence(object):
class Consequence(metaclass=ABCMeta):
"""
Definition of a consequence to apply.
"""
@ -40,8 +38,7 @@ class Consequence(object):
pass
@six.add_metaclass(ABCMeta)
class Condition(object):
class Condition(metaclass=ABCMeta):
"""
Definition of a condition to check.
"""
@ -60,8 +57,7 @@ class Condition(object):
pass
@six.add_metaclass(ABCMeta)
class CustomRule(Condition, Consequence):
class CustomRule(Condition, Consequence, metaclass=ABCMeta):
"""
Definition of a rule to apply
"""
@ -243,7 +239,7 @@ class Rules(list):
"""
def __init__(self, *rules):
super(Rules, self).__init__()
super().__init__()
self.load(*rules)
def load(self, *rules):

@ -3,7 +3,6 @@
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unneeded-not, len-as-condition
import pytest
import six
from ..match import Match, Matches
from ..pattern import StringPattern, RePattern
@ -72,23 +71,18 @@ class TestMatchClass(object):
assert match2 > match1
assert match2 >= match1
if six.PY3:
with pytest.raises(TypeError):
match1 < other
with pytest.raises(TypeError):
match1 < other
with pytest.raises(TypeError):
match1 <= other
with pytest.raises(TypeError):
match1 <= other
with pytest.raises(TypeError):
match1 > other
with pytest.raises(TypeError):
match1 > other
with pytest.raises(TypeError):
match1 >= other
with pytest.raises(TypeError):
match1 >= other
else:
assert match1 < other
assert match1 <= other
assert not match1 > other
assert not match1 >= other
def test_value(self):
match1 = Match(1, 3)

@ -1,11 +1,11 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unbalanced-tuple-unpacking, len-as-condition
# pylint: disable=no-self-use, pointless-statement, missing-docstring, unbalanced-tuple-unpacking, len-as-condition, no-member
import re
import pytest
from ..pattern import StringPattern, RePattern, FunctionalPattern, REGEX_AVAILABLE
from ..pattern import StringPattern, RePattern, FunctionalPattern, REGEX_ENABLED
from ..match import Match
class TestStringPattern(object):
@ -706,7 +706,7 @@ class TestFormatter(object):
assert len(matches) == 1
match = matches[0]
if REGEX_AVAILABLE:
if REGEX_ENABLED:
assert len(match.children) == 5
assert [child.value for child in match.children] == ["02", "03", "04", "05", "06"]
else:

@ -22,7 +22,7 @@ from functools import reduce
class CyclicDependency(ValueError):
def __init__(self, cyclic):
s = 'Cyclic dependencies exist among these items: {0}'.format(', '.join(repr(x) for x in cyclic.items()))
super(CyclicDependency, self).__init__(s)
super().__init__(s)
self.cyclic = cyclic

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from hashlib import sha1
from dogpile.cache import make_region
@ -14,4 +15,13 @@ EPISODE_EXPIRATION_TIME = datetime.timedelta(days=3).total_seconds()
REFINER_EXPIRATION_TIME = datetime.timedelta(weeks=1).total_seconds()
region = make_region()
def sha1_key_mangler(key):
"""Return sha1 hex for cache keys"""
if isinstance(key, str):
key = key.encode("utf-8")
return sha1(key).hexdigest()
# Use key mangler to limit cache key names to 40 characters
region = make_region(key_mangler=sha1_key_mangler)

@ -546,6 +546,10 @@ def scan_video(path, dont_use_actual_file=False, hints=None, providers=None, ski
if dont_use_actual_file and not hash_from:
return video
# if all providers are throttled, skip hashing
if not providers:
skip_hashing = True
# size and hashes
if not skip_hashing:
hash_path = hash_from or path

@ -362,7 +362,7 @@ def patch_create_connection():
except dns.exception.DNSException:
logger.warning("DNS: Couldn't resolve %s with DNS: %s", host, custom_resolver.nameservers)
logger.debug("DNS: Falling back to default DNS or IP on %s", host)
# logger.debug("DNS: Falling back to default DNS or IP on %s", host) <-- commented because it makes way too much noise in debug logs
return _orig_create_connection((host, port), *args, **kwargs)
patch_create_connection._sz_patched = True

@ -4,9 +4,13 @@ import logging
import io
from requests import Session
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from guessit import guessit
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.exceptions import TooManyRequests
from subliminal.subtitle import guess_matches
from subliminal.video import Episode, Movie
from subzero.language import Language
@ -22,7 +26,7 @@ logger = logging.getLogger(__name__)
class BSPlayerSubtitle(Subtitle):
"""BSPlayer Subtitle."""
provider_name = 'bsplayer'
provider_name = "bsplayer"
hash_verifiable = True
def __init__(self, language, filename, subtype, video, link, subid):
@ -46,64 +50,32 @@ class BSPlayerSubtitle(Subtitle):
matches = set()
matches |= guess_matches(video, guessit(self.filename))
subtitle_filename = self.filename
# episode
if isinstance(video, Episode):
# already matched in search query
matches.update(['title', 'series', 'season', 'episode', 'year'])
matches.update(["title", "series", "season", "episode", "year"])
# movie
elif isinstance(video, Movie):
# already matched in search query
matches.update(['title', 'year'])
# release_group
if video.release_group and video.release_group.lower() in subtitle_filename:
matches.add('release_group')
# resolution
if video.resolution and video.resolution.lower() in subtitle_filename:
matches.add('resolution')
# source
formats = []
if video.source:
formats = [video.source.lower()]
if formats[0] == "web":
formats.append("webdl")
formats.append("webrip")
formats.append("web ")
for frmt in formats:
if frmt.lower() in subtitle_filename:
matches.add('source')
break
matches.update(["title", "year"])
# video_codec
if video.video_codec:
video_codecs = [video.video_codec.lower()]
if video_codecs[0] == "H.264":
formats.append("x264")
elif video_codecs[0] == "H.265":
formats.append("x265")
for vc in formats:
if vc.lower() in subtitle_filename:
matches.add('video_codec')
break
matches.add('hash')
matches.add("hash")
return matches
class BSPlayerProvider(Provider):
"""BSPlayer Provider."""
# fmt: off
languages = {Language('por', 'BR')} | {Language(l) for l in [
'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra', 'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por',
'ron', 'rus', 'spa', 'swe', 'tur', 'ukr', 'zho'
]}
SEARCH_THROTTLE = 8
hash_verifiable = True
# fmt: on
    # blatantly based on kodi's bsplayer plugin
# also took from BSPlayer-Subtitles-Downloader
@ -112,20 +84,26 @@ class BSPlayerProvider(Provider):
def initialize(self):
self.session = Session()
# Try to avoid bsplayer throttling increasing retries time (0, 4, 6, 8, 10)
retry = Retry(connect=5, backoff_factor=2)
adapter = HTTPAdapter(max_retries=retry)
self.session.mount("http://", adapter)
self.search_url = self.get_sub_domain()
self.token = None
self.login()
def terminate(self):
self.session.close()
self.logout()
def api_request(self, func_name='logIn', params='', tries=5):
def api_request(self, func_name="logIn", params="", tries=5):
headers = {
'User-Agent': 'BSPlayer/2.x (1022.12360)',
'Content-Type': 'text/xml; charset=utf-8',
'Connection': 'close',
'SOAPAction': '"http://api.bsplayer-subtitles.com/v1.php#{func_name}"'.format(func_name=func_name)
"User-Agent": "BSPlayer/2.x (1022.12360)",
"Content-Type": "text/xml; charset=utf-8",
"Connection": "close",
"SOAPAction": '"http://api.bsplayer-subtitles.com/v1.php#{func_name}"'.format(
func_name=func_name
),
}
data = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
@ -134,148 +112,194 @@ class BSPlayerProvider(Provider):
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:ns1="{search_url}">'
'<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
'<ns1:{func_name}>{params}</ns1:{func_name}></SOAP-ENV:Body></SOAP-ENV:Envelope>'
"<ns1:{func_name}>{params}</ns1:{func_name}></SOAP-ENV:Body></SOAP-ENV:Envelope>"
).format(search_url=self.search_url, func_name=func_name, params=params)
logger.info('Sending request: %s.' % func_name)
logger.debug("Sending request: %s." % func_name)
for i in iter(range(tries)):
try:
self.session.headers.update(headers.items())
res = self.session.post(self.search_url, data)
return ElementTree.fromstring(res.text)
return ElementTree.fromstring(res.text.strip())
except Exception as ex:
logger.info("ERROR: %s." % ex)
if func_name == 'logIn':
logger.error(f"Exception parsing response: {ex}")
if func_name == "logIn":
self.search_url = self.get_sub_domain()
sleep(1)
logger.info('ERROR: Too many tries (%d)...' % tries)
raise Exception('Too many tries...')
raise TooManyRequests(f"Too many retries: {tries}")
def login(self):
# If already logged in
if self.token:
# Setting attribute here as initialize() will reset it
if hasattr(self, "token"):
logger.debug("Token already met. Skipping logging")
return True
root = self.api_request(
func_name='logIn',
params=('<username></username>'
'<password></password>'
'<AppID>BSPlayer v2.67</AppID>')
func_name="logIn",
params=(
"<username></username>"
"<password></password>"
"<AppID>BSPlayer v2.67</AppID>"
),
)
res = root.find('.//return')
if res.find('status').text == 'OK':
self.token = res.find('data').text
logger.info("Logged In Successfully.")
res = root.find(".//return")
# avoid AttributeError
if not res:
return False
if res.find("status").text == "OK":
self.token = res.find("data").text
logger.debug("Logged In Successfully.")
return True
return False
def logout(self):
# If already logged out / not logged in
if not self.token:
# if not self.token:
# return True
if not hasattr(self, "token"):
logger.debug("Already logged out")
return True
root = self.api_request(
func_name='logOut',
params='<handle>{token}</handle>'.format(token=self.token)
func_name="logOut",
params="<handle>{token}</handle>".format(token=self.token),
)
res = root.find('.//return')
res = root.find(".//return")
self.token = None
if res.find('status').text == 'OK':
logger.info("Logged Out Successfully.")
# avoid AttributeError
if not res:
logger.debug("Root logout returned None")
return False
if res.find("status").text == "OK":
logger.debug("Logged Out Successfully.")
return True
return False
def query(self, video, video_hash, language):
if not self.login():
logger.debug("Token not found. Can't perform query")
return []
if isinstance(language, (tuple, list, set)):
# language_ids = ",".join(language)
# language_ids = 'spa'
language_ids = ','.join(sorted(l.opensubtitles for l in language))
language_ids = ",".join(sorted(l.opensubtitles for l in language))
if video.imdb_id is None:
imdbId = '*'
imdbId = "*"
else:
imdbId = video.imdb_id
sleep(self.SEARCH_THROTTLE)
root = self.api_request(
func_name='searchSubtitles',
func_name="searchSubtitles",
params=(
'<handle>{token}</handle>'
'<movieHash>{movie_hash}</movieHash>'
'<movieSize>{movie_size}</movieSize>'
'<languageId>{language_ids}</languageId>'
'<imdbId>{imdbId}</imdbId>'
).format(token=self.token, movie_hash=video_hash,
movie_size=video.size, language_ids=language_ids, imdbId=imdbId)
"<handle>{token}</handle>"
"<movieHash>{movie_hash}</movieHash>"
"<movieSize>{movie_size}</movieSize>"
"<languageId>{language_ids}</languageId>"
"<imdbId>{imdbId}</imdbId>"
).format(
token=self.token,
movie_hash=video_hash,
movie_size=video.size,
language_ids=language_ids,
imdbId=imdbId,
),
)
res = root.find('.//return/result')
if res.find('status').text != 'OK':
res = root.find(".//return/result")
if not res:
logger.debug("No subtitles found")
return []
items = root.findall('.//return/data/item')
status = res.find("status").text
if status != "OK":
logger.debug(f"No subtitles found (bad status: {status})")
return []
items = root.findall(".//return/data/item")
subtitles = []
if items:
logger.info("Subtitles Found.")
logger.debug("Subtitles Found.")
for item in items:
subID = item.find('subID').text
subDownloadLink = item.find('subDownloadLink').text
subLang = Language.fromopensubtitles(item.find('subLang').text)
subName = item.find('subName').text
subFormat = item.find('subFormat').text
subID = item.find("subID").text
subDownloadLink = item.find("subDownloadLink").text
subLang = Language.fromopensubtitles(item.find("subLang").text)
subName = item.find("subName").text
subFormat = item.find("subFormat").text
subtitles.append(
BSPlayerSubtitle(subLang, subName, subFormat, video, subDownloadLink, subID)
BSPlayerSubtitle(
subLang, subName, subFormat, video, subDownloadLink, subID
)
)
return subtitles
def list_subtitles(self, video, languages):
return self.query(video, video.hashes['bsplayer'], languages)
return self.query(video, video.hashes["bsplayer"], languages)
def get_sub_domain(self):
API_URL_TEMPLATE = None
session = Session()
# API_URL_TEMPLATE = None
# session = Session()
# s1-9, s101-109
# Don't test again
# fixme: Setting attribute here as initialize() may reset it (maybe
# there's a more elegant way?)
if hasattr(self, "API_URL_TEMPLATE"):
logger.debug(f"Working subdomain already met: {self.API_URL_TEMPLATE}")
return self.API_URL_TEMPLATE
else:
self.API_URL_TEMPLATE = None
# fmt: off
SUB_DOMAINS = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8',
's101', 's102', 's103', 's104', 's105', 's106', 's107', 's108', 's109']
# fmt: on
random.shuffle(SUB_DOMAINS)
for domain in SUB_DOMAINS:
# Limit to 8 tests
for domain in SUB_DOMAINS[:8]:
TEST_URL = "http://{}.api.bsplayer-subtitles.com".format(domain)
try:
logging.debug('Testing BSplayer sub-domain {}'.format(TEST_URL))
res = session.get(TEST_URL, timeout=5)
logging.debug("Testing BSplayer sub-domain {}".format(TEST_URL))
res = self.session.get(TEST_URL, timeout=10)
except:
continue
else:
res.raise_for_status()
if res.status_code == 200:
API_URL_TEMPLATE = "http://{}.api.bsplayer-subtitles.com/v1.php".format(domain)
logger.debug(f"Found working subdomain: {domain}")
self.API_URL_TEMPLATE = (
"http://{}.api.bsplayer-subtitles.com/v1.php".format(domain)
)
break
else:
sleep(5)
continue
if API_URL_TEMPLATE:
return API_URL_TEMPLATE
else:
raise ServiceUnavailable()
if self.API_URL_TEMPLATE:
return self.API_URL_TEMPLATE
raise ServiceUnavailable("No API URL template was found")
def download_subtitle(self, subtitle):
session = Session()
_addheaders = {
'User-Agent': 'Mozilla/4.0 (compatible; Synapse)'
}
session.headers.update(_addheaders)
res = session.get(subtitle.page_link)
# session = Session()
_addheaders = {"User-Agent": "Mozilla/4.0 (compatible; Synapse)"}
self.session.headers.update(_addheaders)
res = self.session.get(subtitle.page_link)
if res:
if res.text == '500':
raise ValueError('Error 500 on server')
if res.text == "500":
raise ServiceUnavailable("Error 500 on server")
with gzip.GzipFile(fileobj=io.BytesIO(res.content)) as gf:
subtitle.content = gf.read()
subtitle.normalize()
return subtitle
raise ValueError('Problems conecting to the server')
raise ServiceUnavailable("Problems conecting to the server")

@ -98,93 +98,95 @@ class LegendasTVProvider(_LegendasTVProvider):
return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, title, season, title_year, imdb_id):
def search_titles(self, titles, season, title_year, imdb_id):
"""Search for titles matching the `title`.
    For episodes, each season has its own title
:param str title: the title to search for.
:param str titles: the titles to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles = {}
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
if "dsc_nome_br" in source:
name2, ommit1, ommit2 = title_re.match(source['dsc_nome_br']).groups()
title['title2'] = name2
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
logger.debug(u'Found title: %s', title)
titles[title_id] = title
titles_found = {}
logger.debug('Found %d titles', len(titles))
return titles
for title in titles:
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
if "dsc_nome_br" in source:
name2, ommit1, ommit2 = title_re.match(source['dsc_nome_br']).groups()
title['title2'] = name2
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
def query(self, language, title, season=None, episode=None, year=None, imdb_id=None):
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
logger.debug(u'Found title: %s', title)
titles_found[title_id] = title
logger.debug('Found %d titles', len(titles_found))
return titles_found
def query(self, language, titles, season=None, episode=None, year=None, imdb_id=None):
# search for titles
titles = self.search_titles(title, season, year, imdb_id)
titles_found = self.search_titles(titles, season, year, imdb_id)
subtitles = []
# iterate over titles
for title_id, t in titles.items():
for title_id, t in titles_found.items():
# Skip episodes or movies if it's not what was requested
if (season and t['type'] == 'movie') or (not season and t['type'] == 'episode'):
continue
@ -259,13 +261,12 @@ class LegendasTVProvider(_LegendasTVProvider):
titles = [video.title] + video.alternative_titles
imdb = video.imdb_id
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episode=episode, year=video.year, imdb_id=imdb)]
if subtitles:
return subtitles
return []
subtitles = [s for l in languages for s in
self.query(l, titles, season=season, episode=episode, year=video.year, imdb_id=imdb)]
if subtitles:
return subtitles
else:
return []
def download_subtitle(self, subtitle):
super(LegendasTVProvider, self).download_subtitle(subtitle)

@ -18,7 +18,7 @@ from subliminal.providers.opensubtitles import OpenSubtitlesProvider as _OpenSub
from .mixins import ProviderRetryMixin
from subliminal.subtitle import fix_line_ending
from subliminal_patch.http import SubZeroRequestsTransport
from subliminal_patch.utils import sanitize
from subliminal_patch.utils import sanitize, fix_inconsistent_naming
from subliminal.cache import region
from subliminal_patch.score import framerate_equal
from subzero.language import Language
@ -28,6 +28,23 @@ from ..exceptions import TooManyRequests, APIThrottled
logger = logging.getLogger(__name__)
def fix_tv_naming(title):
"""Fix TV show titles with inconsistent naming using dictionary, but do not sanitize them.
:param str title: original title.
:return: new title.
:rtype: str
"""
return fix_inconsistent_naming(title, {"Superman & Lois": "Superman and Lois",
}, True)
def fix_movie_naming(title):
return fix_inconsistent_naming(title, {
}, True)
class OpenSubtitlesSubtitle(_OpenSubtitlesSubtitle):
hash_verifiable = True
hearing_impaired_verifiable = True
@ -58,14 +75,14 @@ class OpenSubtitlesSubtitle(_OpenSubtitlesSubtitle):
# episode
if isinstance(video, Episode) and self.movie_kind == 'episode':
# series
if video.series and (sanitize(self.series_name) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
if fix_tv_naming(video.series) and (sanitize(self.series_name) in (
sanitize(name) for name in [fix_tv_naming(video.series)] + video.alternative_series)):
matches.add('series')
# movie
elif isinstance(video, Movie) and self.movie_kind == 'movie':
# title
if video.title and (sanitize(self.movie_name) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
if fix_movie_naming(video.title) and (sanitize(self.movie_name) in (
sanitize(name) for name in [fix_movie_naming(video.title)] + video.alternative_titles)):
matches.add('title')
sub_fps = None

@ -17,12 +17,31 @@ from .mixins import ProviderRetryMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.subtitle import fix_line_ending, SUBTITLE_EXTENSIONS
from subliminal_patch.providers import Provider
from subliminal_patch.utils import fix_inconsistent_naming
from subliminal.cache import region
from guessit import guessit
logger = logging.getLogger(__name__)
SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=1).total_seconds()
TOKEN_EXPIRATION_TIME = datetime.timedelta(hours=12).total_seconds()
def fix_tv_naming(title):
"""Fix TV show titles with inconsistent naming using dictionary, but do not sanitize them.
:param str title: original title.
:return: new title.
:rtype: str
"""
return fix_inconsistent_naming(title, {"Superman & Lois": "Superman and Lois",
}, True)
def fix_movie_naming(title):
return fix_inconsistent_naming(title, {
}, True)
class OpenSubtitlesComSubtitle(Subtitle):
@ -125,22 +144,19 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
self.use_hash = use_hash
def initialize(self):
self.login()
self.token = region.get("oscom_token")
if self.token:
self.session.headers.update({'Authorization': 'Beaker ' + self.token})
return True
else:
self.login()
def terminate(self):
self.session.close()
@region.cache_on_arguments(expiration_time=TOKEN_EXPIRATION_TIME)
def login(self):
try:
r = self.session.post(self.server_url + 'login',
json={"username": self.username, "password": self.password},
allow_redirects=False,
timeout=10)
timeout=30)
except (ConnectionError, Timeout, ReadTimeout):
raise ServiceUnavailable('Unknown Error, empty response: %s: %r' % (r.status_code, r))
else:
@ -150,7 +166,6 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
except ValueError:
raise ProviderError('Invalid JSON returned by provider')
else:
self.session.headers.update({'Authorization': 'Beaker ' + self.token})
region.set("oscom_token", self.token)
return True
elif r.status_code == 401:
@ -179,16 +194,14 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
parameters = {'query': title}
logging.debug('Searching using this title: {}'.format(title))
results = self.session.get(self.server_url + 'features', params=parameters, timeout=10)
results.raise_for_status()
results = self.session.get(self.server_url + 'features', params=parameters, timeout=30)
if results.status_code == 401:
logging.debug('Authentification failed: clearing cache and attempting to login.')
region.delete("oscom_token")
self.login()
results = self.session.get(self.server_url + 'features', params=parameters, timeout=10)
results.raise_for_status()
results = self.session.get(self.server_url + 'features', params=parameters, timeout=30)
if results.status_code == 429:
raise TooManyRequests()
@ -203,7 +216,7 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
else:
# loop over results
for result in results_dict:
if title.lower() == result['attributes']['title'].lower() and \
if fix_tv_naming(title).lower() == result['attributes']['title'].lower() and \
(not self.video.year or self.video.year == int(result['attributes']['year'])):
title_id = result['id']
break
@ -243,14 +256,13 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
'episode_number': self.video.episode,
'season_number': self.video.season,
'moviehash': hash},
timeout=10)
timeout=30)
else:
res = self.session.get(self.server_url + 'subtitles',
params={'id': title_id,
'languages': langs,
'moviehash': hash},
timeout=10)
res.raise_for_status()
timeout=30)
if res.status_code == 429:
raise TooManyRequests()
@ -306,13 +318,12 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
def download_subtitle(self, subtitle):
logger.info('Downloading subtitle %r', subtitle)
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
headers = {'Accept': 'application/json', 'Content-Type': 'application/json',
'Authorization': 'Beaker ' + self.token}
res = self.session.post(self.server_url + 'download',
json={'file_id': subtitle.file_id, 'sub_format': 'srt'},
headers=headers,
timeout=10)
res.raise_for_status()
timeout=30)
if res.status_code == 429:
raise TooManyRequests()
elif res.status_code == 406:
@ -323,8 +334,7 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
except ValueError:
raise ProviderError('Invalid JSON returned by provider')
else:
r = self.session.get(subtitle.download_link, timeout=10)
r.raise_for_status()
r = self.session.get(subtitle.download_link, timeout=30)
if res.status_code == 429:
raise TooManyRequests()

@ -9,6 +9,7 @@ from babelfish import language_converters
from subzero.language import Language
from requests import Session
import urllib.parse
from random import randint
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
@ -23,6 +24,7 @@ from zipfile import ZipFile
from rarfile import RarFile, is_rarfile
from subliminal_patch.utils import sanitize, fix_inconsistent_naming
from guessit import guessit
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
logger = logging.getLogger(__name__)
@ -175,7 +177,7 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
def initialize(self):
self.session = Session()
self.session.headers = {'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")}
self.session.headers = {'User-Agent': AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]}
def terminate(self):
self.session.close()
@ -251,18 +253,18 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
except IndexError:
continue
result_title = fix_tv_naming(result_title).strip().replace("<EFBFBD>", "").replace(" ", ".")
result_title = fix_tv_naming(result_title).strip().replace("<EFBFBD>", "").replace("& ", "").replace(" ", ".")
if not result_title:
continue
guessable = result_title.strip() + ".s01e01." + result_year
guess = guessit(guessable, {'type': "episode"})
if sanitize(original_title) == sanitize(guess['title']) and year and guess['year'] and \
if sanitize(original_title.replace('& ', '')) == sanitize(guess['title']) and year and guess['year'] and \
year == guess['year']:
                # Return the found id
return result_id
elif sanitize(original_title) == sanitize(guess['title']) and not year:
elif sanitize(original_title.replace('& ', '')) == sanitize(guess['title']) and not year:
                # Return the found id
return result_id
@ -364,6 +366,7 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
sub_english_name = sub_english_name.group() if sub_english_name else ''
sub_english_name = sub_english_name.split(' (')[0]
sub_english_name = sub_english_name.replace('&amp;', '&')
sub_version = 'n/a'
if len(str(sub_english).split('(')) > 1:
sub_version = (str(sub_english).split('(')[len(str(sub_english).split('(')) - 1]).split(')')[0]

@ -87,9 +87,9 @@ class ZimukuProvider(Provider):
languages = {Language(*l) for l in supported_languages}
logger.info(str(supported_languages))
server_url = "http://www.zimuku.la"
server_url = "http://www.zmk.pw"
search_url = "/search?q={}"
download_url = "http://www.zimuku.la/"
download_url = "http://www.zmk.pw/"
UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)"

@ -14,7 +14,7 @@ def sanitize(string, ignore_characters=None, default_characters={'-', ':', '(',
"""
# only deal with strings
if string is None:
if not isinstance(string, str):
return
ignore_characters = ignore_characters or set()

@ -3,8 +3,8 @@ apscheduler=3.5.1
babelfish=0.5.5
backports.functools-lru-cache=1.5
Beaker=1.10.0
bottle=0.12.13
bottle-fdsend=0.1.1
bottle=0.12.13
chardet=3.0.4
dogpile.cache=0.6.5
enzyme=0.4.1
@ -12,7 +12,7 @@ ffsubsync=2020-08-04
Flask=1.1.1
gevent-websocker=0.10.1
gitpython=2.1.9
guessit=3.1.1
guessit=3.3.1
guess_language-spirit=0.5.3
Js2Py=0.63 <-- modified: manually merged from upstream: https://github.com/PiotrDabkowski/Js2Py/pull/192/files
knowit=0.3.0-dev
@ -22,12 +22,13 @@ pyga=2.6.1
pysrt=1.1.1
pytz=2018.4
rarfile=3.0
rebulk=2.0.1
rebulk=3.0.1
requests=2.18.4
six=1.11.0
semver=2.13.0
SimpleConfigParser=0.1.0 <-- modified version: do not update!!!
six=1.11.0
stevedore=1.28.0
subliminal=2.1.0dev
tzlocal=2.1b1
urllib3=1.23
Waitress=1.4.3
Waitress=1.4.3

@ -46,6 +46,10 @@
padding: .4rem !important;
}
.table tr {
cursor: auto !important;
}
.btn-light {
background-color: white;
border: 1px solid #ced4da;

@ -61,6 +61,10 @@
<div><i class="fas fa-search align-top text-themecolor text-center font-20" aria-hidden="true"></i></div>
<div class="align-bottom text-themecolor small text-center">Search</div>
</button>
<button class="btn btn-outline" id="tools_button">
<div><i class="fa fa-briefcase align-top text-themecolor text-center font-20" aria-hidden="true"></i></div>
<div class="align-bottom text-themecolor small text-center">Tools</div>
</button>
</div>
{% endblock bcleft %}
@ -382,6 +386,80 @@
</div>
</div>
<div id="seasonToolsModal" class="modal" tabindex="-1" role="dialog">
<div class="modal-dialog modal-xl" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title"><span id="season_tools_title_span"></span></h5><br>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">&times;</span>
</button>
</div>
<div class="modal-body">
<div class="container-fluid">
<div class="row">
<div class="col-sm-3 text-right">
Language
</div>
<div class="form-group col-sm-8 pl-sm-0">
<span id="season_tools_audio_language_span"></span>
</div>
</div>
<br>
<div class="row">
<div class="col-sm-3 text-right">
Tools
</div>
<div class="form-group col-sm-8 pl-sm-0">
<span id="season_tools_span"></span>
</div>
</div>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<div id="showToolsModal" class="modal" tabindex="-1" role="dialog">
<div class="modal-dialog modal-xl" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title"><span id="show_tools_title_span"></span></h5><br>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">&times;</span>
</button>
</div>
<div class="modal-body">
<div class="container-fluid">
<div class="row">
<div class="col-sm-3 text-right">
Language
</div>
<div class="form-group col-sm-8 pl-sm-0">
<span id="show_tools_audio_language_span"></span>
</div>
</div>
<br>
<div class="row">
<div class="col-sm-3 text-right">
Tools
</div>
<div class="form-group col-sm-8 pl-sm-0">
<span id="show_tools_span"></span>
</div>
</div>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<div id="episodeSubtitleModColorModal" class="modal" tabindex="-1" role="dialog">
<div class="modal-dialog" role="document">
<div class="modal-content">
@ -620,14 +698,14 @@
});
if (collapsed) {
var chevron_icon = '<i class="fas fa-chevron-circle-right"></i>';
var chevron_icon = '<i style="cursor: pointer;" class="chevron fas fa-chevron-circle-up"></i>';
} else {
var chevron_icon = '<i class="fas fa-chevron-circle-down"></i>';
var chevron_icon = '<i style="cursor: pointer;" class="chevron fas fa-chevron-circle-down"></i>';
}
return $('<tr/>')
.append('<td colspan=' + rows.columns()[0].length + '>Season ' + group + ' ' + chevron_icon + '</td>')
.append('<td colspan=' + ( rows.columns()[0].length - 1 ) + '>Season ' + group + ' ' + chevron_icon + '</td><td><a href="" class="season_tools badge badge-secondary" data-season="' + group + '"><i class="fa fa-briefcase"></i></a></td>')
.attr('data-name', group)
.toggleClass('collapsed', collapsed);
}
@ -748,8 +826,8 @@
]
});
$('#episodes').on('click', 'tr.dtrg-start', function () {
var name = $(this).data('name');
$('#episodes').on('click', '.chevron', function () {
var name = $(this).closest('tr').data('name');
collapsedGroups[name] = !collapsedGroups[name];
table.draw(false);
});
@ -1057,6 +1135,47 @@
DONE: 3
}
$('#tools_button').on('click', function (e) {
$(this).tooltip('dispose');
e.preventDefault();
$("#show_tools_title_span").html(seriesDetails['title']);
$("#show_tools_audio_language_span").html(seriesDetails['audio_language'][0].name);
$("#show_tools_span").html('<a href="" class="subtitles_sync_all_show badge badge-secondary" data-language="' + seriesDetails['audio_language'][0].code3 + '" data-placement="right" data-toggle="tooltip" id="sync_button_show" title="Sync whole show"><i class="far fa-play-circle"></i></a>');
$('#showToolsModal')
.modal({
focus: false
});
});
$('#show_tools_span').on('click', '.subtitles_sync_all_show', function (e) {
e.preventDefault();
const values = {
language: $(this).attr("data-language"),
show: seriesDetails['sonarrSeriesId'],
mediaType: 'series'
};
var cell = $(this).parent()
;
$.ajax({
url: "{{ url_for('api.syncallsubtitles') }}",
type: "POST",
dataType: "json",
data: values,
beforeSend: function () {
$('#sync_button_show').find("i").addClass('fa-spin');
},
complete: function () {
$('#sync_button_show').find("i").removeClass('fa-spin');
}
});
});
$('#mass_upload_button').on('click', function (e) {
e.preventDefault();
@ -1635,6 +1754,45 @@
});
});
$('#episodes').on('click', '.season_tools', function (e) {
$(this).tooltip('dispose');
e.preventDefault();
$("#season_tools_title_span").html(seriesDetails['title'] + ' - Season ' + $(this).data("season"));
$("#season_tools_audio_language_span").html(seriesDetails['audio_language'][0].name);
$("#season_tools_span").html('<a href="" class="subtitles_sync_all badge badge-secondary" data-language="' + seriesDetails['audio_language'][0].code3 + '" data-season="' + $(this).data("season") + '" data-placement="right" data-toggle="tooltip" id="sync_button_season" title="Sync whole season"><i class="far fa-play-circle"></i></a>');
$('#seasonToolsModal')
.modal({
focus: false
});
});
$('#season_tools_span').on('click', '.subtitles_sync_all', function (e) {
e.preventDefault();
var season = $(this).attr("data-season");
const values = {
language: $(this).attr("data-language"),
show: seriesDetails['sonarrSeriesId'],
season: season,
mediaType: 'series'
};
var cell = $(this).parent();
$.ajax({
url: "{{ url_for('api.syncallsubtitles') }}",
type: "POST",
dataType: "json",
data: values,
beforeSend: function () {
$('#'+'sync_button_season').find("i").addClass('fa-spin');
},
complete: function () {
$('#'+'sync_button_season').find("i").removeClass('fa-spin');
}
});
});
$('#episode_tools_result').on('click', '.subtitles_sync', function (e) {
e.preventDefault();
const values = {

Loading…
Cancel
Save