Merge branch 'development' of https://github.com/morpheus65535/bazarr into development

JayZed · commit 4cc6806193 · pull/2687/head · 5 months ago

@@ -8,6 +8,19 @@ updates:
       prefix: "[bot]"
     open-pull-requests-limit: 1
     target-branch: "development"
+    groups:
+      fortawesome:
+        patterns:
+          - "@fortawesome*"
+      mantine:
+        patterns:
+          - "@mantine*"
+      react:
+        patterns:
+          - "react"
+          - "react-dom"
+          - "@types/react"
+          - "@types/react-dom"
   - package-ecosystem: 'github-actions'
     directory: '/'
     schedule:

@@ -7,9 +7,16 @@ sleep 30
 if kill -s 0 $PID
 then
-    echo "Bazarr is still running. We'll kill it..."
-    kill $PID
-    exit 0
+    echo "Bazarr is still running. We'll test if UI is working..."
 else
     exit 1
 fi
+
+exitcode=0
+curl -fsSL --retry-all-errors --retry 60 --retry-max-time 120 --max-time 10 "http://127.0.0.1:6767" --output /dev/null || exitcode=$?
+[[ ${exitcode} == 0 ]] && echo "UI is responsive, good news!" || echo "Oops, UI isn't reachable, bad news..."
+echo "Let's stop Bazarr before we exit..."
+pkill -INT -P $PID
+exit ${exitcode}
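The rewritten test script no longer kills Bazarr on sight; it probes the UI and propagates the result as the exit code. For reference, a rough Python equivalent of what the curl flags (`--retry 60 --retry-max-time 120 --max-time 10`) accomplish; the URL and retry budget mirror the script, everything else here is illustrative:

```python
# Sketch of the same health probe in Python; only `requests` is needed.
import time
import requests

def wait_for_ui(url="http://127.0.0.1:6767", retries=60, deadline=120.0):
    start = time.monotonic()
    for _ in range(retries):
        if time.monotonic() - start > deadline:
            break  # overall retry budget exhausted
        try:
            if requests.get(url, timeout=10).ok:
                return True
        except requests.RequestException:
            pass  # connection refused while Bazarr is still starting
        time.sleep(2)
    return False

if __name__ == "__main__":
    print("UI is responsive" if wait_for_ui() else "UI isn't reachable")
```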

@@ -10,5 +10,5 @@ latest_verion=$(git describe --tags --abbrev=0)
 if [[ $RELEASE_MASTER -eq 1 ]]; then
     auto-changelog --stdout -t changelog-master.hbs --starting-version "$master_version" --commit-limit 3
 else
-    auto-changelog --stdout --starting-version "$latest_verion" --unreleased-only --commit-limit 0
+    auto-changelog --stdout --starting-version "$latest_verion" --unreleased-only --commit-limit false
 fi

@@ -34,9 +34,9 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
-          node-version: "lts/*"
+          node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install dependencies
         run: npm install
@@ -76,7 +76,7 @@ jobs:
       - uses: actions/checkout@v4
       - name: Set up Python 3.8
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.8"

@@ -36,9 +36,9 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
-          node-version: "lts/*"
+          node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install Global Tools
         run: npm install -g release-it auto-changelog

@@ -38,9 +38,9 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
-          node-version: "lts/*"
+          node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install Global Tools
         run: npm install -g release-it auto-changelog

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Execute
-        uses: benc-uk/workflow-dispatch@v121
+        uses: benc-uk/workflow-dispatch@v1.2.3
        with:
          workflow: "release_beta_to_dev"
          token: ${{ secrets.WF_GITHUB_TOKEN }}

@@ -22,9 +22,9 @@ jobs:
           ref: development
       - name: Setup NodeJS
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
-          node-version: "lts/*"
+          node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install UI Dependencies
         run: npm install
@@ -35,7 +35,7 @@ jobs:
         working-directory: ${{ env.UI_DIRECTORY }}
       - name: Set up Python 3.8
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.8"

@@ -48,7 +48,9 @@ If you need something that is not already part of Bazarr, feel free to create a
 ## Supported subtitles providers:
 - Addic7ed
+- Animetosho (requires AniDb HTTP API client described [here](https://wiki.anidb.net/HTTP_API_Definition))
 - Assrt
+- AvistaZ, CinemaZ (Get session cookies using method described [here](https://github.com/morpheus65535/bazarr/pull/2375#issuecomment-2057010996))
 - BetaSeries
 - BSplayer
 - Embedded Subtitles

@@ -8,12 +8,14 @@ import sys
 import time

 from bazarr.app.get_args import args
-from bazarr.literals import *
+from bazarr.literals import EXIT_PYTHON_UPGRADE_NEEDED, EXIT_NORMAL, FILE_RESTART, FILE_STOP, ENV_RESTARTFILE, ENV_STOPFILE, EXIT_INTERRUPT


 def exit_program(status_code):
     print(f'Bazarr exited with status code {status_code}.')
     raise SystemExit(status_code)


 def check_python_version():
     python_version = platform.python_version_tuple()
     minimum_py3_tuple = (3, 8, 0)
@@ -52,9 +54,10 @@ check_python_version()

 dir_name = os.path.dirname(__file__)


 def start_bazarr():
     script = [get_python_path(), "-u", os.path.normcase(os.path.join(dir_name, 'bazarr', 'main.py'))] + sys.argv[1:]
-    ep = subprocess.Popen(script, stdout=None, stderr=None, stdin=subprocess.DEVNULL)
+    ep = subprocess.Popen(script, stdout=None, stderr=None, stdin=subprocess.DEVNULL, env=os.environ)
     print(f"Bazarr starting child process with PID {ep.pid}...")
     return ep
@@ -74,33 +77,33 @@ def get_stop_status_code(input_file):
     except (ValueError, TypeError):
         status_code = EXIT_NORMAL
         file.close()
-    except:
+    except Exception:
         status_code = EXIT_NORMAL
     return status_code


 def check_status():
     global child_process
-    if os.path.exists(stopfile):
-        status_code = get_stop_status_code(stopfile)
+    if os.path.exists(stop_file):
+        status_code = get_stop_status_code(stop_file)
         try:
-            print(f"Deleting stop file...")
-            os.remove(stopfile)
-        except Exception as e:
+            print("Deleting stop file...")
+            os.remove(stop_file)
+        except Exception:
             print('Unable to delete stop file.')
         finally:
             terminate_child()
             exit_program(status_code)

-    if os.path.exists(restartfile):
+    if os.path.exists(restart_file):
         try:
-            print(f"Deleting restart file...")
-            os.remove(restartfile)
+            print("Deleting restart file...")
+            os.remove(restart_file)
         except Exception:
             print('Unable to delete restart file.')
         finally:
             terminate_child()
-            print(f"Bazarr is restarting...")
+            print("Bazarr is restarting...")
             child_process = start_bazarr()
@@ -113,25 +116,25 @@ def interrupt_handler(signum, frame):
         interrupted = True
         print('Handling keyboard interrupt...')
     else:
-        print(f"Stop doing that! I heard you the first time!")
+        print("Stop doing that! I heard you the first time!")


 if __name__ == '__main__':
     interrupted = False
     signal.signal(signal.SIGINT, interrupt_handler)
-    restartfile = os.path.join(args.config_dir, FILE_RESTART)
-    stopfile = os.path.join(args.config_dir, FILE_STOP)
-    os.environ[ENV_STOPFILE] = stopfile
-    os.environ[ENV_RESTARTFILE] = restartfile
+    restart_file = os.path.join(args.config_dir, FILE_RESTART)
+    stop_file = os.path.join(args.config_dir, FILE_STOP)
+    os.environ[ENV_STOPFILE] = stop_file
+    os.environ[ENV_RESTARTFILE] = restart_file

     # Cleanup leftover files
     try:
-        os.remove(restartfile)
+        os.remove(restart_file)
     except FileNotFoundError:
         pass

     try:
-        os.remove(stopfile)
+        os.remove(stop_file)
     except FileNotFoundError:
         pass
@@ -145,5 +148,5 @@ if __name__ == '__main__':
             time.sleep(5)
     except (KeyboardInterrupt, SystemExit, ChildProcessError):
         # this code should never be reached, if signal handling is working properly
-        print(f'Bazarr exited main script file via keyboard interrupt.')
+        print('Bazarr exited main script file via keyboard interrupt.')
         exit_program(EXIT_INTERRUPT)
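Most of this hunk is mechanical (snake_case renames, dropping f-strings with no placeholders, `except Exception:` instead of a bare `except:`), but it also documents the stop-file protocol: another process writes an exit code into the stop file, and the runner reads it, deletes the file, terminates the child, and exits with that code. A minimal sketch of that contract, with placeholder paths rather than Bazarr's actual literals:

```python
# Writer side: drop the desired exit code into the stop file.
def request_stop(stop_file_path, status_code=0):
    with open(stop_file_path, 'w') as f:
        f.write(str(status_code))

# Reader side: parse the code back out, falling back to a default on any
# missing/garbled file, mirroring get_stop_status_code() above.
def read_stop_status(stop_file_path, default=0):
    try:
        with open(stop_file_path, 'r') as f:
            return int(f.read().strip() or default)
    except (OSError, ValueError):
        return default
```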

@@ -114,6 +114,8 @@ class Subtitles(Resource):
         subtitles_path = args.get('path')
         media_type = args.get('type')
         id = args.get('id')
+        forced = True if args.get('forced') == 'True' else False
+        hi = True if args.get('hi') == 'True' else False

         if not os.path.exists(subtitles_path):
             return 'Subtitles file not found. Path mapping issue?', 500
@@ -144,6 +146,8 @@ class Subtitles(Resource):
                 'video_path': video_path,
                 'srt_path': subtitles_path,
                 'srt_lang': language,
+                'hi': hi,
+                'forced': forced,
                 'reference': args.get('reference') if args.get('reference') not in empty_values else video_path,
                 'max_offset_seconds': args.get('max_offset_seconds') if args.get('max_offset_seconds') not in
                 empty_values else str(settings.subsync.max_offset_seconds),
@@ -167,8 +171,6 @@ class Subtitles(Resource):
         elif action == 'translate':
             from_language = subtitles_lang_from_filename(subtitles_path)
             dest_language = language
-            forced = True if args.get('forced') == 'true' else False
-            hi = True if args.get('hi') == 'true' else False
             try:
                 translate_subtitles_file(video_path=video_path, source_srt_file=subtitles_path,
                                          from_lang=from_language, to_lang=dest_language, forced=forced, hi=hi,
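Note that the hoisted flags now compare against the capitalized literal `'True'`, while the removed translate-only code compared against `'true'`, so callers must send exactly the expected casing. A case-insensitive helper like this hypothetical sketch (not part of the patch) sidesteps that class of mismatch:

```python
def str_to_bool(value, default=False):
    # tolerate 'True', 'true', '1', etc., instead of matching one exact literal
    if value is None:
        return default
    return str(value).strip().lower() in ('true', '1', 'yes', 'on')

assert str_to_bool('True') and str_to_bool('true') and not str_to_bool('False')
```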

@@ -7,7 +7,6 @@ from flask_restx import Resource, Namespace, fields, marshal
 from app.config import settings
 from app.logger import empty_log
-from app.get_args import args
 from utilities.central import get_log_file_path

 from ..utils import authenticate

@@ -8,6 +8,8 @@ from app.database import TableShows, TableMovies, database, select
 from ..utils import authenticate

+import textdistance
+
 api_ns_system_searches = Namespace('System Searches', description='Search for series or movies by name')
@@ -61,4 +63,6 @@ class Searches(Resource):
                 results.append(result)

+        # sort results by how closely they match the query
+        results = sorted(results, key=lambda x: textdistance.hamming.distance(query, x['title']))
+
         return results
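A toy run of the new ranking step. The `textdistance` package's `hamming.distance` counts position-by-position mismatches (with length differences counted as mismatches by default), so a lower score means a title that more closely matches the query from the start:

```python
import textdistance  # third-party dependency used by the patch

query = "Breaking"
results = [{'title': 'Breaking Bad'}, {'title': 'Broken'}, {'title': 'Breaking In'}]
ranked = sorted(results, key=lambda x: textdistance.hamming.distance(query, x['title']))
print([r['title'] for r in ranked])  # closest prefix matches sort first
```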

@@ -1,6 +1,6 @@
 # coding=utf-8

-from flask import Flask, redirect
+from flask import Flask, redirect, Request
 from flask_compress import Compress
 from flask_cors import CORS
@@ -13,9 +13,17 @@ from .config import settings, base_url

 socketio = SocketIO()


+class CustomRequest(Request):
+    def __init__(self, *args, **kwargs):
+        super(CustomRequest, self).__init__(*args, **kwargs)
+        # required to increase form-data size before returning a 413
+        self.max_form_parts = 10000
+
+
 def create_app():
     # Flask Setup
     app = Flask(__name__)
+    app.request_class = CustomRequest
     app.config['COMPRESS_ALGORITHM'] = 'gzip'
     Compress(app)
     app.wsgi_app = ReverseProxied(app.wsgi_app)
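Background: recent Werkzeug releases cap multipart form parts at 1000 by default and answer larger posts with a 413, which is what the `max_form_parts` bump works around. The same pattern in isolation, as a minimal sketch assuming Flask 2.x (setting the limit as a class attribute works just as well as assigning it in `__init__`):

```python
from flask import Flask, Request, request

class BigFormRequest(Request):
    # raise Werkzeug's multipart form-parts cap (default 1000)
    max_form_parts = 10000

app = Flask(__name__)
app.request_class = BigFormRequest  # must be set before requests are handled

@app.post("/echo")
def echo():
    return {"fields": len(request.form)}
```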

@@ -25,7 +25,7 @@ def check_releases():
     url_releases = 'https://api.github.com/repos/morpheus65535/Bazarr/releases?per_page=100'
     try:
         logging.debug(f'BAZARR getting releases from Github: {url_releases}')
-        r = requests.get(url_releases, allow_redirects=True)
+        r = requests.get(url_releases, allow_redirects=True, timeout=15)
         r.raise_for_status()
     except requests.exceptions.HTTPError:
         logging.exception("Error trying to get releases from Github. Http error.")
@@ -160,12 +160,14 @@ def apply_update():
                         'BAZARR was unable to delete the previous build directory during upgrade process.')

                 for file in archive.namelist():
-                    if file.startswith(zip_root_directory) and file != zip_root_directory and not \
-                            file.endswith('bazarr.py'):
+                    if file.startswith(zip_root_directory) and file != zip_root_directory:
                         file_path = os.path.join(bazarr_dir, file[len(zip_root_directory):])
                         parent_dir = os.path.dirname(file_path)
                         os.makedirs(parent_dir, exist_ok=True)
                         if not os.path.isdir(file_path):
+                            if os.path.exists(file_path):
+                                # remove the file first to handle case-insensitive file systems
+                                os.remove(file_path)
                             with open(file_path, 'wb+') as f:
                                 f.write(archive.read(file))
         except Exception:
@@ -230,6 +232,9 @@ def update_cleaner(zipfile, bazarr_dir, config_dir):
     dir_to_ignore_regex = re.compile(dir_to_ignore_regex_string)

     file_to_ignore = ['nssm.exe', '7za.exe', 'unins000.exe', 'unins000.dat']
+    # prevent deletion of leftover Apprise.py/pyi files after 1.8.0 version that caused issue on case-insensitive
+    # filesystem. This could be removed in a couple of major versions.
+    file_to_ignore += ['Apprise.py', 'Apprise.pyi', 'apprise.py', 'apprise.pyi']
     logging.debug(f'BAZARR upgrade leftover cleaner will ignore those files: {", ".join(file_to_ignore)}')
     extension_to_ignore = ['.pyc']
     logging.debug(
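The remove-before-write matters on case-insensitive filesystems (Windows, default macOS): opening `apprise.py` for writing while `Apprise.py` exists rewrites the contents but keeps the old on-disk name, which is exactly the Apprise leftover the cleaner now has to ignore. Deleting first lets the archive's name win. A self-contained sketch of the idea, with illustrative names:

```python
import os

def safe_extract_write(file_path, data: bytes):
    if os.path.exists(file_path):
        # on a case-insensitive filesystem this also matches e.g. 'Apprise.py'
        # when file_path is 'apprise.py', so the stale casing is removed
        os.remove(file_path)
    with open(file_path, 'wb') as f:
        f.write(data)
```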

@@ -7,6 +7,7 @@ import logging
 import re

 from urllib.parse import quote_plus
+from utilities.binaries import BinaryNotFound, get_binary
 from literals import EXIT_VALIDATION_ERROR
 from utilities.central import stop_bazarr
 from subliminal.cache import region
@@ -54,6 +55,14 @@ class Validator(OriginalValidator):
     )


+def check_parser_binary(value):
+    try:
+        get_binary(value)
+    except BinaryNotFound:
+        raise ValidationError(f"Executable '{value}' not found in search path. Please install before making this selection.")
+    return True
+
+
 validators = [
     # general section
     Validator('general.flask_secret_key', must_exist=True, default=hexlify(os.urandom(16)).decode(),
@@ -100,6 +109,7 @@ validators = [
     Validator('general.adaptive_searching_delta', must_exist=True, default='1w', is_type_of=str,
               is_in=['3d', '1w', '2w', '3w', '4w']),
     Validator('general.enabled_providers', must_exist=True, default=[], is_type_of=list),
+    Validator('general.enabled_integrations', must_exist=True, default=[], is_type_of=list),
     Validator('general.multithreading', must_exist=True, default=True, is_type_of=bool),
     Validator('general.chmod_enabled', must_exist=True, default=False, is_type_of=bool),
     Validator('general.chmod', must_exist=True, default='0640', is_type_of=str),
@@ -119,7 +129,7 @@ validators = [
     Validator('general.dont_notify_manual_actions', must_exist=True, default=False, is_type_of=bool),
     Validator('general.hi_extension', must_exist=True, default='hi', is_type_of=str, is_in=['hi', 'cc', 'sdh']),
     Validator('general.embedded_subtitles_parser', must_exist=True, default='ffprobe', is_type_of=str,
-              is_in=['ffprobe', 'mediainfo']),
+              is_in=['ffprobe', 'mediainfo'], condition=check_parser_binary),
     Validator('general.default_und_audio_lang', must_exist=True, default='', is_type_of=str),
     Validator('general.default_und_embedded_subtitles_lang', must_exist=True, default='', is_type_of=str),
     Validator('general.parse_embedded_audio_track', must_exist=True, default=False, is_type_of=bool),
@@ -225,6 +235,11 @@ validators = [
     Validator('addic7ed.user_agent', must_exist=True, default='', is_type_of=str),
     Validator('addic7ed.vip', must_exist=True, default=False, is_type_of=bool),

+    # animetosho section
+    Validator('animetosho.search_threshold', must_exist=True, default=6, is_type_of=int, gte=1, lte=15),
+    Validator('animetosho.anidb_api_client', must_exist=True, default='', is_type_of=str, cast=str),
+    Validator('animetosho.anidb_api_client_ver', must_exist=True, default=1, is_type_of=int, gte=1, lte=9),
+
     # avistaz section
     Validator('avistaz.cookies', must_exist=True, default='', is_type_of=str),
     Validator('avistaz.user_agent', must_exist=True, default='', is_type_of=str),
@@ -278,10 +293,6 @@ validators = [
     Validator('napisy24.username', must_exist=True, default='', is_type_of=str, cast=str),
     Validator('napisy24.password', must_exist=True, default='', is_type_of=str, cast=str),

-    # subscene section
-    Validator('subscene.username', must_exist=True, default='', is_type_of=str, cast=str),
-    Validator('subscene.password', must_exist=True, default='', is_type_of=str, cast=str),
-
     # betaseries section
     Validator('betaseries.token', must_exist=True, default='', is_type_of=str, cast=str),
@@ -310,6 +321,9 @@ validators = [
     Validator('karagarga.f_username', must_exist=True, default='', is_type_of=str, cast=str),
     Validator('karagarga.f_password', must_exist=True, default='', is_type_of=str, cast=str),

+    # subdl section
+    Validator('subdl.api_key', must_exist=True, default='', is_type_of=str, cast=str),
+
     # subsync section
     Validator('subsync.use_subsync', must_exist=True, default=False, is_type_of=bool),
     Validator('subsync.use_subsync_threshold', must_exist=True, default=False, is_type_of=bool),
@@ -360,6 +374,10 @@ validators = [
     Validator('postgresql.database', must_exist=True, default='', is_type_of=str),
     Validator('postgresql.username', must_exist=True, default='', is_type_of=str, cast=str),
     Validator('postgresql.password', must_exist=True, default='', is_type_of=str, cast=str),
+
+    # anidb section
+    Validator('anidb.api_client', must_exist=True, default='', is_type_of=str),
+    Validator('anidb.api_client_ver', must_exist=True, default=1, is_type_of=int),
 ]
@@ -433,6 +451,7 @@ array_keys = ['excluded_tags',
               'subzero_mods',
               'excluded_series_types',
               'enabled_providers',
+              'enabled_integrations',
               'path_mappings',
               'path_mappings_movie',
               'language_equals',
@@ -666,15 +685,6 @@ def save_settings(settings_items):
                 reset_providers = True
                 region.delete('oscom_token')

-        if key == 'settings-subscene-username':
-            if key != settings.subscene.username:
-                reset_providers = True
-                region.delete('subscene_cookies2')
-        elif key == 'settings-subscene-password':
-            if key != settings.subscene.password:
-                reset_providers = True
-                region.delete('subscene_cookies2')
-
         if key == 'settings-titlovi-username':
             if key != settings.titlovi.username:
                 reset_providers = True
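The new `check_parser_binary` is wired in through dynaconf's `condition` hook: the callable receives the candidate value, and validation fails when it returns a falsy value or raises. A standalone sketch of that mechanism, with made-up setting names:

```python
from dynaconf import Dynaconf, Validator

def is_even(value):
    # condition callables get the value and must return True to pass
    return value % 2 == 0

settings = Dynaconf(settings_files=[])
settings.set('answer', 42)
settings.validators.register(Validator('answer', must_exist=True, condition=is_even))
settings.validators.validate()  # passes; an odd value would raise ValidationError
```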

@@ -172,6 +172,7 @@ class TableHistory(Base):
     video_path = mapped_column(Text)
     matched = mapped_column(Text)
     not_matched = mapped_column(Text)
+    upgradedFromId = mapped_column(Integer, ForeignKey('table_history.id'))


 class TableHistoryMovie(Base):
@@ -190,6 +191,7 @@ class TableHistoryMovie(Base):
     video_path = mapped_column(Text)
     matched = mapped_column(Text)
     not_matched = mapped_column(Text)
+    upgradedFromId = mapped_column(Integer, ForeignKey('table_history_movie.id'))


 class TableLanguagesProfiles(Base):
@@ -202,6 +204,7 @@ class TableLanguagesProfiles(Base):
     name = mapped_column(Text, nullable=False)
     mustContain = mapped_column(Text)
     mustNotContain = mapped_column(Text)
+    tag = mapped_column(Text)


 class TableMovies(Base):
@@ -497,3 +500,28 @@ def convert_list_to_clause(arr: list):
         return f"({','.join(str(x) for x in arr)})"
     else:
         return ""
+
+
+def upgrade_languages_profile_hi_values():
+    for languages_profile in (database.execute(
+            select(
+                TableLanguagesProfiles.profileId,
+                TableLanguagesProfiles.name,
+                TableLanguagesProfiles.cutoff,
+                TableLanguagesProfiles.items,
+                TableLanguagesProfiles.mustContain,
+                TableLanguagesProfiles.mustNotContain,
+                TableLanguagesProfiles.originalFormat)
+    ))\
+            .all():
+        items = json.loads(languages_profile.items)
+        for language in items:
+            if language['hi'] == "only":
+                language['hi'] = "True"
+            elif language['hi'] in ["also", "never"]:
+                language['hi'] = "False"
+        database.execute(
+            update(TableLanguagesProfiles)
+            .values({"items": json.dumps(items)})
+            .where(TableLanguagesProfiles.profileId == languages_profile.profileId)
+        )
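A worked example of what `upgrade_languages_profile_hi_values` does to the stored `items` JSON, using fabricated profile data: the old three-state `hi` values (`"only"`/`"also"`/`"never"`) collapse into the new two-state `"True"`/`"False"`:

```python
import json

items = json.dumps([{'language': 'en', 'hi': 'only'},
                    {'language': 'fr', 'hi': 'also'},
                    {'language': 'es', 'hi': 'never'}])
migrated = json.loads(items)
for language in migrated:
    if language['hi'] == "only":
        language['hi'] = "True"
    elif language['hi'] in ["also", "never"]:
        language['hi'] = "False"
print(migrated)  # hi becomes "True", "False", "False"
```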

@@ -125,7 +125,7 @@ def provider_throttle_map():
 PROVIDERS_FORCED_OFF = ["addic7ed", "tvsubtitles", "legendasdivx", "napiprojekt", "shooter",
-                        "hosszupuska", "supersubtitles", "titlovi", "assrt", "subscene"]
+                        "hosszupuska", "supersubtitles", "titlovi", "assrt"]

 throttle_count = {}
@@ -259,11 +259,6 @@ def get_providers_auth():
             'also_foreign': False,  # fixme
             'verify_ssl': settings.podnapisi.verify_ssl
         },
-        'subscene': {
-            'username': settings.subscene.username,
-            'password': settings.subscene.password,
-            'only_foreign': False,  # fixme
-        },
         'legendasdivx': {
             'username': settings.legendasdivx.username,
             'password': settings.legendasdivx.password,
@@ -324,6 +319,12 @@ def get_providers_auth():
             'timeout': settings.whisperai.timeout,
             'ffmpeg_path': _FFMPEG_BINARY,
             'loglevel': settings.whisperai.loglevel,
+        },
+        "animetosho": {
+            'search_threshold': settings.animetosho.search_threshold,
+        },
+        "subdl": {
+            'api_key': settings.subdl.api_key,
         }
     }
@@ -498,7 +499,7 @@ def get_throttled_providers():
     except Exception:
         # set empty content in throttled_providers.dat
         logging.error("Invalid content in throttled_providers.dat. Resetting")
-        set_throttled_providers(providers)
+        set_throttled_providers(str(providers))
     finally:
         return providers

@@ -11,7 +11,6 @@ from logging.handlers import TimedRotatingFileHandler
 from utilities.central import get_log_file_path
 from pytz_deprecation_shim import PytzUsageWarning

-from .get_args import args
 from .config import settings

@@ -1,6 +1,6 @@
 # coding=utf-8

-import apprise
+from apprise import Apprise, AppriseAsset
 import logging

 from .database import TableSettingsNotifier, TableEpisodes, TableShows, TableMovies, database, insert, delete, select
@@ -8,7 +8,7 @@ from .database import TableSettingsNotifier, TableEpisodes, TableShows, TableMov

 def update_notifier():
     # define apprise object
-    a = apprise.Apprise()
+    a = Apprise()

     # Retrieve all the details
     results = a.details()
@@ -70,9 +70,9 @@ def send_notifications(sonarr_series_id, sonarr_episode_id, message):
     if not episode:
         return

-    asset = apprise.AppriseAsset(async_mode=False)
+    asset = AppriseAsset(async_mode=False)

-    apobj = apprise.Apprise(asset=asset)
+    apobj = Apprise(asset=asset)

     for provider in providers:
         if provider.url is not None:
@@ -101,9 +101,9 @@ def send_notifications_movie(radarr_id, message):
     else:
         movie_year = ''

-    asset = apprise.AppriseAsset(async_mode=False)
+    asset = AppriseAsset(async_mode=False)

-    apobj = apprise.Apprise(asset=asset)
+    apobj = Apprise(asset=asset)

     for provider in providers:
         if provider.url is not None:

@@ -10,7 +10,6 @@ from apscheduler.triggers.date import DateTrigger
 from apscheduler.events import EVENT_JOB_SUBMITTED, EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
 from datetime import datetime, timedelta
 from calendar import day_name
-from math import floor
 from random import randrange
 from tzlocal import get_localzone
 try:
@@ -47,6 +46,10 @@ ONE_YEAR_IN_SECONDS = 60 * 60 * 24 * 365


 def a_long_time_from_now(job):
+    # job isn't scheduled at all
+    if job.next_run_time is None:
+        return True
+
     # currently defined as more than a year from now
     delta = job.next_run_time - datetime.now(job.next_run_time.tzinfo)
     return delta.total_seconds() > ONE_YEAR_IN_SECONDS
@@ -321,8 +324,8 @@ class Scheduler:
                 self.aps_scheduler.modify_job(job.id,
                                               next_run_time=datetime.now(tz=self.timezone) +
                                               timedelta(seconds=randrange(
-                                                  job.trigger.interval.total_seconds() * 0.75,
-                                                  job.trigger.interval.total_seconds())))
+                                                  int(job.trigger.interval.total_seconds() * 0.75),
+                                                  int(job.trigger.interval.total_seconds()))))
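The `int()` casts fix a real crash: `timedelta.total_seconds()` returns a float, and `random.randrange()` rejects float bounds on recent Python (a TypeError since 3.11, deprecated before that). A standalone illustration:

```python
from datetime import timedelta
from random import randrange

interval = timedelta(hours=6).total_seconds()   # 21600.0, a float
# randrange(interval * 0.75, interval) fails on Python 3.11+;
# casting both bounds to int keeps the same jitter window
delay = randrange(int(interval * 0.75), int(interval))
print(f"next run delayed by {delay} seconds")
```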

@@ -4,7 +4,7 @@ import signal
 import warnings
 import logging
 import errno
-from literals import EXIT_INTERRUPT, EXIT_NORMAL
+from literals import EXIT_INTERRUPT, EXIT_NORMAL, EXIT_PORT_ALREADY_IN_USE_ERROR
 from utilities.central import restart_bazarr, stop_bazarr

 from waitress.server import create_server
@@ -18,10 +18,7 @@ from .database import close_database
 from .app import create_app

 app = create_app()

-ui_bp.register_blueprint(api_bp, url_prefix='/api')
-# Mute UserWarning with flask-restx and Flask >= 2.2.0. Will be raised as an exception in 2.3.0
-# https://github.com/python-restx/flask-restx/issues/485
-warnings.filterwarnings('ignore', message='The setup method ')
+app.register_blueprint(api_bp, url_prefix=base_url.rstrip('/') + '/api')
 app.register_blueprint(ui_bp, url_prefix=base_url.rstrip('/'))
@@ -56,10 +53,17 @@ class Server:
                     logging.exception("BAZARR cannot bind to specified IP, trying with default (0.0.0.0)")
                     self.address = '0.0.0.0'
                     self.connected = False
+                    super(Server, self).__init__()
                 elif error.errno == errno.EADDRINUSE:
-                    logging.exception("BAZARR cannot bind to specified TCP port, trying with default (6767)")
-                    self.port = '6767'
-                    self.connected = False
+                    if self.port != '6767':
+                        logging.exception("BAZARR cannot bind to specified TCP port, trying with default (6767)")
+                        self.port = '6767'
+                        self.connected = False
+                        super(Server, self).__init__()
+                    else:
+                        logging.exception("BAZARR cannot bind to default TCP port (6767) because it's already in use, "
+                                          "exiting...")
+                        self.shutdown(EXIT_PORT_ALREADY_IN_USE_ERROR)
                 else:
                     logging.exception("BAZARR cannot start because of unhandled exception.")
                     self.shutdown()
@@ -83,9 +87,9 @@ class Server:
             pass

     def close_all(self):
-        print(f"Closing database...")
+        print("Closing database...")
         close_database()
-        print(f"Closing webserver...")
+        print("Closing webserver...")
         self.server.close()

     def shutdown(self, status=EXIT_NORMAL):
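Standalone illustration of the errno check the new branch relies on: binding to a busy port raises `OSError` with `errno.EADDRINUSE`, which is the cue to either retry on the default port or give up with the new exit code:

```python
import errno
import socket

def port_in_use(port, host="127.0.0.1"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind((host, port))
        except OSError as error:
            if error.errno == errno.EADDRINUSE:
                return True   # fall back to the default port, or exit
            raise             # some other bind failure
    return False

print(port_in_use(6767))
```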

@@ -12,7 +12,7 @@ from signalrcore.hub_connection_builder import HubConnectionBuilder
 from collections import deque
 from time import sleep

-from constants import headers
+from constants import HEADERS
 from app.event_handler import event_stream
 from sonarr.sync.episodes import sync_episodes, sync_one_episode
 from sonarr.sync.series import update_series, update_one_series
@@ -39,7 +39,7 @@ class SonarrSignalrClientLegacy:
         self.session = Session()
         self.session.timeout = 60
         self.session.verify = False
-        self.session.headers = headers
+        self.session.headers = HEADERS
         self.connection = None
         self.connected = False
@@ -162,7 +162,7 @@ class SonarrSignalrClient:
             .with_url(f"{url_sonarr()}/signalr/messages?access_token={self.apikey_sonarr}",
                       options={
                           "verify_ssl": False,
-                          "headers": headers
+                          "headers": HEADERS
                       }) \
             .with_automatic_reconnect({
                 "type": "raw",
@@ -229,7 +229,7 @@ class RadarrSignalrClient:
             .with_url(f"{url_radarr()}/signalr/messages?access_token={self.apikey_radarr}",
                       options={
                           "verify_ssl": False,
-                          "headers": headers
+                          "headers": HEADERS
                       }) \
             .with_automatic_reconnect({
                 "type": "raw",

@@ -4,11 +4,12 @@ import os
 import requests
 import mimetypes

-from flask import request, abort, render_template, Response, session, send_file, stream_with_context, Blueprint
+from flask import (request, abort, render_template, Response, session, send_file, stream_with_context, Blueprint,
+                   redirect)
 from functools import wraps
 from urllib.parse import unquote

-from constants import headers
+from constants import HEADERS
 from literals import FILE_LOG
 from sonarr.info import url_api_sonarr
 from radarr.info import url_api_radarr
@@ -19,9 +20,10 @@ from .config import settings, base_url
 from .database import System
 from .get_args import args

+frontend_build_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'frontend', 'build')
+
 ui_bp = Blueprint('ui', __name__,
-                  template_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
-                                               'frontend', 'build'),
+                  template_folder=frontend_build_path,
                   static_folder=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'frontend',
                                              'build', 'assets'),
                   static_url_path='/assets')
@@ -37,13 +39,15 @@ static_bp = Blueprint('images', __name__, static_folder=static_directory, static

 ui_bp.register_blueprint(static_bp)

 mimetypes.add_type('application/javascript', '.js')
 mimetypes.add_type('text/css', '.css')
 mimetypes.add_type('font/woff2', '.woff2')
 mimetypes.add_type('image/svg+xml', '.svg')
 mimetypes.add_type('image/png', '.png')
 mimetypes.add_type('image/x-icon', '.ico')
+mimetypes.add_type('application/manifest+json', '.webmanifest')

+pwa_assets = ['registerSW.js', 'manifest.webmanifest', 'sw.js']

 def check_login(actual_method):
@@ -65,6 +69,14 @@ def check_login(actual_method):
 @ui_bp.route('/', defaults={'path': ''})
 @ui_bp.route('/<path:path>')
 def catch_all(path):
+    if path.startswith('login') and settings.auth.type not in ['basic', 'form']:
+        # login page has been accessed when no authentication is enabled
+        return redirect(base_url or "/", code=302)
+
+    # PWA Assets are returned from frontend root folder
+    if path in pwa_assets or path.startswith('workbox-'):
+        return send_file(os.path.join(frontend_build_path, path))
+
     auth = True
     if settings.auth.type == 'basic':
         auth = request.authorization
@@ -113,7 +125,7 @@ def series_images(url):
     baseUrl = settings.sonarr.base_url
     url_image = f'{url_api_sonarr()}{url.lstrip(baseUrl)}?apikey={apikey}'.replace('poster-250', 'poster-500')
     try:
-        req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=headers)
+        req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=HEADERS)
     except Exception:
         return '', 404
     else:
@@ -127,7 +139,7 @@ def movies_images(url):
     baseUrl = settings.radarr.base_url
     url_image = f'{url_api_radarr()}{url.lstrip(baseUrl)}?apikey={apikey}'
     try:
-        req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=headers)
+        req = requests.get(url_image, stream=True, timeout=15, verify=False, headers=HEADERS)
     except Exception:
         return '', 404
     else:
@@ -148,8 +160,8 @@ def backup_download(filename):
 def swaggerui_static(filename):
     basepath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'libs', 'flask_restx',
                             'static')
-    fullpath = os.path.join(basepath, filename)
-    if not fullpath.startswith(basepath):
+    fullpath = os.path.realpath(os.path.join(basepath, filename))
+    if not basepath == os.path.commonpath((basepath, fullpath)):
         return '', 404
     else:
         return send_file(fullpath)
@@ -168,7 +180,7 @@ def proxy(protocol, url):
     url = f'{protocol}://{unquote(url)}'
     params = request.args
     try:
-        result = requests.get(url, params, allow_redirects=False, verify=False, timeout=5, headers=headers)
+        result = requests.get(url, params, allow_redirects=False, verify=False, timeout=5, headers=HEADERS)
     except Exception as e:
         return dict(status=False, error=repr(e))
     else:
@@ -181,7 +193,8 @@ def proxy(protocol, url):
     elif result.status_code == 401:
         return dict(status=False, error='Access Denied. Check API key.', code=result.status_code)
     elif result.status_code == 404:
-        return dict(status=False, error='Cannot get version. Maybe unsupported legacy API call?', code=result.status_code)
+        return dict(status=False, error='Cannot get version. Maybe unsupported legacy API call?',
+                    code=result.status_code)
     elif 300 <= result.status_code <= 399:
         return dict(status=False, error='Wrong URL Base.', code=result.status_code)
     else:
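The swagger static fix closes a path-traversal hole: `os.path.join` keeps `..` segments, so the joined string can still start with `basepath` while resolving outside it. Resolving with `realpath` first and then testing containment with `commonpath` is the robust order. A demonstration with illustrative paths:

```python
import os

basepath = "/srv/app/libs/flask_restx/static"   # illustrative base directory
requested = "../../../../etc/passwd"            # attacker-controlled filename

candidate = os.path.join(basepath, requested)
print(candidate.startswith(basepath))           # True: the naive check passes

resolved = os.path.realpath(candidate)
print(resolved)                                 # /etc/passwd
print(os.path.commonpath((basepath, resolved)) == basepath)  # False: blocked
```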

@@ -1,13 +1,12 @@
 # coding=utf-8

 import os
-import re

 # set Bazarr user-agent used to make requests
-headers = {"User-Agent": os.environ["SZ_USER_AGENT"]}
+HEADERS = {"User-Agent": os.environ["SZ_USER_AGENT"]}

-# hearing-impaired detection regex
-hi_regex = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
-
 # minimum file size for Bazarr to consider it a video
 MINIMUM_VIDEO_SIZE = 20480
+
+# maximum size for a subtitles file
+MAXIMUM_SUBTITLE_SIZE = 1 * 1024 * 1024

@@ -19,7 +19,8 @@ from utilities.backup import restore_from_backup

 from app.database import init_db

-from literals import *
+from literals import (EXIT_CONFIG_CREATE_ERROR, ENV_BAZARR_ROOT_DIR, DIR_BACKUP, DIR_CACHE, DIR_CONFIG, DIR_DB, DIR_LOG,
+                      DIR_RESTORE, EXIT_REQUIREMENTS_ERROR)
 from utilities.central import make_bazarr_dir, restart_bazarr, stop_bazarr

 # set start time global variable as epoch

@@ -28,3 +28,4 @@ EXIT_VALIDATION_ERROR = -101
 EXIT_CONFIG_CREATE_ERROR = -102
 EXIT_PYTHON_UPGRADE_NEEDED = -103
 EXIT_REQUIREMENTS_ERROR = -104
+EXIT_PORT_ALREADY_IN_USE_ERROR = -105

@@ -1,7 +1,6 @@
 # coding=utf-8

 import os
-import io

 from threading import Thread
@@ -36,18 +35,21 @@ else:
     # there's missing embedded packages after a commit
     check_if_new_update()

-from app.database import System, database, update, migrate_db, create_db_revision  # noqa E402
+from app.database import System, database, update, migrate_db, create_db_revision, upgrade_languages_profile_hi_values  # noqa E402
 from app.notifier import update_notifier  # noqa E402
 from languages.get_languages import load_language_in_db  # noqa E402
 from app.signalr_client import sonarr_signalr_client, radarr_signalr_client  # noqa E402
 from app.server import webserver, app  # noqa E402
 from app.announcements import get_announcements_to_file  # noqa E402
+from utilities.central import stop_bazarr  # noqa E402
+from literals import EXIT_NORMAL  # noqa E402

 if args.create_db_revision:
     create_db_revision(app)
     stop_bazarr(EXIT_NORMAL)
 else:
     migrate_db(app)
+    upgrade_languages_profile_hi_values()

 configure_proxy_func()

@@ -5,7 +5,7 @@ import logging

 from app.config import settings
 from radarr.info import url_api_radarr
-from constants import headers
+from constants import HEADERS


 def browse_radarr_filesystem(path='#'):
@@ -16,7 +16,7 @@ def browse_radarr_filesystem(path='#'):
                                      f"includeFiles=false&apikey={settings.radarr.apikey}")
     try:
         r = requests.get(url_radarr_api_filesystem, timeout=int(settings.radarr.http_timeout), verify=False,
-                         headers=headers)
+                         headers=HEADERS)
         r.raise_for_status()
     except requests.exceptions.HTTPError:
         logging.exception("BAZARR Error trying to get series from Radarr. Http error.")

@@ -3,12 +3,12 @@
 import logging
 import requests
 import datetime
-import json
+from requests.exceptions import JSONDecodeError

 from dogpile.cache import make_region

 from app.config import settings, empty_values
-from constants import headers
+from constants import HEADERS

 region = make_region().configure('dogpile.cache.memory')
@@ -30,17 +30,17 @@ class GetRadarrInfo:
         try:
             rv = f"{url_radarr()}/api/system/status?apikey={settings.radarr.apikey}"
             radarr_json = requests.get(rv, timeout=int(settings.radarr.http_timeout), verify=False,
-                                       headers=headers).json()
+                                       headers=HEADERS).json()
             if 'version' in radarr_json:
                 radarr_version = radarr_json['version']
             else:
-                raise json.decoder.JSONDecodeError
-        except json.decoder.JSONDecodeError:
+                raise JSONDecodeError
+        except JSONDecodeError:
             try:
                 rv = f"{url_radarr()}/api/v3/system/status?apikey={settings.radarr.apikey}"
                 radarr_version = requests.get(rv, timeout=int(settings.radarr.http_timeout), verify=False,
-                                              headers=headers).json()['version']
-            except json.decoder.JSONDecodeError:
+                                              headers=HEADERS).json()['version']
+            except JSONDecodeError:
                 logging.debug('BAZARR cannot get Radarr version')
                 radarr_version = 'unknown'
         except Exception:
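Since requests 2.27, `Response.json()` raises `requests.exceptions.JSONDecodeError` (a subclass of the stdlib `json.JSONDecodeError`), so catching the requests flavour works whether or not simplejson is installed. A minimal sketch against a hypothetical endpoint:

```python
import requests
from requests.exceptions import JSONDecodeError

def get_version(url):
    try:
        return requests.get(url, timeout=10).json().get('version', 'unknown')
    except JSONDecodeError:
        # body wasn't JSON (e.g. an HTML error page from a proxy)
        return 'unknown'
```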

@@ -5,7 +5,7 @@ import requests

 from app.config import settings
 from radarr.info import url_api_radarr
-from constants import headers
+from constants import HEADERS


 def notify_radarr(radarr_id):
@@ -15,6 +15,6 @@ def notify_radarr(radarr_id):
             'name': 'RescanMovie',
             'movieId': int(radarr_id)
         }
-        requests.post(url, json=data, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
+        requests.post(url, json=data, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
     except Exception:
         logging.exception('BAZARR cannot notify Radarr')

@@ -8,7 +8,7 @@ from app.config import settings
 from utilities.path_mappings import path_mappings
 from app.database import TableMoviesRootfolder, TableMovies, database, delete, update, insert, select
 from radarr.info import url_api_radarr
-from constants import headers
+from constants import HEADERS


 def get_radarr_rootfolder():
@@ -19,7 +19,7 @@ def get_radarr_rootfolder():
     url_radarr_api_rootfolder = f"{url_api_radarr()}rootfolder?apikey={apikey_radarr}"
     try:
-        rootfolder = requests.get(url_radarr_api_rootfolder, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
+        rootfolder = requests.get(url_radarr_api_rootfolder, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
     except requests.exceptions.ConnectionError:
         logging.exception("BAZARR Error trying to get rootfolder from Radarr. Connection Error.")
         return []

@@ -5,7 +5,7 @@ import logging

 from app.config import settings
 from radarr.info import get_radarr_info, url_api_radarr
-from constants import headers
+from constants import HEADERS


 def get_profile_list():
@@ -16,7 +16,7 @@ def get_profile_list():
                             f"apikey={apikey_radarr}")
     try:
-        profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
+        profiles_json = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
     except requests.exceptions.ConnectionError:
         logging.exception("BAZARR Error trying to get profiles from Radarr. Connection Error.")
     except requests.exceptions.Timeout:
@@ -45,7 +45,7 @@ def get_tags():
     url_radarr_api_series = f"{url_api_radarr()}tag?apikey={apikey_radarr}"
     try:
-        tagsDict = requests.get(url_radarr_api_series, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
+        tagsDict = requests.get(url_radarr_api_series, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
     except requests.exceptions.ConnectionError:
         logging.exception("BAZARR Error trying to get tags from Radarr. Connection Error.")
         return []
@@ -69,7 +69,7 @@ def get_movies_from_radarr_api(apikey_radarr, radarr_id=None):
     url_radarr_api_movies = f'{url_api_radarr()}movie{f"/{radarr_id}" if radarr_id else ""}?apikey={apikey_radarr}'
     try:
-        r = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers)
+        r = requests.get(url_radarr_api_movies, timeout=int(settings.radarr.http_timeout), verify=False, headers=HEADERS)
         if r.status_code == 404:
             return
         r.raise_for_status()
@@ -100,7 +100,7 @@ def get_history_from_radarr_api(apikey_radarr, movie_id):
     try:
         r = requests.get(url_radarr_api_history, timeout=int(settings.sonarr.http_timeout), verify=False,
-                         headers=headers)
+                         headers=HEADERS)
         r.raise_for_status()
     except requests.exceptions.HTTPError:
         logging.exception("BAZARR Error trying to get history from Radarr. Http error.")

@@ -5,7 +5,7 @@ import logging

 from app.config import settings
 from sonarr.info import url_api_sonarr
-from constants import headers
+from constants import HEADERS


 def browse_sonarr_filesystem(path='#'):
@@ -15,7 +15,7 @@ def browse_sonarr_filesystem(path='#'):
                                      f"includeFiles=false&apikey={settings.sonarr.apikey}")
     try:
         r = requests.get(url_sonarr_api_filesystem, timeout=int(settings.sonarr.http_timeout), verify=False,
-                         headers=headers)
+                         headers=HEADERS)
         r.raise_for_status()
     except requests.exceptions.HTTPError:
         logging.exception("BAZARR Error trying to get series from Sonarr. Http error.")

@@ -3,12 +3,12 @@
 import logging
 import requests
 import datetime
-import json
+from requests.exceptions import JSONDecodeError

 from dogpile.cache import make_region

 from app.config import settings, empty_values
-from constants import headers
+from constants import HEADERS

 region = make_region().configure('dogpile.cache.memory')
@@ -30,17 +30,17 @@ class GetSonarrInfo:
         try:
             sv = f"{url_sonarr()}/api/system/status?apikey={settings.sonarr.apikey}"
             sonarr_json = requests.get(sv, timeout=int(settings.sonarr.http_timeout), verify=False,
-                                       headers=headers).json()
+                                       headers=HEADERS).json()
             if 'version' in sonarr_json:
                 sonarr_version = sonarr_json['version']
             else:
-                raise json.decoder.JSONDecodeError
-        except json.decoder.JSONDecodeError:
+                raise JSONDecodeError
+        except JSONDecodeError:
             try:
                 sv = f"{url_sonarr()}/api/v3/system/status?apikey={settings.sonarr.apikey}"
                 sonarr_version = requests.get(sv, timeout=int(settings.sonarr.http_timeout), verify=False,
-                                              headers=headers).json()['version']
-            except json.decoder.JSONDecodeError:
+                                              headers=HEADERS).json()['version']
+            except JSONDecodeError:
                 logging.debug('BAZARR cannot get Sonarr version')
                 sonarr_version = 'unknown'
         except Exception:

@@ -5,7 +5,7 @@ import requests

 from app.config import settings
 from sonarr.info import url_api_sonarr
-from constants import headers
+from constants import HEADERS


 def notify_sonarr(sonarr_series_id):
@@ -15,6 +15,6 @@ def notify_sonarr(sonarr_series_id):
             'name': 'RescanSeries',
             'seriesId': int(sonarr_series_id)
         }
-        requests.post(url, json=data, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
+        requests.post(url, json=data, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
     except Exception:
         logging.exception('BAZARR cannot notify Sonarr')

@@ -8,7 +8,7 @@ from app.config import settings
 from app.database import TableShowsRootfolder, TableShows, database, insert, update, delete, select
 from utilities.path_mappings import path_mappings
 from sonarr.info import url_api_sonarr
-from constants import headers
+from constants import HEADERS


 def get_sonarr_rootfolder():
@@ -19,7 +19,7 @@ def get_sonarr_rootfolder():
     url_sonarr_api_rootfolder = f"{url_api_sonarr()}rootfolder?apikey={apikey_sonarr}"
     try:
-        rootfolder = requests.get(url_sonarr_api_rootfolder, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers)
+        rootfolder = requests.get(url_sonarr_api_rootfolder, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
     except requests.exceptions.ConnectionError:
         logging.exception("BAZARR Error trying to get rootfolder from Sonarr. Connection Error.")
         return []

@ -5,7 +5,7 @@ import logging
from app.config import settings from app.config import settings
from sonarr.info import get_sonarr_info, url_api_sonarr from sonarr.info import get_sonarr_info, url_api_sonarr
from constants import headers from constants import HEADERS
def get_profile_list(): def get_profile_list():
@ -23,7 +23,7 @@ def get_profile_list():
try: try:
profiles_json = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, profiles_json = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers) headers=HEADERS)
except requests.exceptions.ConnectionError: except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get profiles from Sonarr. Connection Error.") logging.exception("BAZARR Error trying to get profiles from Sonarr. Connection Error.")
return None return None
@ -53,7 +53,7 @@ def get_tags():
url_sonarr_api_series = f"{url_api_sonarr()}tag?apikey={apikey_sonarr}" url_sonarr_api_series = f"{url_api_sonarr()}tag?apikey={apikey_sonarr}"
try: try:
tagsDict = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers) tagsDict = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
except requests.exceptions.ConnectionError: except requests.exceptions.ConnectionError:
logging.exception("BAZARR Error trying to get tags from Sonarr. Connection Error.") logging.exception("BAZARR Error trying to get tags from Sonarr. Connection Error.")
return [] return []
@ -71,7 +71,7 @@ def get_series_from_sonarr_api(apikey_sonarr, sonarr_series_id=None):
url_sonarr_api_series = (f"{url_api_sonarr()}series/{sonarr_series_id if sonarr_series_id else ''}?" url_sonarr_api_series = (f"{url_api_sonarr()}series/{sonarr_series_id if sonarr_series_id else ''}?"
f"apikey={apikey_sonarr}") f"apikey={apikey_sonarr}")
try: try:
r = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers) r = requests.get(url_sonarr_api_series, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
r.raise_for_status() r.raise_for_status()
except requests.exceptions.HTTPError as e: except requests.exceptions.HTTPError as e:
if e.response.status_code: if e.response.status_code:
@ -110,7 +110,7 @@ def get_episodes_from_sonarr_api(apikey_sonarr, series_id=None, episode_id=None)
return return
try: try:
r = requests.get(url_sonarr_api_episode, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers) r = requests.get(url_sonarr_api_episode, timeout=int(settings.sonarr.http_timeout), verify=False, headers=HEADERS)
r.raise_for_status() r.raise_for_status()
except requests.exceptions.HTTPError: except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get episodes from Sonarr. Http error.") logging.exception("BAZARR Error trying to get episodes from Sonarr. Http error.")
@ -144,7 +144,7 @@ def get_episodesFiles_from_sonarr_api(apikey_sonarr, series_id=None, episode_fil
try: try:
r = requests.get(url_sonarr_api_episodeFiles, timeout=int(settings.sonarr.http_timeout), verify=False, r = requests.get(url_sonarr_api_episodeFiles, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers) headers=HEADERS)
r.raise_for_status() r.raise_for_status()
except requests.exceptions.HTTPError: except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get episodeFiles from Sonarr. Http error.") logging.exception("BAZARR Error trying to get episodeFiles from Sonarr. Http error.")
@ -173,7 +173,7 @@ def get_history_from_sonarr_api(apikey_sonarr, episode_id):
try: try:
r = requests.get(url_sonarr_api_history, timeout=int(settings.sonarr.http_timeout), verify=False, r = requests.get(url_sonarr_api_history, timeout=int(settings.sonarr.http_timeout), verify=False,
headers=headers) headers=HEADERS)
r.raise_for_status() r.raise_for_status()
except requests.exceptions.HTTPError: except requests.exceptions.HTTPError:
logging.exception("BAZARR Error trying to get history from Sonarr. Http error.") logging.exception("BAZARR Error trying to get history from Sonarr. Http error.")

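Every Sonarr call site above now imports the shared HEADERS constant instead of the old lowercase headers. This diff does not show the constant itself; a plausible sketch, assuming it only carries a User-Agent:

# constants.py (sketch; the exact contents are an assumption, not shown in this diff)
HEADERS = {"User-Agent": "Bazarr"}

# every HTTP call then passes it uniformly:
# requests.get(url, timeout=..., verify=False, headers=HEADERS)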
@ -24,8 +24,9 @@ from .processing import process_subtitle
@update_pools @update_pools
def generate_subtitles(path, languages, audio_language, sceneName, title, media_type, def generate_subtitles(path, languages, audio_language, sceneName, title, media_type, forced_minimum_score=None,
forced_minimum_score=None, is_upgrade=False, profile_id=None, check_if_still_required=False): is_upgrade=False, profile_id=None, check_if_still_required=False,
previous_subtitles_to_delete=None):
if not languages: if not languages:
return None return None
@ -87,6 +88,13 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
fld = get_target_folder(path) fld = get_target_folder(path)
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith( chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
'win') and settings.general.chmod_enabled else None 'win') and settings.general.chmod_enabled else None
if is_upgrade and previous_subtitles_to_delete:
try:
# delete the previously downloaded subtitles when upgrading to prevent an endless
# upgrade loop edge case.
os.remove(previous_subtitles_to_delete)
except (OSError, FileNotFoundError):
pass
saved_subtitles = save_subtitles(video.original_path, subtitles, saved_subtitles = save_subtitles(video.original_path, subtitles,
single=settings.general.single_language, single=settings.general.single_language,
tags=None, # fixme tags=None, # fixme

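A minimal, self-contained sketch of the delete-before-save pattern introduced above; function and file names are illustrative, not Bazarr's:

import os

def replace_subtitle(new_content, save_path, previous_subtitles_to_delete=None, is_upgrade=False):
    # on upgrade, remove the old subtitle first so the freshly saved file
    # cannot be mistaken for a pre-existing one on the next indexing pass
    if is_upgrade and previous_subtitles_to_delete:
        try:
            os.remove(previous_subtitles_to_delete)
        except OSError:  # FileNotFoundError is a subclass of OSError
            pass
    with open(save_path, 'w', encoding='utf-8') as f:
        f.write(new_content)

replace_subtitle('1\n00:00:01,000 --> 00:00:02,000\nHi\n', 'example.en.srt')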
@ -182,7 +182,9 @@ def list_missing_subtitles_movies(no=None, send_event=True):
if any(x['code2'] == language['language'] for x in get_audio_profile_languages( if any(x['code2'] == language['language'] for x in get_audio_profile_languages(
movie_subtitles.audio_language)): movie_subtitles.audio_language)):
continue continue
desired_subtitles_list.append([language['language'], language['forced'], language['hi']]) desired_subtitles_list.append({'language': language['language'],
'forced': language['forced'],
'hi': language['hi']})
# get existing subtitles # get existing subtitles
actual_subtitles_list = [] actual_subtitles_list = []
@ -204,7 +206,9 @@ def list_missing_subtitles_movies(no=None, send_event=True):
elif subtitles[1] == 'hi': elif subtitles[1] == 'hi':
forced = False forced = False
hi = True hi = True
actual_subtitles_list.append([lang, str(forced), str(hi)]) actual_subtitles_list.append({'language': lang,
'forced': str(forced),
'hi': str(hi)})
# check if cutoff is reached and skip any further check # check if cutoff is reached and skip any further check
cutoff_met = False cutoff_met = False
@ -234,19 +238,21 @@ def list_missing_subtitles_movies(no=None, send_event=True):
# remove from missing the languages that already have forced or hi subtitles in existing # remove from missing the languages that already have forced or hi subtitles in existing
for item in actual_subtitles_list: for item in actual_subtitles_list:
if item[2] == 'True': if item['hi'] == 'True':
try: try:
missing_subtitles_list.remove([item[0], 'False', 'False']) missing_subtitles_list.remove({'language': item['language'],
'forced': 'False',
'hi': 'False'})
except ValueError: except ValueError:
pass pass
# make the missing languages list look as expected # make the missing languages list look as expected
missing_subtitles_output_list = [] missing_subtitles_output_list = []
for item in missing_subtitles_list: for item in missing_subtitles_list:
lang = item[0] lang = item['language']
if item[1] == 'True': if item['forced'] == 'True':
lang += ':forced' lang += ':forced'
elif item[2] == 'True': elif item['hi'] == 'True':
lang += ':hi' lang += ':hi'
missing_subtitles_output_list.append(lang) missing_subtitles_output_list.append(lang)

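Switching the per-subtitle descriptors from positional lists to dicts keeps remove() working unchanged, because Python compares both lists and dicts by content; the dict version is simply self-describing. A self-contained illustration:

missing = [{'language': 'en', 'forced': 'False', 'hi': 'False'},
           {'language': 'fr', 'forced': 'False', 'hi': 'False'}]
existing = [{'language': 'en', 'forced': 'False', 'hi': 'True'}]

for item in existing:
    if item['hi'] == 'True':
        try:
            # dicts compare by content, so remove() finds the equal entry
            missing.remove({'language': item['language'], 'forced': 'False', 'hi': 'False'})
        except ValueError:
            pass

print(missing)  # [{'language': 'fr', 'forced': 'False', 'hi': 'False'}]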
@ -182,7 +182,9 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
if any(x['code2'] == language['language'] for x in get_audio_profile_languages( if any(x['code2'] == language['language'] for x in get_audio_profile_languages(
episode_subtitles.audio_language)): episode_subtitles.audio_language)):
continue continue
desired_subtitles_list.append([language['language'], language['forced'], language['hi']]) desired_subtitles_list.append({'language': language['language'],
'forced': language['forced'],
'hi': language['hi']})
# get existing subtitles # get existing subtitles
actual_subtitles_list = [] actual_subtitles_list = []
@ -204,7 +206,9 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
elif subtitles[1] == 'hi': elif subtitles[1] == 'hi':
forced = False forced = False
hi = True hi = True
actual_subtitles_list.append([lang, str(forced), str(hi)]) actual_subtitles_list.append({'language': lang,
'forced': str(forced),
'hi': str(hi)})
# check if cutoff is reached and skip any further check # check if cutoff is reached and skip any further check
cutoff_met = False cutoff_met = False
@ -236,19 +240,21 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
# remove from missing the languages that already have hi subtitles in existing # remove from missing the languages that already have hi subtitles in existing
for item in actual_subtitles_list: for item in actual_subtitles_list:
if item[2] == 'True': if item['hi'] == 'True':
try: try:
missing_subtitles_list.remove([item[0], 'False', 'False']) missing_subtitles_list.remove({'language': item['language'],
'forced': 'False',
'hi': 'False'})
except ValueError: except ValueError:
pass pass
# make the missing languages list look as expected # make the missing languages list look as expected
missing_subtitles_output_list = [] missing_subtitles_output_list = []
for item in missing_subtitles_list: for item in missing_subtitles_list:
lang = item[0] lang = item['language']
if item[1] == 'True': if item['forced'] == 'True':
lang += ':forced' lang += ':forced'
elif item[2] == 'True': elif item['hi'] == 'True':
lang += ':hi' lang += ':hi'
missing_subtitles_output_list.append(lang) missing_subtitles_output_list.append(lang)

@ -9,8 +9,8 @@ from subliminal_patch import core
from subzero.language import Language from subzero.language import Language
from charset_normalizer import detect from charset_normalizer import detect
from constants import MAXIMUM_SUBTITLE_SIZE
from app.config import settings from app.config import settings
from constants import hi_regex
from utilities.path_mappings import path_mappings from utilities.path_mappings import path_mappings
@ -68,7 +68,7 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
forced = True if os.path.splitext(os.path.splitext(subtitle)[0])[1] == '.forced' else False forced = True if os.path.splitext(os.path.splitext(subtitle)[0])[1] == '.forced' else False
# to improve performance, skip detection of files larger than 1MB # to improve performance, skip detection of files larger than 1MB
if os.path.getsize(subtitle_path) > 1 * 1024 * 1024: if os.path.getsize(subtitle_path) > MAXIMUM_SUBTITLE_SIZE:
logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: " logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: "
f"{subtitle_path}") f"{subtitle_path}")
continue continue
@ -119,7 +119,7 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
# check if file exist: # check if file exist:
if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS: if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS:
# to improve performance, skip detection of files larger than 1MB # to improve performance, skip detection of files larger than 1MB
if os.path.getsize(subtitle_path) > 1 * 1024 * 1024: if os.path.getsize(subtitle_path) > MAXIMUM_SUBTITLE_SIZE:
logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: " logging.debug(f"BAZARR subtitles file is too large to be text based. Skipping this file: "
f"{subtitle_path}") f"{subtitle_path}")
continue continue
@ -136,6 +136,6 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
continue continue
text = text.decode(encoding) text = text.decode(encoding)
if bool(re.search(hi_regex, text)): if bool(re.search(core.HI_REGEX, text)):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True) subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
return subtitles return subtitles

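Both guards above are easy to check in isolation: files over MAXIMUM_SUBTITLE_SIZE (presumably still 1 MB, now centralized in constants) are skipped, and a subtitle is flagged hearing-impaired when core.HI_REGEX matches. The regex below is copied from the subliminal_patch change later in this diff; the constant's value is an assumption:

import re

MAXIMUM_SUBTITLE_SIZE = 1 * 1024 * 1024  # assumed value of the new constant
HI_REGEX = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')

def looks_hearing_impaired(text):
    # HI subtitles carry bracketed or symbol-wrapped sound descriptions
    return bool(re.search(HI_REGEX, text))

print(looks_hearing_impaired("[door slams]\n- Who's there?"))  # True
print(looks_hearing_impaired("- Who's there?"))                # False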
@ -18,7 +18,7 @@ from app.config import get_scores, settings, get_array_from
from utilities.helper import get_target_folder, force_unicode from utilities.helper import get_target_folder, force_unicode
from app.database import get_profiles_list from app.database import get_profiles_list
from .pool import update_pools, _get_pool, _init_pool from .pool import update_pools, _get_pool
from .utils import get_video, _get_lang_obj, _get_scores, _set_forced_providers from .utils import get_video, _get_lang_obj, _get_scores, _set_forced_providers
from .processing import process_subtitle from .processing import process_subtitle
@ -46,21 +46,7 @@ def manual_search(path, profile_id, providers, sceneName, title, media_type):
try: try:
if providers: if providers:
subtitles = list_all_subtitles([video], language_set, pool) subtitles = list_all_subtitles([video], language_set, pool)
if 'subscene' in providers:
s_pool = _init_pool("movie", profile_id, {"subscene"})
subscene_language_set = set()
for language in language_set:
if language.forced:
subscene_language_set.add(language)
if len(subscene_language_set):
s_pool.provider_configs.update({"subscene": {"only_foreign": True}})
subtitles_subscene = list_all_subtitles([video], subscene_language_set, s_pool)
s_pool.provider_configs.update({"subscene": {"only_foreign": False}})
subtitles[video] += subtitles_subscene[video]
else: else:
subtitles = []
logging.info("BAZARR All providers are throttled") logging.info("BAZARR All providers are throttled")
return 'All providers are throttled' return 'All providers are throttled'
except Exception: except Exception:
@ -172,8 +158,9 @@ def manual_download_subtitle(path, audio_language, hi, forced, subtitle, provide
subtitle.language.forced = True subtitle.language.forced = True
else: else:
subtitle.language.forced = False subtitle.language.forced = False
if use_original_format == 'True': if use_original_format in ("1", "True"):
subtitle.use_original_format = use_original_format subtitle.use_original_format = True
subtitle.mods = get_array_from(settings.general.subzero_mods) subtitle.mods = get_array_from(settings.general.subzero_mods)
video = get_video(force_unicode(path), title, sceneName, providers={provider}, media_type=media_type) video = get_video(force_unicode(path), title, sceneName, providers={provider}, media_type=media_type)
if video: if video:

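The old check only recognized the literal string 'True' and then stored the string itself on subtitle.use_original_format; the new code accepts both "1" and "True" and stores a real boolean. A tiny sketch of the normalization, assuming the value always arrives as a string:

def coerce_original_format(value):
    # settings and form values arrive as strings, never booleans
    return value in ("1", "True")

assert coerce_original_format("1") is True
assert coerce_original_format("True") is True
assert coerce_original_format("False") is False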
@ -88,6 +88,7 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
from .sync import sync_subtitles from .sync import sync_subtitles
sync_subtitles(video_path=path, srt_path=downloaded_path, sync_subtitles(video_path=path, srt_path=downloaded_path,
forced=subtitle.language.forced, forced=subtitle.language.forced,
hi=subtitle.language.hi,
srt_lang=downloaded_language_code2, srt_lang=downloaded_language_code2,
percent_score=percent_score, percent_score=percent_score,
sonarr_series_id=episode_metadata.sonarrSeriesId, sonarr_series_id=episode_metadata.sonarrSeriesId,
@ -106,6 +107,7 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
from .sync import sync_subtitles from .sync import sync_subtitles
sync_subtitles(video_path=path, srt_path=downloaded_path, sync_subtitles(video_path=path, srt_path=downloaded_path,
forced=subtitle.language.forced, forced=subtitle.language.forced,
hi=subtitle.language.hi,
srt_lang=downloaded_language_code2, srt_lang=downloaded_language_code2,
percent_score=percent_score, percent_score=percent_score,
radarr_id=movie_metadata.radarrId) radarr_id=movie_metadata.radarrId)

@ -3,9 +3,11 @@
from .ffprobe import refine_from_ffprobe from .ffprobe import refine_from_ffprobe
from .database import refine_from_db from .database import refine_from_db
from .arr_history import refine_from_arr_history from .arr_history import refine_from_arr_history
from .anidb import refine_from_anidb
registered = { registered = {
"database": refine_from_db, "database": refine_from_db,
"ffprobe": refine_from_ffprobe, "ffprobe": refine_from_ffprobe,
"arr_history": refine_from_arr_history, "arr_history": refine_from_arr_history,
"anidb": refine_from_anidb,
} }

@ -0,0 +1,195 @@
# coding=utf-8
# fmt: off
import logging
import requests
from collections import namedtuple
from datetime import datetime, timedelta
from requests.exceptions import HTTPError
from app.config import settings
from subliminal import Episode, region
from subliminal.cache import REFINER_EXPIRATION_TIME
from subliminal_patch.exceptions import TooManyRequests
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
refined_providers = {'animetosho'}
api_url = 'http://api.anidb.net:9001/httpapi'
cache_key_refiner = "anidb_refiner"
# Soft limit on the number of API requests per day
daily_limit_request_count = 200
class AniDBClient(object):
def __init__(self, api_client_key=None, api_client_ver=1, session=None):
self.session = session or requests.Session()
self.api_client_key = api_client_key
self.api_client_ver = api_client_ver
self.cache = region.get(cache_key_refiner, expiration_time=timedelta(days=1).total_seconds())
@property
def is_throttled(self):
return self.cache and self.cache.get('is_throttled')
@property
def daily_api_request_count(self):
if not self.cache:
return 0
return self.cache.get('daily_api_request_count', 0)
AnimeInfo = namedtuple('AnimeInfo', ['anime', 'episode_offset'])
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_mappings(self):
r = self.session.get(
'https://raw.githubusercontent.com/Anime-Lists/anime-lists/master/anime-list.xml',
timeout=10
)
r.raise_for_status()
return r.content
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_id(self, mappings, tvdb_series_season, tvdb_series_id, episode):
# Enrich the collection of anime with the episode offset
animes = [
self.AnimeInfo(anime, int(anime.attrib.get('episodeoffset', 0)))
for anime in mappings.findall(
f".//anime[@tvdbid='{tvdb_series_id}'][@defaulttvdbseason='{tvdb_series_season}']"
)
]
if not animes:
return None, None
# Sort the anime by offset in ascending order
animes.sort(key=lambda a: a.episode_offset)
# Unlike TVDB, AniDB uses different ids for the parts of a season
anidb_id = None
offset = 0
for anime, episode_offset in animes:
# keep the id of the last season part whose episode offset is still below the episode number
if episode > episode_offset:
anidb_id = int(anime.attrib.get('anidbid'))
offset = episode_offset
return anidb_id, episode - offset
@region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
def get_series_episodes_ids(self, tvdb_series_id, season, episode):
mappings = etree.fromstring(self.get_series_mappings())
series_id, episode_no = self.get_series_id(mappings, season, tvdb_series_id, episode)
if not series_id:
return None, None
episodes = etree.fromstring(self.get_episodes(series_id))
return series_id, int(episodes.find(f".//episode[epno='{episode_no}']").attrib.get('id'))
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def get_episodes(self, series_id):
if self.daily_api_request_count >= daily_limit_request_count:
raise TooManyRequests('Daily API request limit exceeded')
r = self.session.get(
api_url,
params={
'request': 'anime',
'client': self.api_client_key,
'clientver': self.api_client_ver,
'protover': 1,
'aid': series_id
},
timeout=10)
r.raise_for_status()
xml_root = etree.fromstring(r.content)
response_code = xml_root.attrib.get('code')
if response_code == '500':
raise TooManyRequests('AniDB API Abuse detected. Banned status.')
elif response_code == '302':
raise HTTPError('AniDB API Client error. Client is disabled or does not exist.')
self.increment_daily_quota()
episode_elements = xml_root.find('episodes')
if not episode_elements:
raise ValueError('No episodes found in AniDB response')
return etree.tostring(episode_elements, encoding='utf8', method='xml')
def increment_daily_quota(self):
daily_quota = self.daily_api_request_count + 1
if not self.cache:
region.set(cache_key_refiner, {'daily_api_request_count': daily_quota})
return
self.cache['daily_api_request_count'] = daily_quota
region.set(cache_key_refiner, self.cache)
@staticmethod
def mark_as_throttled():
region.set(cache_key_refiner, {'is_throttled': True})
def refine_from_anidb(path, video):
if not isinstance(video, Episode) or not video.series_tvdb_id:
logging.debug(f'Video is not an Anime TV series, skipping refinement for {video}')
return
if refined_providers.intersection(settings.general.enabled_providers) and video.series_anidb_id is None:
refine_anidb_ids(video)
def refine_anidb_ids(video):
anidb_client = AniDBClient(settings.anidb.api_client, settings.anidb.api_client_ver)
season = video.season if video.season else 0
if anidb_client.is_throttled:
logging.warning(f'API daily limit reached. Skipping refinement for {video.series}')
return video
try:
anidb_series_id, anidb_episode_id = anidb_client.get_series_episodes_ids(
video.series_tvdb_id,
season, video.episode,
)
except TooManyRequests:
logging.error(f'API daily limit reached while refining {video.series}')
anidb_client.mark_as_throttled()
return video
if not anidb_episode_id:
logging.error(f'Could not find anime series {video.series}')
return video
video.series_anidb_id = anidb_series_id
video.series_anidb_episode_id = anidb_episode_id

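The TVDB-to-AniDB lookup above is driven by the community anime-lists XML, where one TVDB season can span several AniDB entries distinguished by episodeoffset. A self-contained sketch of the offset arithmetic on an inline fragment, mirroring the loop in get_series_id (all ids are made up):

import xml.etree.ElementTree as etree

MAPPING = """
<anime-list>
  <anime anidbid="100" tvdbid="999" defaulttvdbseason="1" episodeoffset="0"/>
  <anime anidbid="101" tvdbid="999" defaulttvdbseason="1" episodeoffset="12"/>
</anime-list>
"""

def anidb_episode(tvdb_id, season, episode):
    root = etree.fromstring(MAPPING)
    animes = sorted(
        ((a, int(a.attrib.get('episodeoffset', 0)))
         for a in root.findall(f".//anime[@tvdbid='{tvdb_id}'][@defaulttvdbseason='{season}']")),
        key=lambda pair: pair[1])
    anidb_id, offset = None, 0
    for anime, episode_offset in animes:
        # keep the last part whose offset is still below the episode number
        if episode > episode_offset:
            anidb_id = int(anime.attrib.get('anidbid'))
            offset = episode_offset
    return anidb_id, episode - offset

print(anidb_episode(999, 1, 5))   # (100, 5): first part of the season
print(anidb_episode(999, 1, 14))  # (101, 2): second part, offset applied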
@ -8,7 +8,7 @@ from app.config import settings
from subtitles.tools.subsyncer import SubSyncer from subtitles.tools.subsyncer import SubSyncer
def sync_subtitles(video_path, srt_path, srt_lang, forced, percent_score, sonarr_series_id=None, def sync_subtitles(video_path, srt_path, srt_lang, forced, hi, percent_score, sonarr_series_id=None,
sonarr_episode_id=None, radarr_id=None): sonarr_episode_id=None, radarr_id=None):
if forced: if forced:
logging.debug('BAZARR cannot sync forced subtitles. Skipping sync routine.') logging.debug('BAZARR cannot sync forced subtitles. Skipping sync routine.')
@ -30,6 +30,8 @@ def sync_subtitles(video_path, srt_path, srt_lang, forced, percent_score, sonarr
'video_path': video_path, 'video_path': video_path,
'srt_path': srt_path, 'srt_path': srt_path,
'srt_lang': srt_lang, 'srt_lang': srt_lang,
'forced': forced,
'hi': hi,
'max_offset_seconds': str(settings.subsync.max_offset_seconds), 'max_offset_seconds': str(settings.subsync.max_offset_seconds),
'no_fix_framerate': settings.subsync.no_fix_framerate, 'no_fix_framerate': settings.subsync.no_fix_framerate,
'gss': settings.subsync.gss, 'gss': settings.subsync.gss,

@ -30,7 +30,7 @@ class SubSyncer:
self.vad = 'subs_then_webrtc' self.vad = 'subs_then_webrtc'
self.log_dir_path = os.path.join(args.config_dir, 'log') self.log_dir_path = os.path.join(args.config_dir, 'log')
def sync(self, video_path, srt_path, srt_lang, def sync(self, video_path, srt_path, srt_lang, hi, forced,
max_offset_seconds, no_fix_framerate, gss, reference=None, max_offset_seconds, no_fix_framerate, gss, reference=None,
sonarr_series_id=None, sonarr_episode_id=None, radarr_id=None): sonarr_series_id=None, sonarr_episode_id=None, radarr_id=None):
self.reference = video_path self.reference = video_path
@ -118,10 +118,10 @@ class SubSyncer:
downloaded_language_code2=srt_lang, downloaded_language_code2=srt_lang,
downloaded_provider=None, downloaded_provider=None,
score=None, score=None,
forced=None, forced=forced,
subtitle_id=None, subtitle_id=None,
reversed_subtitles_path=srt_path, reversed_subtitles_path=srt_path,
hearing_impaired=None) hearing_impaired=hi)
if sonarr_episode_id: if sonarr_episode_id:
history_log(action=5, sonarr_series_id=sonarr_series_id, sonarr_episode_id=sonarr_episode_id, history_log(action=5, sonarr_series_id=sonarr_series_id, sonarr_episode_id=sonarr_episode_id,

@ -53,7 +53,7 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
partial_lines_str = joined_lines_str[:max_characters] partial_lines_str = joined_lines_str[:max_characters]
if len(joined_lines_str) > max_characters: if len(joined_lines_str) > max_characters:
new_partial_lines_str = partial_lines_str.rsplit('\n\n\n', 1)[0] new_partial_lines_str = partial_lines_str.rsplit('\n\n', 1)[0]
else: else:
new_partial_lines_str = partial_lines_str new_partial_lines_str = partial_lines_str
@ -71,7 +71,7 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
logging.exception(f'BAZARR Unable to translate subtitles {source_srt_file}') logging.exception(f'BAZARR Unable to translate subtitles {source_srt_file}')
return False return False
else: else:
translated_partial_srt_list = translated_partial_srt_text.split('\n\n\n') translated_partial_srt_list = translated_partial_srt_text.split('\n\n')
translated_lines_list += translated_partial_srt_list translated_lines_list += translated_partial_srt_list
logging.debug(f'BAZARR saving translated subtitles to {dest_srt_file}') logging.debug(f'BAZARR saving translated subtitles to {dest_srt_file}')
@ -94,10 +94,10 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
downloaded_language_code2=to_lang, downloaded_language_code2=to_lang,
downloaded_provider=None, downloaded_provider=None,
score=None, score=None,
forced=None, forced=forced,
subtitle_id=None, subtitle_id=None,
reversed_subtitles_path=dest_srt_file, reversed_subtitles_path=dest_srt_file,
hearing_impaired=None) hearing_impaired=hi)
if media_type == 'series': if media_type == 'series':
history_log(action=6, sonarr_series_id=sonarr_series_id, sonarr_episode_id=sonarr_episode_id, result=result) history_log(action=6, sonarr_series_id=sonarr_series_id, sonarr_episode_id=sonarr_episode_id, result=result)

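Google Translate caps the request size, so the subtitle text is sent in chunks; the change from '\n\n\n' to '\n\n' makes the cut fall on the actual blank-line separator between SRT blocks. A self-contained sketch of the chunking loop (max_characters is illustrative):

def chunk_on_blocks(text, max_characters=30):
    # cut the text into chunks of at most max_characters, always on a
    # blank-line boundary so no subtitle block is split in half
    chunks = []
    while text:
        partial = text[:max_characters]
        if len(text) > max_characters:
            partial = partial.rsplit('\n\n', 1)[0]
        chunks.append(partial)
        text = text[len(partial):].lstrip('\n')
    return chunks

blocks = '\n\n'.join(f'line {i}' for i in range(6))
print(chunk_on_blocks(blocks))  # two chunks, each cut between blocks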
@ -69,14 +69,12 @@ def upgrade_subtitles():
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId) .join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId)
.join(episodes_to_upgrade, onclause=TableHistory.id == episodes_to_upgrade.c.id, isouter=True) .join(episodes_to_upgrade, onclause=TableHistory.id == episodes_to_upgrade.c.id, isouter=True)
.where(episodes_to_upgrade.c.id.is_not(None))) .where(episodes_to_upgrade.c.id.is_not(None)))
.all() if _language_still_desired(x.language, x.profileId)] .all() if _language_still_desired(x.language, x.profileId) and
x.subtitles_path in x.external_subtitles and
x.video_path == x.path
]
for item in episodes_data: for item in episodes_data:
if item['upgradable']:
if item['subtitles_path'] not in item['external_subtitles'] or \
not item['video_path'] == item['path']:
item.update({"upgradable": False})
del item['path'] del item['path']
del item['external_subtitles'] del item['external_subtitles']
@ -110,7 +108,9 @@ def upgrade_subtitles():
episode['seriesTitle'], episode['seriesTitle'],
'series', 'series',
forced_minimum_score=int(episode['score']), forced_minimum_score=int(episode['score']),
is_upgrade=True)) is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace(
episode['subtitles_path'])))
if result: if result:
if isinstance(result, list) and len(result): if isinstance(result, list) and len(result):
@ -154,14 +154,12 @@ def upgrade_subtitles():
.join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId) .join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId)
.join(movies_to_upgrade, onclause=TableHistoryMovie.id == movies_to_upgrade.c.id, isouter=True) .join(movies_to_upgrade, onclause=TableHistoryMovie.id == movies_to_upgrade.c.id, isouter=True)
.where(movies_to_upgrade.c.id.is_not(None))) .where(movies_to_upgrade.c.id.is_not(None)))
.all() if _language_still_desired(x.language, x.profileId)] .all() if _language_still_desired(x.language, x.profileId) and
x.subtitles_path in x.external_subtitles and
x.video_path == x.path
]
for item in movies_data: for item in movies_data:
if item['upgradable']:
if item['subtitles_path'] not in item['external_subtitles'] or \
not item['video_path'] == item['path']:
item.update({"upgradable": False})
del item['path'] del item['path']
del item['external_subtitles'] del item['external_subtitles']
@ -195,7 +193,9 @@ def upgrade_subtitles():
movie['title'], movie['title'],
'movie', 'movie',
forced_minimum_score=int(movie['score']), forced_minimum_score=int(movie['score']),
is_upgrade=True)) is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie(
movie['subtitles_path'])))
if result: if result:
if isinstance(result, list) and len(result): if isinstance(result, list) and len(result):
result = result[0] result = result[0]

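The filter that used to run as a second pass (flagging rows as not upgradable) is folded into the list comprehension itself, so rows whose subtitle file is gone or whose video path changed are dropped outright. The predicate in isolation (data shapes are illustrative):

rows = [
    {'subtitles_path': '/sub/a.srt', 'external_subtitles': ['/sub/a.srt'],
     'video_path': '/v/a.mkv', 'path': '/v/a.mkv'},
    {'subtitles_path': '/sub/b.srt', 'external_subtitles': [],
     'video_path': '/v/b.mkv', 'path': '/v/b.mkv'},
]

# keep only rows whose subtitle is still indexed for that video and whose
# video path is unchanged; such rows used to be kept and merely flagged
upgradable = [r for r in rows
              if r['subtitles_path'] in r['external_subtitles']
              and r['video_path'] == r['path']]
print([r['subtitles_path'] for r in upgradable])  # ['/sub/a.srt']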
@ -138,7 +138,7 @@ def manual_upload_subtitle(path, language, forced, hi, media_type, subtitle, aud
series_id = episode_metadata.sonarrSeriesId series_id = episode_metadata.sonarrSeriesId
episode_id = episode_metadata.sonarrEpisodeId episode_id = episode_metadata.sonarrEpisodeId
sync_subtitles(video_path=path, srt_path=subtitle_path, srt_lang=uploaded_language_code2, percent_score=100, sync_subtitles(video_path=path, srt_path=subtitle_path, srt_lang=uploaded_language_code2, percent_score=100,
sonarr_series_id=episode_metadata.sonarrSeriesId, forced=forced, sonarr_series_id=episode_metadata.sonarrSeriesId, forced=forced, hi=hi,
sonarr_episode_id=episode_metadata.sonarrEpisodeId) sonarr_episode_id=episode_metadata.sonarrEpisodeId)
else: else:
if not movie_metadata: if not movie_metadata:
@ -146,7 +146,7 @@ def manual_upload_subtitle(path, language, forced, hi, media_type, subtitle, aud
series_id = "" series_id = ""
episode_id = movie_metadata.radarrId episode_id = movie_metadata.radarrId
sync_subtitles(video_path=path, srt_path=subtitle_path, srt_lang=uploaded_language_code2, percent_score=100, sync_subtitles(video_path=path, srt_path=subtitle_path, srt_lang=uploaded_language_code2, percent_score=100,
radarr_id=movie_metadata.radarrId, forced=forced) radarr_id=movie_metadata.radarrId, forced=forced, hi=hi)
if use_postprocessing: if use_postprocessing:
command = pp_replace(postprocessing_cmd, path, subtitle_path, uploaded_language, uploaded_language_code2, command = pp_replace(postprocessing_cmd, path, subtitle_path, uploaded_language, uploaded_language_code2,

@ -97,7 +97,6 @@ def _set_forced_providers(pool, also_forced=False, forced_required=False):
pool.provider_configs.update( pool.provider_configs.update(
{ {
"podnapisi": {'also_foreign': also_forced, "only_foreign": forced_required}, "podnapisi": {'also_foreign': also_forced, "only_foreign": forced_required},
"subscene": {"only_foreign": forced_required},
"opensubtitles": {'also_foreign': also_forced, "only_foreign": forced_required} "opensubtitles": {'also_foreign': also_forced, "only_foreign": forced_required}
} }
) )

@ -33,7 +33,7 @@ def get_restore_path():
def get_backup_files(fullpath=True): def get_backup_files(fullpath=True):
backup_file_pattern = os.path.join(get_backup_path(), 'bazarr_backup_v*.zip') backup_file_pattern = os.path.join(get_backup_path(), 'bazarr_backup_v*.zip')
file_list = glob(backup_file_pattern) file_list = glob(backup_file_pattern)
file_list.sort(key=os.path.getmtime) file_list.sort(key=os.path.getmtime, reverse=True)
if fullpath: if fullpath:
return file_list return file_list
else: else:

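With reverse=True the newest backup now comes first in the returned list. The same sort in isolation (the glob pattern is the one used above):

import glob
import os

def backups_newest_first(pattern='bazarr_backup_v*.zip'):
    files = glob.glob(pattern)
    # most recently modified archive first
    files.sort(key=os.path.getmtime, reverse=True)
    return files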
@ -3,30 +3,38 @@
# only methods can be specified here that do not cause other modules to be loaded # for other methods that use settings, etc., use utilities/helper.py
# for other methods that use settings, etc., use utilities/helper.py # for other methods that use settings, etc., use utilities/helper.py
import contextlib
import logging import logging
import os import os
from pathlib import Path from pathlib import Path
from literals import *
from literals import ENV_BAZARR_ROOT_DIR, DIR_LOG, ENV_STOPFILE, ENV_RESTARTFILE, EXIT_NORMAL, FILE_LOG
def get_bazarr_dir(sub_dir): def get_bazarr_dir(sub_dir):
path = os.path.join(os.environ[ENV_BAZARR_ROOT_DIR], sub_dir) path = os.path.join(os.environ[ENV_BAZARR_ROOT_DIR], sub_dir)
return path return path
def make_bazarr_dir(sub_dir): def make_bazarr_dir(sub_dir):
path = get_bazarr_dir(sub_dir) path = get_bazarr_dir(sub_dir)
if not os.path.exists(path): if not os.path.exists(path):
os.mkdir(path) os.mkdir(path)
def get_log_file_path(): def get_log_file_path():
path = os.path.join(get_bazarr_dir(DIR_LOG), FILE_LOG) path = os.path.join(get_bazarr_dir(DIR_LOG), FILE_LOG)
return path return path
def get_stop_file_path(): def get_stop_file_path():
return os.environ[ENV_STOPFILE] return os.environ[ENV_STOPFILE]
def get_restart_file_path(): def get_restart_file_path():
return os.environ[ENV_RESTARTFILE] return os.environ[ENV_RESTARTFILE]
def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True): def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True):
try: try:
with open(get_stop_file_path(), 'w', encoding='UTF-8') as file: with open(get_stop_file_path(), 'w', encoding='UTF-8') as file:
@ -39,11 +47,15 @@ def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True):
if exit_main: if exit_main:
raise SystemExit(status_code) raise SystemExit(status_code)
def restart_bazarr(): def restart_bazarr():
try: try:
Path(get_restart_file_path()).touch() Path(get_restart_file_path()).touch()
except Exception as e: except Exception as e:
logging.error(f'BAZARR Cannot create restart file: {repr(e)}') logging.error(f'BAZARR Cannot create restart file: {repr(e)}')
logging.info('Bazarr is being restarted...') logging.info('Bazarr is being restarted...')
raise SystemExit(EXIT_NORMAL)
# Wrap the SystemExit for a graceful restart. The SystemExit still performs the cleanup, but the traceback is
# omitted, preventing the exception from being thrown to the caller while still terminating the Python process
# with the desired exit code.
with contextlib.suppress(SystemExit):
raise SystemExit(EXIT_NORMAL)

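A self-contained demonstration of contextlib.suppress with SystemExit: the exception is swallowed at the with boundary instead of propagating out of the function, so a caller of restart_bazarr never sees it:

import contextlib

def restart():
    print('signalling restart...')
    with contextlib.suppress(SystemExit):
        raise SystemExit(0)
    # execution continues here; the exception never propagated
    print('still running')

restart()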
@ -121,7 +121,9 @@ def subtitles_sync_references(subtitles_path, sonarr_episode_id=None, radarr_mov
if not media_data: if not media_data:
return references_dict return references_dict
data = parse_video_metadata(media_data.path, media_data.file_size, media_data.episode_file_id, None, mapped_path = path_mappings.path_replace(media_data.path)
data = parse_video_metadata(mapped_path, media_data.file_size, media_data.episode_file_id, None,
use_cache=True) use_cache=True)
elif radarr_movie_id: elif radarr_movie_id:
media_data = database.execute( media_data = database.execute(
@ -132,7 +134,9 @@ def subtitles_sync_references(subtitles_path, sonarr_episode_id=None, radarr_mov
if not media_data: if not media_data:
return references_dict return references_dict
data = parse_video_metadata(media_data.path, media_data.file_size, None, media_data.movie_file_id, mapped_path = path_mappings.path_replace_movie(media_data.path)
data = parse_video_metadata(mapped_path, media_data.file_size, None, media_data.movie_file_id,
use_cache=True) use_cache=True)
if not data: if not data:
@ -213,6 +217,25 @@ def subtitles_sync_references(subtitles_path, sonarr_episode_id=None, radarr_mov
def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=None, use_cache=True): def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=None, use_cache=True):
"""
This function returns the video file properties as parsed by knowit using ffprobe or mediainfo, using the
cached value by default.
@type file: string
@param file: Properly mapped path of a video file
@type file_size: int
@param file_size: File size in bytes of the video file
@type episode_file_id: int or None
@param episode_file_id: episode ID of the video file from Sonarr (or None if it's a movie)
@type movie_file_id: int or None
@param movie_file_id: movie ID of the video file from Radarr (or None if it's an episode)
@type use_cache: bool
@param use_cache: whether to return the cached value when available
@rtype: dict or None
@return: return a dictionary including the video file properties as parsed by ffprobe or mediainfo
"""
# Define the default values for the data keys # Define the default values for the data keys
data = { data = {
"ffprobe": {}, "ffprobe": {},
@ -228,12 +251,12 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
if episode_file_id: if episode_file_id:
cache_key = database.execute( cache_key = database.execute(
select(TableEpisodes.ffprobe_cache) select(TableEpisodes.ffprobe_cache)
.where(TableEpisodes.path == path_mappings.path_replace_reverse(file))) \ .where(TableEpisodes.episode_file_id == episode_file_id)) \
.first() .first()
elif movie_file_id: elif movie_file_id:
cache_key = database.execute( cache_key = database.execute(
select(TableMovies.ffprobe_cache) select(TableMovies.ffprobe_cache)
.where(TableMovies.path == path_mappings.path_replace_reverse_movie(file))) \ .where(TableMovies.movie_file_id == movie_file_id)) \
.first() .first()
else: else:
cache_key = None cache_key = None
@ -243,6 +266,7 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
# Unpickle ffprobe cache # Unpickle ffprobe cache
cached_value = pickle.loads(cache_key.ffprobe_cache) cached_value = pickle.loads(cache_key.ffprobe_cache)
except Exception: except Exception:
# No cached value available, we'll parse the file
pass pass
else: else:
# Check if file size and file id match and if so, we return the cached value if available for the # Check if file size and file id match and if so, we return the cached value if available for the
@ -281,9 +305,7 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
# or if we have mediainfo available # or if we have mediainfo available
elif mediainfo_path: elif mediainfo_path:
try: try:
# disabling mediainfo path temporarily until issue with knowit is fixed. data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path})
# data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo", "mediainfo": mediainfo_path})
data["mediainfo"] = know(video_path=file, context={"provider": "mediainfo"})
except KnowitException as e: except KnowitException as e:
logging.error(f"BAZARR mediainfo cannot analyze this video file {file}. Could it be corrupted? {e}") logging.error(f"BAZARR mediainfo cannot analyze this video file {file}. Could it be corrupted? {e}")
return None return None
@ -291,19 +313,19 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
else: else:
logging.error("BAZARR require ffmpeg/ffprobe or mediainfo, please install it and make sure to choose it in " logging.error("BAZARR require ffmpeg/ffprobe or mediainfo, please install it and make sure to choose it in "
"Settings-->Subtitles.") "Settings-->Subtitles.")
return return None
# we write the result to the db and return the newly cached ffprobe dict # we write the result to the db and return the newly cached ffprobe dict
if episode_file_id: if episode_file_id:
database.execute( database.execute(
update(TableEpisodes) update(TableEpisodes)
.values(ffprobe_cache=pickle.dumps(data, pickle.HIGHEST_PROTOCOL)) .values(ffprobe_cache=pickle.dumps(data, pickle.HIGHEST_PROTOCOL))
.where(TableEpisodes.path == path_mappings.path_replace_reverse(file))) .where(TableEpisodes.episode_file_id == episode_file_id))
elif movie_file_id: elif movie_file_id:
database.execute( database.execute(
update(TableMovies) update(TableMovies)
.values(ffprobe_cache=pickle.dumps(data, pickle.HIGHEST_PROTOCOL)) .values(ffprobe_cache=pickle.dumps(data, pickle.HIGHEST_PROTOCOL))
.where(TableMovies.path == path_mappings.path_replace_reverse_movie(file))) .where(TableMovies.movie_file_id == movie_file_id))
return data return data

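Keying the ffprobe cache on episode_file_id / movie_file_id rather than the reverse-mapped path makes the cache survive renames and path-mapping changes, since the *arr file id is stable while the path is not. The idea with a plain dict standing in for the database (everything below is a stand-in, not Bazarr's API):

ffprobe_cache = {}  # {episode_file_id: parsed properties}

def get_metadata(episode_file_id, parse):
    # the id survives renames and path-mapping changes, unlike the file path
    if episode_file_id in ffprobe_cache:
        return ffprobe_cache[episode_file_id]
    ffprobe_cache[episode_file_id] = parse()
    return ffprobe_cache[episode_file_id]

def must_not_run():
    raise AssertionError('should have been served from cache')

print(get_metadata(42, lambda: {'ffprobe': {'video': []}}))
print(get_metadata(42, must_not_run))  # cache hit; the parser is not called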
@ -15,5 +15,4 @@ deathbycaptcha # unknown version, only found on gist
git+https://github.com/pannal/libfilebot#egg=libfilebot git+https://github.com/pannal/libfilebot#egg=libfilebot
git+https://github.com/RobinDavid/pyADS.git@28a2f6dbfb357f85b2c2f49add770b336e88840d#egg=pyads git+https://github.com/RobinDavid/pyADS.git@28a2f6dbfb357f85b2c2f49add770b336e88840d#egg=pyads
py7zr==0.7.0 # modified to prevent importing of modules that can't be vendored py7zr==0.7.0 # modified to prevent importing of modules that can't be vendored
subscene-api==1.0.0 # modified specificaly for Bazarr
subliminal==2.1.0 # modified specifically for Bazarr subliminal==2.1.0 # modified specifically for Bazarr

@ -50,7 +50,7 @@ def default_xattr(fn):
XATTR_MAP = { XATTR_MAP = {
"default": ( "default": (
default_xattr, default_xattr,
lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)', lambda result: re.search(r'(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
result).group(2) result).group(2)
), ),
# "darwin": ( # "darwin": (
@ -60,7 +60,7 @@ XATTR_MAP = {
# ), # ),
"darwin": ( "darwin": (
lambda fn: ["filebot", "-script", "fn:xattr", fn], lambda fn: ["filebot", "-script", "fn:xattr", fn],
lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)', lambda result: re.search(r'(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
result).group(2) result).group(2)
), ),
"win32": ( "win32": (

@ -591,7 +591,7 @@ def scan_videos(path, age=None, archives=True):
def refine(video, episode_refiners=None, movie_refiners=None, **kwargs): def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
"""Refine a video using :ref:`refiners`. r"""Refine a video using :ref:`refiners`.
.. note:: .. note::
@ -619,7 +619,7 @@ def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs): def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
"""List subtitles. r"""List subtitles.
The `videos` must pass the `languages` check of :func:`check_video`. The `videos` must pass the `languages` check of :func:`check_video`.
@ -660,7 +660,7 @@ def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs): def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`. r"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
:param subtitles: subtitles to download. :param subtitles: subtitles to download.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle` :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
@ -677,7 +677,7 @@ def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None, def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
pool_class=ProviderPool, **kwargs): pool_class=ProviderPool, **kwargs):
"""List and download the best matching subtitles. r"""List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`. The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.

@ -6,7 +6,7 @@ from stevedore import ExtensionManager
class RegistrableExtensionManager(ExtensionManager): class RegistrableExtensionManager(ExtensionManager):
""":class:~stevedore.extensions.ExtensionManager` with support for registration. r""":class:~stevedore.extensions.ExtensionManager` with support for registration.
It allows loading of internal extensions without setup and registering/unregistering additional extensions. It allows loading of internal extensions without setup and registering/unregistering additional extensions.

@ -1,4 +1,4 @@
""" r"""
Refiners enrich a :class:`~subliminal.video.Video` object by adding information to it. Refiners enrich a :class:`~subliminal.video.Video` object by adding information to it.
A refiner is a simple function: A refiner is a simple function:

@ -115,7 +115,7 @@ class Video(object):
class Episode(Video): class Episode(Video):
"""Episode :class:`Video`. r"""Episode :class:`Video`.
:param str series: series of the episode. :param str series: series of the episode.
:param int season: season number of the episode. :param int season: season number of the episode.
@ -129,7 +129,8 @@ class Episode(Video):
""" """
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None, def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, alternative_series=None, **kwargs): series_tvdb_id=None, series_imdb_id=None, alternative_series=None, series_anidb_id=None,
series_anidb_episode_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs) super(Episode, self).__init__(name, **kwargs)
#: Series of the episode #: Series of the episode
@ -162,6 +163,9 @@ class Episode(Video):
#: Alternative names of the series #: Alternative names of the series
self.alternative_series = alternative_series or [] self.alternative_series = alternative_series or []
self.series_anidb_episode_id = series_anidb_episode_id
self.series_anidb_id = series_anidb_id
@classmethod @classmethod
def fromguess(cls, name, guess): def fromguess(cls, name, guess):
if guess['type'] != 'episode': if guess['type'] != 'episode':
@ -198,7 +202,7 @@ class Episode(Video):
class Movie(Video): class Movie(Video):
"""Movie :class:`Video`. r"""Movie :class:`Video`.
:param str title: title of the movie. :param str title: title of the movie.
:param int year: year of the movie. :param int year: year of the movie.

@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from subliminal.exceptions import ConfigurationError
class SubdlConverter(LanguageReverseConverter):
def __init__(self):
self.from_subdl = {
"AR": ("ara", None, None), # Arabic
"DA": ("dan", None, None), # Danish
"NL": ("nld", None, None), # Dutch
"EN": ("eng", None, None), # English
"FA": ("fas", None, None), # Farsi_Persian
"FI": ("fin", None, None), # Finnish
"FR": ("fra", None, None), # French
"ID": ("ind", None, None), # Indonesian
"IT": ("ita", None, None), # Italian
"NO": ("nor", None, None), # Norwegian
"RO": ("ron", None, None), # Romanian
"ES": ("spa", None, None), # Spanish
"SV": ("swe", None, None), # Swedish
"VI": ("vie", None, None), # Vietnamese
"SQ": ("sqi", None, None), # Albanian
"AZ": ("aze", None, None), # Azerbaijani
"BE": ("bel", None, None), # Belarusian
"BN": ("ben", None, None), # Bengali
"BS": ("bos", None, None), # Bosnian
"BG": ("bul", None, None), # Bulgarian
"MY": ("mya", None, None), # Burmese
"CA": ("cat", None, None), # Catalan
"ZH": ("zho", None, None), # Chinese BG code
"HR": ("hrv", None, None), # Croatian
"CS": ("ces", None, None), # Czech
"EO": ("epo", None, None), # Esperanto
"ET": ("est", None, None), # Estonian
"KA": ("kat", None, None), # Georgian
"DE": ("deu", None, None), # German
"EL": ("ell", None, None), # Greek
"KL": ("kal", None, None), # Greenlandic
"HE": ("heb", None, None), # Hebrew
"HI": ("hin", None, None), # Hindi
"HU": ("hun", None, None), # Hungarian
"IS": ("isl", None, None), # Icelandic
"JA": ("jpn", None, None), # Japanese
"KO": ("kor", None, None), # Korean
"KU": ("kur", None, None), # Kurdish
"LV": ("lav", None, None), # Latvian
"LT": ("lit", None, None), # Lithuanian
"MK": ("mkd", None, None), # Macedonian
"MS": ("msa", None, None), # Malay
"ML": ("mal", None, None), # Malayalam
"PL": ("pol", None, None), # Polish
"PT": ("por", None, None), # Portuguese
"RU": ("rus", None, None), # Russian
"SR": ("srp", None, None), # Serbian
"SI": ("sin", None, None), # Sinhala
"SK": ("slk", None, None), # Slovak
"SL": ("slv", None, None), # Slovenian
"TL": ("tgl", None, None), # Tagalog
"TA": ("tam", None, None), # Tamil
"TE": ("tel", None, None), # Telugu
"TH": ("tha", None, None), # Thai
"TR": ("tur", None, None), # Turkish
"UK": ("ukr", None, None), # Ukrainian
"UR": ("urd", None, None), # Urdu
# custom languages
"BR_PT": ("por", "BR", None), # Brazilian Portuguese
"ZH_BG": ("zho", None, "Hant"), # Big 5 code
# unsupported language in Bazarr
# "BG_EN": "Bulgarian_English",
# "NL_EN": "Dutch_English",
# "EN_DE": "English_German",
# "HU_EN": "Hungarian_English",
# "MNI": "Manipuri",
}
self.to_subdl = {v: k for k, v in self.from_subdl.items()}
self.codes = set(self.from_subdl.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country, script) in self.to_subdl:
return self.to_subdl[(alpha3, country, script)]
raise ConfigurationError('Unsupported language for subdl: %s, %s, %s' % (alpha3, country, script))
def reverse(self, subdl):
if subdl in self.from_subdl:
return self.from_subdl[subdl]
raise ConfigurationError('Unsupported language code for subdl: %s' % subdl)

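Usage sketch for the converter above, assuming babelfish is installed and the module is importable as subliminal_patch.converters.subdl (the file path is an assumption; this hunk does not show it). convert() maps a babelfish (alpha3, country, script) triple to subdl's code and reverse() goes the other way:

from subliminal_patch.converters.subdl import SubdlConverter

converter = SubdlConverter()
print(converter.convert('por', 'BR'))  # 'BR_PT' (Brazilian Portuguese)
print(converter.reverse('EN'))         # ('eng', None, None)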
@ -1,92 +0,0 @@
# coding=utf-8
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from subliminal.exceptions import ConfigurationError
from subzero.language import Language
# alpha3 codes extracted from `https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes`
# Subscene language list extracted from it's upload form
from_subscene = {
'Farsi/Persian': 'fas', 'Greek': 'ell', 'Greenlandic': 'kal',
'Malay': 'msa', 'Pashto': 'pus', 'Punjabi': 'pan', 'Swahili': 'swa'
}
from_subscene_with_country = {
'Brazillian Portuguese': ('por', 'BR')
}
to_subscene_with_country = {val: key for key, val in from_subscene_with_country.items()}
to_subscene = {v: k for k, v in from_subscene.items()}
exact_languages_alpha3 = [
'ara', 'aze', 'bel', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu',
'eng', 'epo', 'est', 'eus', 'fin', 'fra', 'heb', 'hin', 'hrv', 'hun',
'hye', 'ind', 'isl', 'ita', 'jpn', 'kat', 'kor', 'kur', 'lav', 'lit',
'mal', 'mkd', 'mni', 'mon', 'mya', 'nld', 'nor', 'pol', 'por', 'ron',
'rus', 'sin', 'slk', 'slv', 'som', 'spa', 'sqi', 'srp', 'sun', 'swe',
'tam', 'tel', 'tgl', 'tha', 'tur', 'ukr', 'urd', 'vie', 'yor'
]
language_ids = {
'ara': 2, 'dan': 10, 'nld': 11, 'eng': 13, 'fas': 46, 'fin': 17,
'fra': 18, 'heb': 22, 'ind': 44, 'ita': 26, 'msa': 50, 'nor': 30,
'ron': 33, 'spa': 38, 'swe': 39, 'vie': 45, 'sqi': 1, 'hye': 73,
'aze': 55, 'eus': 74, 'bel': 68, 'ben': 54, 'bos': 60, 'bul': 5,
'mya': 61, 'cat': 49, 'hrv': 8, 'ces': 9, 'epo': 47, 'est': 16,
'kat': 62, 'deu': 19, 'ell': 21, 'kal': 57, 'hin': 51, 'hun': 23,
'isl': 25, 'jpn': 27, 'kor': 28, 'kur': 52, 'lav': 29, 'lit': 43,
'mkd': 48, 'mal': 64, 'mni': 65, 'mon': 72, 'pus': 67, 'pol': 31,
'por': 32, 'pan': 66, 'rus': 34, 'srp': 35, 'sin': 58, 'slk': 36,
'slv': 37, 'som': 70, 'tgl': 53, 'tam': 59, 'tel': 63, 'tha': 40,
'tur': 41, 'ukr': 56, 'urd': 42, 'yor': 71, 'pt-BR': 4
}
# TODO: specify codes for unspecified_languages
unspecified_languages = [
'Big 5 code', 'Bulgarian/ English',
'Chinese BG code', 'Dutch/ English', 'English/ German',
'Hungarian/ English', 'Rohingya'
]
supported_languages = {Language(l) for l in exact_languages_alpha3}
alpha3_of_code = {l.name: l.alpha3 for l in supported_languages}
supported_languages.update({Language(l) for l in to_subscene})
supported_languages.update({Language(lang, cr) for lang, cr in to_subscene_with_country})
class SubsceneConverter(LanguageReverseConverter):
codes = {l.name for l in supported_languages}
def convert(self, alpha3, country=None, script=None):
if alpha3 in exact_languages_alpha3:
return Language(alpha3).name
if alpha3 in to_subscene:
return to_subscene[alpha3]
if (alpha3, country) in to_subscene_with_country:
return to_subscene_with_country[(alpha3, country)]
raise ConfigurationError('Unsupported language for subscene: %s, %s, %s' % (alpha3, country, script))
def reverse(self, code):
if code in from_subscene_with_country:
return from_subscene_with_country[code]
if code in from_subscene:
return (from_subscene[code],)
if code in alpha3_of_code:
return (alpha3_of_code[code],)
if code in unspecified_languages:
raise NotImplementedError("currently this language is unspecified: %s" % code)
raise ConfigurationError('Unsupported language code for subscene: %s' % code)

@ -49,6 +49,8 @@ SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass', '.mpl', '
_POOL_LIFETIME = datetime.timedelta(hours=12) _POOL_LIFETIME = datetime.timedelta(hours=12)
HI_REGEX = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
def remove_crap_from_fn(fn): def remove_crap_from_fn(fn):
# in case of the second regex part, the legit release group name will be in group(2), if it's followed by [string] # in case of the second regex part, the legit release group name will be in group(2), if it's followed by [string]
@ -539,6 +541,7 @@ class SZProviderPool(ProviderPool):
use_hearing_impaired = hearing_impaired in ("prefer", "force HI") use_hearing_impaired = hearing_impaired in ("prefer", "force HI")
is_episode = isinstance(video, Episode) is_episode = isinstance(video, Episode)
max_score = sum(val for key, val in compute_score._scores['episode' if is_episode else 'movie'].items() if key != "hash")
# sort subtitles by score # sort subtitles by score
unsorted_subtitles = [] unsorted_subtitles = []
@ -570,7 +573,9 @@ class SZProviderPool(ProviderPool):
for subtitle, score, score_without_hash, matches, orig_matches in scored_subtitles: for subtitle, score, score_without_hash, matches, orig_matches in scored_subtitles:
# check score # check score
if score < min_score: if score < min_score:
logger.info('%r: Score %d is below min_score (%d)', subtitle, score, min_score) min_score_in_percent = round(min_score * 100 / max_score, 2) if min_score > 0 else 0
logger.info('%r: Score %d is below min_score: %d out of %d (or %r%%)',
subtitle, score, min_score, max_score, min_score_in_percent)
break break
# stop when all languages are downloaded # stop when all languages are downloaded
@ -941,8 +946,8 @@ def _search_external_subtitles(path, languages=None, only_one=False, match_stric
lambda m: "" if str(m.group(1)).lower() in FULL_LANGUAGE_LIST else m.group(0), p_root) lambda m: "" if str(m.group(1)).lower() in FULL_LANGUAGE_LIST else m.group(0), p_root)
p_root_lower = p_root_bare.lower() p_root_lower = p_root_bare.lower()
# comparing against both unicode normalization forms to prevent breaking things and to improve indexing on some platforms.
filename_matches = p_root_lower == fn_no_ext_lower filename_matches = fn_no_ext_lower in [p_root_lower, unicodedata.normalize('NFC', p_root_lower)]
filename_contains = p_root_lower in fn_no_ext_lower filename_contains = p_root_lower in fn_no_ext_lower
if not filename_matches: if not filename_matches:
@ -1054,7 +1059,7 @@ def list_supported_video_types(pool_class, **kwargs):
def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs): def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`. r"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
:param subtitles: subtitles to download. :param subtitles: subtitles to download.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle` :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
@ -1071,7 +1076,7 @@ def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None, def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
pool_class=ProviderPool, throttle_time=0, **kwargs): pool_class=ProviderPool, throttle_time=0, **kwargs):
"""List and download the best matching subtitles. r"""List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`. The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.
@ -1188,7 +1193,7 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
        must_remove_hi = 'remove_HI' in subtitle.mods

        # check content
-       if subtitle.content is None:
+       if subtitle.content is None or subtitle.text is None:
            logger.error('Skipping subtitle %r: no content', subtitle)
            continue
@ -1198,6 +1203,8 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
            continue

        # create subtitle path
+       if subtitle.text and bool(re.search(HI_REGEX, subtitle.text)):
+           subtitle.language.hi = True
        subtitle_path = get_subtitle_path(file_path, None if single else subtitle.language,
                                          forced_tag=subtitle.language.forced,
                                          hi_tag=False if must_remove_hi else subtitle.language.hi, tags=tags)
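The HI_REGEX itself is defined elsewhere in the patch; a hypothetical pattern is enough to sketch the tagging logic:

import re

# Hypothetical pattern for hearing-impaired cues such as [door slams]; not the patch's actual HI_REGEX.
HI_REGEX = re.compile(r"\[[^\]]+\]")

text = "[door slams]\n- Who's there?"
language_hi = bool(text and re.search(HI_REGEX, text))
print(language_hi)  # True: the subtitle would be tagged as hearing-impaired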
@ -1242,7 +1249,7 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
def refine(video, episode_refiners=None, movie_refiners=None, **kwargs):
-   """Refine a video using :ref:`refiners`.
+   r"""Refine a video using :ref:`refiners`.
patch: add traceback logging patch: add traceback logging

@ -64,4 +64,3 @@ subliminal.refiner_manager.register('drone = subliminal_patch.refiners.drone:ref
subliminal.refiner_manager.register('filebot = subliminal_patch.refiners.filebot:refine')
subliminal.refiner_manager.register('file_info_file = subliminal_patch.refiners.file_info_file:refine')
subliminal.refiner_manager.register('symlinks = subliminal_patch.refiners.symlinks:refine')

@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import lzma
from guessit import guessit
from requests import Session
from subzero.language import Language
from subliminal.exceptions import ConfigurationError, ProviderError
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.video import Episode
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
logger = logging.getLogger(__name__)
supported_languages = [
"ara", # Arabic
"eng", # English
"fin", # Finnish
"fra", # French
"heb", # Hebrew
"ita", # Italian
"jpn", # Japanese
"por", # Portuguese
"pol", # Polish
"spa", # Spanish
"swe", # Swedish
"tha", # Thai
"tur", # Turkish
]
class AnimeToshoSubtitle(Subtitle):
"""AnimeTosho.org Subtitle."""
provider_name = 'animetosho'
def __init__(self, language, download_link, meta, release_info):
super(AnimeToshoSubtitle, self).__init__(language, page_link=download_link)
self.meta = meta
self.download_link = download_link
self.release_info = release_info
@property
def id(self):
return self.download_link
def get_matches(self, video):
matches = set()
matches |= guess_matches(video, guessit(self.meta['filename']))
# These data are explicitly extracted from the API and always have to match; otherwise the subtitle
# wouldn't arrive at this point, having already been filtered out in list_subtitles.
matches.update(['title', 'series', 'tvdb_id', 'season', 'episode'])
return matches
class AnimeToshoProvider(Provider, ProviderSubtitleArchiveMixin):
"""AnimeTosho.org Provider."""
subtitle_class = AnimeToshoSubtitle
languages = {Language('por', 'BR')} | {Language(sl) for sl in supported_languages}
video_types = Episode
def __init__(self, search_threshold=None):
self.session = None
if not search_threshold:
raise ConfigurationError("Search threshold must be specified!")
self.search_threshold = search_threshold
def initialize(self):
self.session = Session()
def terminate(self):
self.session.close()
def list_subtitles(self, video, languages):
if not video.series_anidb_episode_id:
logger.debug('Skipping video %r. It is not an anime or the anidb_episode_id could not be identified', video)
return []
return [s for s in self._get_series(video.series_anidb_episode_id) if s.language in languages]
def download_subtitle(self, subtitle):
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(subtitle.page_link, timeout=10)
r.raise_for_status()
# Check that the content starts with the magic number used by xz archives
if not self._is_xz_file(r.content):
raise ProviderError('Unidentified archive type')
subtitle.content = lzma.decompress(r.content)
return subtitle
@staticmethod
def _is_xz_file(content):
return content.startswith(b'\xFD\x37\x7A\x58\x5A\x00')
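A quick sketch of the check in isolation; lzma.compress produces the same xz container the provider downloads:

import lzma

payload = lzma.compress(b"1\n00:00:01,000 --> 00:00:02,000\nHello\n")
print(payload.startswith(b'\xFD\x37\x7A\x58\x5A\x00'))  # True: xz magic number, same test as _is_xz_file
print(lzma.decompress(payload))  # original subtitle bytes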
def _get_series(self, episode_id):
storage_download_url = 'https://animetosho.org/storage/attach/'
feed_api_url = 'https://feed.animetosho.org/json'
subtitles = []
entries = self._get_series_entries(episode_id)
for entry in entries:
r = self.session.get(
feed_api_url,
params={
'show': 'torrent',
'id': entry['id'],
},
timeout=10
)
r.raise_for_status()
for file in r.json()['files']:
if 'attachments' not in file:
continue
subtitle_files = list(filter(lambda f: f['type'] == 'subtitle', file['attachments']))
for subtitle_file in subtitle_files:
hex_id = format(subtitle_file['id'], '08x')
lang = Language.fromalpha3b(subtitle_file['info']['lang'])
# Portuguese and Brazilian Portuguese share the same alpha3 code; the attachment name is the only
# identifier AnimeTosho provides. Some subtitles have no name at all; that can yield a false
# negative, but with nothing to guarantee they are PT-BR we rather skip the distinction.
if lang.alpha3 == 'por' and 'brazil' in subtitle_file['info'].get('name', '').lower():
lang = Language('por', 'BR')
subtitle = self.subtitle_class(
lang,
storage_download_url + '{}/{}.xz'.format(hex_id, subtitle_file['id']),
meta=file,
release_info=entry.get('title'),
)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def _get_series_entries(self, episode_id):
api_url = 'https://feed.animetosho.org/json'
r = self.session.get(
api_url,
params={
'eid': episode_id,
},
timeout=10
)
r.raise_for_status()
j = r.json()
# Ignore records that are not yet ready or have been abandoned by AnimeTosho.
entries = [entry for entry in j if entry['status'] == 'complete']
# Sort the latest additions first, since the user-configured threshold is meant to keep the newest entries
entries.sort(key=lambda t: t['timestamp'], reverse=True)
return entries[:self.search_threshold]

@ -83,6 +83,15 @@ class BetaSeriesProvider(Provider):
            logger.debug('Searching subtitles %r', params)
            res = self.session.get(
                server_url + 'episodes/display', params=params, timeout=10)
+           try:
+               error_code = res.json()['errors'][0]['code'] if res.status_code == 400 else None
+           except Exception:
+               error_code = None
+           if error_code == 4001:
+               # this is to catch no series found
+               return []
+           elif error_code == 1001:
+               raise AuthenticationError("Invalid token provided")
            res.raise_for_status()
            result = res.json()
            matches.add('tvdb_id')
@ -96,8 +104,15 @@ class BetaSeriesProvider(Provider):
            logger.debug('Searching subtitles %r', params)
            res = self.session.get(
                server_url + 'shows/episodes', params=params, timeout=10)
-           if res.status_code == 400:
-               raise AuthenticationError("Invalid token provided")
+           try:
+               error_code = res.json()['errors'][0]['code'] if res.status_code == 400 else None
+           except Exception:
+               error_code = None
+           if error_code == 4001:
+               # this is to catch no series found
+               return []
+           elif error_code == 1001:
+               raise AuthenticationError("Invalid token provided")
            res.raise_for_status()
            result = res.json()
            matches.add('series_tvdb_id')

@ -208,8 +208,11 @@ class EmbeddedSubtitlesProvider(Provider):
        except Exception as error:
            logger.debug("'%s' raised running modifier", error)

+       if os.path.exists(path):
            with open(path, "rb") as sub:
                subtitle.content = sub.read()
+       else:
+           logger.error("%s not found in filesystem", path)

    def _get_subtitle_path(self, subtitle: EmbeddedSubtitle):
        container = subtitle.container
@ -379,7 +382,7 @@ def _clean_ass_subtitles(path, output_path):
    logger.debug("Cleaned lines: %d", abs(len(lines) - len(clean_lines)))

-   with open(output_path, "w") as f:
+   with open(output_path, "w", encoding="utf-8", errors="ignore") as f:
        f.writelines(clean_lines)

    logger.debug("Lines written to output path: %s", output_path)

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
import functools
-from json import JSONDecodeError
+from requests.exceptions import JSONDecodeError
import logging
import re
import time
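Why the import moved, sketched: Response.json() raises requests' own JSONDecodeError (a subclass of the stdlib one since requests 2.27), so catching the requests flavor covers .json() failures directly. The demo pokes the private _content attribute purely for illustration:

from requests.exceptions import JSONDecodeError
from requests.models import Response

resp = Response()
resp.status_code = 200
resp._content = b"not json"  # private attribute, set here only for the demo

try:
    resp.json()
except JSONDecodeError as error:
    print("caught requests' JSONDecodeError:", error)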

@ -3,6 +3,7 @@ import io
import logging
import os
import json
+from requests.exceptions import JSONDecodeError

from subzero.language import Language
from guessit import guessit
@ -144,7 +145,7 @@ class KtuvitProvider(Provider):
        self.session.headers["Pragma"] = "no-cache"
        self.session.headers["Cache-Control"] = "no-cache"
        self.session.headers["Content-Type"] = "application/json"
-       self.session.headers["User-Agent"]: os.environ.get(
+       self.session.headers["User-Agent"] = os.environ.get(
            "SZ_USER_AGENT", "Sub-Zero/2"
        )
@ -161,13 +162,13 @@ class KtuvitProvider(Provider):
            is_success = self.parse_d_response(
                r, "IsSuccess", False, "Authentication to the provider"
            )
-       except json.decoder.JSONDecodeError:
+       except JSONDecodeError:
            logger.info("Failed to Login to Ktuvit")

        if not is_success:
            error_message = ''
            try:
                error_message = self.parse_d_response(r, "ErrorMessage", "[None]")
-           except json.decode.JSONDecoderError:
+           except JSONDecodeError:
                raise AuthenticationError(
                    "Error Logging in to Ktuvit Provider: " + str(r.content)
                )
@ -473,8 +474,8 @@ class KtuvitProvider(Provider):
        try:
            response_content = response.json()
-       except json.decoder.JSONDecodeError as ex:
-           raise json.decoder.JSONDecodeError(
+       except JSONDecodeError as ex:
+           raise JSONDecodeError(
                "Unable to parse JSON returned while getting " + message, ex.doc, ex.pos
            )
        else:
@ -486,11 +487,11 @@ class KtuvitProvider(Provider):
            value = response_content.get(field, default_value)
            if not value and value != default_value:
-               raise json.decoder.JSONDecodeError(
+               raise JSONDecodeError(
                    "Missing " + message, str(response_content), 0
                )
        else:
-           raise json.decoder.JSONDecodeError(
+           raise JSONDecodeError(
                "Incomplete JSON returned while getting " + message,
                str(response_content),
                0

@ -29,6 +29,7 @@ from dogpile.cache.api import NO_VALUE

logger = logging.getLogger(__name__)

class LegendasdivxSubtitle(Subtitle):
    """Legendasdivx Subtitle."""
    provider_name = 'legendasdivx'
@ -69,10 +70,12 @@ class LegendasdivxSubtitle(Subtitle):
                self.wrong_fps = True

                if self.skip_wrong_fps:
-                   logger.debug("Legendasdivx :: Skipping subtitle due to FPS mismatch (expected: %s, got: %s)", video.fps, self.sub_frame_rate)
+                   logger.debug("Legendasdivx :: Skipping subtitle due to FPS mismatch (expected: %s, got: %s)", video.fps,
+                                self.sub_frame_rate)
                    # not a single match :)
                    return set()

-               logger.debug("Legendasdivx :: Frame rate mismatch (expected: %s, got: %s, but continuing...)", video.fps, self.sub_frame_rate)
+               logger.debug("Legendasdivx :: Frame rate mismatch (expected: %s, got: %s, but continuing...)", video.fps,
+                            self.sub_frame_rate)

        description = sanitize(self.description)
@ -112,6 +115,11 @@ class LegendasdivxSubtitle(Subtitle):
            matches.update(['season'])
        if video.episode and 'e{:02d}'.format(video.episode) in description:
            matches.update(['episode'])
+       # The search is already based on series_imdb_id when present in the video and is handled by the
+       # legendasdivx backend, so any result already matches, either inside a pack or as a specific
+       # series and episode; we can therefore assume the season and episode match.
+       if video.series_imdb_id:
+           matches.update(['series', 'series_imdb_id', 'season', 'episode'])

        # release_group
        if video.release_group and sanitize_release_group(video.release_group) in sanitize_release_group(description):
@ -121,6 +129,7 @@ class LegendasdivxSubtitle(Subtitle):
        return matches

class LegendasdivxProvider(Provider):
    """Legendasdivx Provider."""
    languages = {Language('por', 'BR')} | {Language('por')}
@ -135,7 +144,7 @@ class LegendasdivxProvider(Provider):
        'Referer': 'https://www.legendasdivx.pt'
    }
    loginpage = site + '/forum/ucp.php?mode=login'
-   searchurl = site + '/modules.php?name=Downloads&file=jz&d_op=search&op=_jz00&query={query}'
+   searchurl = site + '/modules.php?name=Downloads&file=jz&d_op={d_op}&op={op}&query={query}&temporada={season}&episodio={episode}&imdb={imdbid}'
    download_link = site + '/modules.php{link}'

    def __init__(self, username, password, skip_wrong_fps=True):
@ -186,7 +195,8 @@ class LegendasdivxProvider(Provider):
        res = self.session.post(self.loginpage, data)
        res.raise_for_status()
        # make sure we're logged in
-       logger.debug('Legendasdivx.pt :: Logged in successfully: PHPSESSID: %s', self.session.cookies.get_dict()['PHPSESSID'])
+       logger.debug('Legendasdivx.pt :: Logged in successfully: PHPSESSID: %s',
+                    self.session.cookies.get_dict()['PHPSESSID'])
        cj = self.session.cookies.copy()
        store_cks = ("PHPSESSID", "phpbb3_2z8zs_sid", "phpbb3_2z8zs_k", "phpbb3_2z8zs_u", "lang")
        for cn in iter(self.session.cookies.keys()):
@ -278,11 +288,23 @@ class LegendasdivxProvider(Provider):
        subtitles = []

+       # Set the default search criteria
+       d_op = 'search'
+       op = '_jz00'
+       lang_filter_key = 'form_cat'

        if isinstance(video, Movie):
            querytext = video.imdb_id if video.imdb_id else video.title
        if isinstance(video, Episode):
-           querytext = '%22{}%20S{:02d}E{:02d}%22'.format(video.series, video.season, video.episode)
+           # Overwrite the parameters to refine via imdb_id
+           if video.series_imdb_id:
+               querytext = '&faz=pesquisa_episodio'
+               lang_filter_key = 'idioma'
+               d_op = 'jz_00'
+               op = ''
+           else:
+               querytext = '%22{}%22%20S{:02d}E{:02d}'.format(video.series, video.season, video.episode)
        querytext = quote(querytext.lower())

        # language query filter
@ -293,21 +315,30 @@ class LegendasdivxProvider(Provider):
            logger.debug("Legendasdivx.pt :: searching for %s subtitles.", language)
            language_id = language.opensubtitles
            if 'por' in language_id:
-               lang_filter = '&form_cat=28'
+               lang_filter = '&{}=28'.format(lang_filter_key)
            elif 'pob' in language_id:
-               lang_filter = '&form_cat=29'
+               lang_filter = '&{}=29'.format(lang_filter_key)
            else:
                lang_filter = ''

            querytext = querytext + lang_filter if lang_filter else querytext

+           search_url = _searchurl.format(
+               query=querytext,
+               season='' if isinstance(video, Movie) else video.season,
+               episode='' if isinstance(video, Movie) else video.episode,
+               imdbid='' if isinstance(video, Movie) else video.series_imdb_id.replace('tt', '') if video.series_imdb_id else None,
+               op=op,
+               d_op=d_op,
+           )

            try:
                # sleep for 1 second before another request
                sleep(1)
                searchLimitReached = False
                self.headers['Referer'] = self.site + '/index.php'
                self.session.headers.update(self.headers)
-               res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
+               res = self.session.get(search_url, allow_redirects=False)
                res.raise_for_status()
                if res.status_code == 200 and "<!--pesquisas:" in res.text:
                    searches_count_groups = re.search(r'<!--pesquisas: (\d*)-->', res.text)
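As an illustration (values hypothetical), the refined episode search URL built above would look like this:

from urllib.parse import quote

searchurl = ('https://www.legendasdivx.pt/modules.php?name=Downloads&file=jz&d_op={d_op}&op={op}'
             '&query={query}&temporada={season}&episodio={episode}&imdb={imdbid}')

print(searchurl.format(d_op='jz_00', op='', query=quote('&faz=pesquisa_episodio'),
                       season=1, episode=2, imdbid='0898266'))
# -> ...&d_op=jz_00&op=&query=%26faz%3Dpesquisa_episodio&temporada=1&episodio=2&imdb=0898266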
@ -324,10 +355,10 @@ class LegendasdivxProvider(Provider):
            # for series, if no results found, try again just with series and season (subtitle packs)
            if isinstance(video, Episode):
                logger.debug("Legendasdivx.pt :: trying again with just series and season on query.")
-               querytext = re.sub("(e|E)(\d{2})", "", querytext)
+               querytext = re.sub(r"(e|E)(\d{2})", "", querytext)
                # sleep for 1 second before another request
                sleep(1)
-               res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
+               res = self.session.get(search_url, allow_redirects=False)
                res.raise_for_status()
                if res.status_code == 200 and "<!--pesquisas:" in res.text:
                    searches_count_groups = re.search(r'<!--pesquisas: (\d*)-->', res.text)
@ -340,7 +371,9 @@ class LegendasdivxProvider(Provider):
                if searches_count >= self.SAFE_SEARCH_LIMIT:
                    searchLimitReached = True
            if (res.status_code == 200 and "A legenda não foi encontrada" in res.text):
-               logger.warning('Legendasdivx.pt :: query {0} returned no results for language {1} (for series and season only).'.format(querytext, language_id))
+               logger.warning(
+                   'Legendasdivx.pt :: query {0} returned no results for language {1} (for series and season only).'.format(
+                       querytext, language_id))
                continue
            if res.status_code == 302:  # got redirected to login page.
                # seems that our session cookies are no longer valid... clean them from cache
@ -350,7 +383,7 @@ class LegendasdivxProvider(Provider):
                self.login()
                # sleep for 1 second before another request
                sleep(1)
-               res = self.session.get(_searchurl.format(query=querytext))
+               res = self.session.get(search_url, allow_redirects=False)
                res.raise_for_status()
                if res.status_code == 200 and "<!--pesquisas:" in res.text:
                    searches_count_groups = re.search(r'<!--pesquisas: (\d*)-->', res.text)
@ -396,7 +429,7 @@ class LegendasdivxProvider(Provider):
        if num_pages > 1:
            for num_page in range(2, num_pages + 1):
                sleep(1)  # another 1 sec before requesting...
-               _search_next = self.searchurl.format(query=querytext) + "&page={0}".format(str(num_page))
+               _search_next = search_url + "&page={0}".format(str(num_page))
                logger.debug("Legendasdivx.pt :: Moving on to next page: %s", _search_next)
                # sleep for 1 second before another request
                sleep(1)
@ -478,7 +511,8 @@ class LegendasdivxProvider(Provider):
        if isinstance(subtitle.video, Episode):
            if all(key in _guess for key in ('season', 'episode')):
                logger.debug("Legendasdivx.pt :: guessing %s", name)
-               logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode)
+               logger.debug("Legendasdivx.pt :: subtitle S%sE%s video S%sE%s", _guess['season'], _guess['episode'],
+                            subtitle.video.season, subtitle.video.episode)
                if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']:
                    logger.debug('Legendasdivx.pt :: subtitle does not match video, skipping')

@ -218,7 +218,7 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
        try:
            self.token = r.json()['token']
-       except (ValueError, JSONDecodeError):
+       except (ValueError, JSONDecodeError, AttributeError):
            log_request_response(r)
            raise ProviderError("Cannot get token from provider login response")
        else:
@ -543,10 +543,6 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
        elif status_code == 429:
            log_request_response(response)
            raise TooManyRequests()
-       elif status_code == 500:
-           logger.debug("Server side exception raised while downloading from opensubtitles.com website. They "
-                        "should mitigate this soon.")
-           return None
        elif status_code == 502:
            # this one should deal with Bad Gateway issue on their side.
            raise APIThrottled()

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import

-from json import JSONDecodeError
+from requests.exceptions import JSONDecodeError
import logging
import random
import re
@ -126,7 +126,7 @@ class SubdivxSubtitlesProvider(Provider):
        titles = [video.series if episode else video.title]

        try:
-           titles.extend(video.alternative_titles)
+           titles.extend(video.alternative_series if episode else video.alternative_titles)
        except:
            pass
        else:
@ -138,6 +138,7 @@ class SubdivxSubtitlesProvider(Provider):
        # TODO: cache pack queries (TV SHOW S01).
        # Too many redundant server calls.

        for title in titles:
+           title = _series_sanitizer(title)
            for query in (
                f"{title} S{video.season:02}E{video.episode:02}",
                f"{title} S{video.season:02}",
@ -297,20 +298,31 @@ def _check_episode(video, title):
    ) and season_num == video.season

    series_title = _SERIES_RE.sub("", title).strip()
+   series_title = _series_sanitizer(series_title)

-   distance = abs(len(series_title) - len(video.series))
+   for video_series_title in [video.series] + video.alternative_series:
+       video_series_title = _series_sanitizer(video_series_title)
+       distance = abs(len(series_title) - len(video_series_title))

-   series_matched = distance < 4 and ep_matches
+       series_matched = (distance < 4 or video_series_title in series_title) and ep_matches

        logger.debug(
            "Series matched? %s [%s -> %s] [title distance: %d]",
            series_matched,
-           video,
-           title,
+           video_series_title,
+           series_title,
            distance,
        )

-   return series_matched
+       if series_matched:
+           return True
+   return False


def _series_sanitizer(title):
    title = re.sub(r"\'|\.+", '', title)  # remove single quotes and dots
    title = re.sub(r"\W+", ' ', title)  # replace anything other than a letter, digit or underscore with a space
    return re.sub(r"([A-Z])\s(?=[A-Z]\b)", r'\1', title).strip()  # e.g. Marvel's Agents of S.H.I.E.L.D.
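Usage sketch for the sanitizer above, with the comment's own example (the backreference keeps each capital while dropping the separating space):

print(_series_sanitizer("Marvel's Agents of S.H.I.E.L.D."))  # -> Marvels Agents of SHIELD
print(_series_sanitizer("Agents of S H I E L D"))            # -> Agents of SHIELD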
def _check_movie(video, title):

@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
import logging
import os
import time
import io
from zipfile import ZipFile, is_zipfile
from urllib.parse import urljoin
from requests import Session
from babelfish import language_converters
from subzero.language import Language
from subliminal import Episode, Movie
from subliminal.exceptions import ConfigurationError, ProviderError, DownloadLimitExceeded
from subliminal_patch.exceptions import APIThrottled
from .mixins import ProviderRetryMixin
from subliminal_patch.subtitle import Subtitle
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import guess_matches
from guessit import guessit
logger = logging.getLogger(__name__)
retry_amount = 3
retry_timeout = 5
language_converters.register('subdl = subliminal_patch.converters.subdl:SubdlConverter')
supported_languages = list(language_converters['subdl'].to_subdl.keys())
class SubdlSubtitle(Subtitle):
provider_name = 'subdl'
hash_verifiable = False
hearing_impaired_verifiable = True
def __init__(self, language, forced, hearing_impaired, page_link, download_link, file_id, release_names, uploader,
season=None, episode=None):
super().__init__(language)
language = Language.rebuild(language, hi=hearing_impaired, forced=forced)
self.season = season
self.episode = episode
self.releases = release_names
self.release_info = ', '.join(release_names)
self.language = language
self.forced = forced
self.hearing_impaired = hearing_impaired
self.file_id = file_id
self.page_link = page_link
self.download_link = download_link
self.uploader = uploader
self.matches = None
@property
def id(self):
return self.file_id
def get_matches(self, video):
matches = set()
type_ = "movie" if isinstance(video, Movie) else "episode"
# handle movies and series separately
if isinstance(video, Episode):
# series
matches.add('series')
# season
if video.season == self.season:
matches.add('season')
# episode
if video.episode == self.episode:
matches.add('episode')
# imdb
matches.add('series_imdb_id')
else:
# title
matches.add('title')
# imdb
matches.add('imdb_id')
# other properties
matches |= guess_matches(video, guessit(self.release_info, {"type": type_}))
self.matches = matches
return matches
class SubdlProvider(ProviderRetryMixin, Provider):
"""Subdl Provider"""
server_hostname = 'api.subdl.com'
languages = {Language(*lang) for lang in supported_languages}
languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
languages.update(set(Language.rebuild(l, hi=True) for l in languages))
video_types = (Episode, Movie)
def __init__(self, api_key=None):
if not api_key:
raise ConfigurationError('Api_key must be specified')
self.session = Session()
self.session.headers = {'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")}
self.api_key = api_key
self.video = None
self._started = None
def initialize(self):
self._started = time.time()
def terminate(self):
self.session.close()
def server_url(self):
return f'https://{self.server_hostname}/api/v1/'
def query(self, languages, video):
self.video = video
if isinstance(self.video, Episode):
title = self.video.series
else:
title = self.video.title
imdb_id = None
if isinstance(self.video, Episode) and self.video.series_imdb_id:
imdb_id = self.video.series_imdb_id
elif isinstance(self.video, Movie) and self.video.imdb_id:
imdb_id = self.video.imdb_id
# be sure to remove duplicates using list(set())
langs_list = sorted(list(set([lang.basename.upper() for lang in languages])))
langs = ','.join(langs_list)
logger.debug(f'Searching for those languages: {langs}')
# query the server
if isinstance(self.video, Episode):
res = self.retry(
lambda: self.session.get(self.server_url() + 'subtitles',
params=(('api_key', self.api_key),
('episode_number', self.video.episode),
('film_name', title if not imdb_id else None),
('imdb_id', imdb_id if imdb_id else None),
('languages', langs),
('season_number', self.video.season),
('subs_per_page', 30),
('type', 'tv'),
('comment', 1),
('releases', 1)),
timeout=30),
amount=retry_amount,
retry_timeout=retry_timeout
)
else:
res = self.retry(
lambda: self.session.get(self.server_url() + 'subtitles',
params=(('api_key', self.api_key),
('film_name', title if not imdb_id else None),
('imdb_id', imdb_id if imdb_id else None),
('languages', langs),
('subs_per_page', 30),
('type', 'movie'),
('comment', 1),
('releases', 1)),
timeout=30),
amount=retry_amount,
retry_timeout=retry_timeout
)
if res.status_code == 429:
raise APIThrottled("Too many requests")
elif res.status_code == 403:
raise ConfigurationError("Invalid API key")
elif res.status_code != 200:
res.raise_for_status()
subtitles = []
result = res.json()
if ('success' in result and not result['success']) or ('status' in result and not result['status']):
raise ProviderError(result['error'])
logger.debug(f"Query returned {len(result['subtitles'])} subtitles")
if len(result['subtitles']):
for item in result['subtitles']:
if item.get('episode_from', False) == item.get('episode_end', False): # ignore season packs
subtitle = SubdlSubtitle(
language=Language.fromsubdl(item['language']),
forced=self._is_forced(item),
hearing_impaired=item.get('hi', False) or self._is_hi(item),
page_link=urljoin("https://subdl.com", item.get('subtitlePage', '')),
download_link=item['url'],
file_id=item['name'],
release_names=item.get('releases', []),
uploader=item.get('author', ''),
season=item.get('season', None),
episode=item.get('episode', None),
)
subtitle.get_matches(self.video)
if subtitle.language in languages: # make sure only desired subtitles variants are returned
subtitles.append(subtitle)
return subtitles
@staticmethod
def _is_hi(item):
# Comments include specific mention of removed or non HI
non_hi_tag = ['hi remove', 'non hi', 'nonhi', 'non-hi', 'non-sdh', 'non sdh', 'nonsdh', 'sdh remove']
for tag in non_hi_tag:
if tag in item.get('comment', '').lower():
return False
# Archive filename include _HI_
if '_hi_' in item.get('name', '').lower():
return True
# Comments or release names include some specific strings
# (flatten the release names alongside the comment so substring checks run on strings, not on a nested list)
hi_keys = [item.get('comment', '').lower()] + [x.lower() for x in item.get('releases', [])]
hi_tag = ['_hi_', ' hi ', '.hi.', 'hi ', ' hi', 'sdh', '𝓢𝓓𝓗']
for key in hi_keys:
if any(x in key for x in hi_tag):
return True
# nothing matched, so we consider it non-HI
return False
@staticmethod
def _is_forced(item):
# Comments include specific mention of forced subtitles
forced_tags = ['forced', 'foreign']
for tag in forced_tags:
if tag in item.get('comment', '').lower():
return True
# nothing matched, so we consider these normal subtitles
return False
def list_subtitles(self, video, languages):
return self.query(languages, video)
def download_subtitle(self, subtitle):
logger.debug('Downloading subtitle %r', subtitle)
download_link = urljoin("https://dl.subdl.com", subtitle.download_link)
r = self.retry(
lambda: self.session.get(download_link, timeout=30),
amount=retry_amount,
retry_timeout=retry_timeout
)
if r.status_code == 429:
raise DownloadLimitExceeded("Daily download limit exceeded")
elif r.status_code == 403:
raise ConfigurationError("Invalid API key")
elif r.status_code != 200:
r.raise_for_status()
if not r:
logger.error(f'Could not download subtitle from {download_link}')
subtitle.content = None
return
else:
archive_stream = io.BytesIO(r.content)
if is_zipfile(archive_stream):
archive = ZipFile(archive_stream)
for name in archive.namelist():
# TODO when possible, deal with season pack / multiple files archive
subtitle_content = archive.read(name)
subtitle.content = fix_line_ending(subtitle_content)
return
else:
logger.error(f'Could not unzip subtitle from {download_link}')
subtitle.content = None
return

@ -1,366 +0,0 @@
# coding=utf-8
import io
import logging
import os
import time
import traceback
from urllib import parse
import requests
import inflect
import re
import json
import html
import zipfile
import rarfile
from babelfish import language_converters
from guessit import guessit
from dogpile.cache.api import NO_VALUE
from requests.exceptions import RequestException
from subliminal import Episode, ProviderError
from subliminal.video import Episode, Movie
from subliminal.exceptions import ConfigurationError, ServiceUnavailable
from subliminal.utils import sanitize_release_group
from subliminal.cache import region
from subliminal_patch.http import RetryingCFSession
from subliminal_patch.providers import Provider, reinitialize_on_error
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.converters.subscene import language_ids, supported_languages
from subscene_api.subscene import search, SearchTypes, Subtitle as APISubtitle, SITE_DOMAIN
from subzero.language import Language
p = inflect.engine()
language_converters.register('subscene = subliminal_patch.converters.subscene:SubsceneConverter')
logger = logging.getLogger(__name__)
class SubsceneSubtitle(Subtitle):
provider_name = 'subscene'
hearing_impaired_verifiable = True
is_pack = False
page_link = None
season = None
episode = None
releases = None
def __init__(self, language, release_info, hearing_impaired=False, page_link=None, encoding=None, mods=None,
asked_for_release_group=None, asked_for_episode=None):
super(SubsceneSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link,
encoding=encoding, mods=mods)
self.release_info = self.releases = release_info
self.asked_for_episode = asked_for_episode
self.asked_for_release_group = asked_for_release_group
self.season = None
self.episode = None
@classmethod
def from_api(cls, s):
return cls(Language.fromsubscene(s.language.strip()), s.title, hearing_impaired=s.hearing_impaired,
page_link=s.url)
@property
def id(self):
return self.page_link
@property
def numeric_id(self):
return self.page_link.split("/")[-1]
def get_matches(self, video):
matches = set()
if self.release_info.strip() == get_video_filename(video):
logger.debug("Using hash match as the release name is the same")
matches |= {"hash"}
# episode
if isinstance(video, Episode):
guess = guessit(self.release_info, {'type': 'episode'})
self.season = guess.get("season")
self.episode = guess.get("episode")
matches |= guess_matches(video, guess)
if "season" in matches and "episode" not in guess:
# pack
matches.add("episode")
logger.debug("%r is a pack", self)
self.is_pack = True
if "title" in guess and "year" in matches:
if video.series in guess['title']:
matches.add("series")
# movie
else:
guess = guessit(self.release_info, {'type': 'movie'})
matches |= guess_matches(video, guess)
if video.release_group and "release_group" not in matches and "release_group" in guess:
if sanitize_release_group(video.release_group) in sanitize_release_group(guess["release_group"]):
matches.add("release_group")
self.matches = matches
return matches
def get_download_link(self, session):
return APISubtitle.get_zipped_url(self.page_link, session)
def get_video_filename(video):
return os.path.splitext(os.path.basename(video.original_name))[0]
class SubsceneProvider(Provider, ProviderSubtitleArchiveMixin):
"""
This currently only searches for the filename on SubScene. It doesn't open every found subtitle page to avoid
massive hammering, thus it can't determine whether a subtitle is only-foreign or not.
"""
subtitle_class = SubsceneSubtitle
languages = supported_languages
languages.update(set(Language.rebuild(l, forced=True) for l in languages))
languages.update(set(Language.rebuild(l, hi=True) for l in languages))
video_types = (Episode, Movie)
session = None
skip_wrong_fps = False
hearing_impaired_verifiable = True
only_foreign = False
username = None
password = None
search_throttle = 8 # seconds
def __init__(self, only_foreign=False, username=None, password=None):
if not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.only_foreign = only_foreign
self.username = username
self.password = password
def initialize(self):
logger.info("Creating session")
self.session = RetryingCFSession()
prev_cookies = region.get("subscene_cookies2")
if prev_cookies != NO_VALUE:
logger.debug("Re-using old subscene cookies: %r", prev_cookies)
self.session.cookies.update(prev_cookies)
else:
logger.debug("Logging in")
self.login()
def login(self):
r = self.session.get("https://subscene.com/account/login")
if "Server Error" in r.text:
logger.error("Login unavailable; Maintenance?")
raise ServiceUnavailable("Login unavailable; Maintenance?")
match = re.search(r"<script id='modelJson' type='application/json'>\s*(.+)\s*</script>", r.text)
if match:
h = html
data = json.loads(h.unescape(match.group(1)))
login_url = parse.urljoin(data["siteUrl"], data["loginUrl"])
time.sleep(1.0)
r = self.session.post(login_url,
{
"username": self.username,
"password": self.password,
data["antiForgery"]["name"]: data["antiForgery"]["value"]
})
pep_content = re.search(r"<form method=\"post\" action=\"https://subscene\.com/\">"
r".+name=\"id_token\".+?value=\"(?P<id_token>.+?)\".*?"
r"access_token\".+?value=\"(?P<access_token>.+?)\".+?"
r"token_type.+?value=\"(?P<token_type>.+?)\".+?"
r"expires_in.+?value=\"(?P<expires_in>.+?)\".+?"
r"scope.+?value=\"(?P<scope>.+?)\".+?"
r"state.+?value=\"(?P<state>.+?)\".+?"
r"session_state.+?value=\"(?P<session_state>.+?)\"",
r.text, re.MULTILINE | re.DOTALL)
if pep_content:
r = self.session.post(SITE_DOMAIN, pep_content.groupdict())
try:
r.raise_for_status()
except Exception:
raise ProviderError("Something went wrong when trying to log in: %s", traceback.format_exc())
else:
cj = self.session.cookies.copy()
store_cks = ("scene", "idsrv", "idsrv.xsrf", "idsvr.clients", "idsvr.session", "idsvr.username")
for cn in self.session.cookies.keys():
if cn not in store_cks:
del cj[cn]
logger.debug("Storing cookies: %r", cj)
region.set("subscene_cookies2", cj)
return
raise ProviderError("Something went wrong when trying to log in #1")
def terminate(self):
logger.info("Closing session")
self.session.close()
def _create_filters(self, languages):
self.filters = dict(HearingImpaired="2")
acc_filters = self.filters.copy()
if self.only_foreign:
self.filters["ForeignOnly"] = "True"
acc_filters["ForeignOnly"] = self.filters["ForeignOnly"].lower()
logger.info("Only searching for foreign/forced subtitles")
selected_ids = []
for l in languages:
lid = language_ids.get(l.basename, language_ids.get(l.alpha3, None))
if lid:
selected_ids.append(str(lid))
acc_filters["SelectedIds"] = selected_ids
self.filters["LanguageFilter"] = ",".join(acc_filters["SelectedIds"])
last_filters = region.get("subscene_filters")
if last_filters != acc_filters:
region.set("subscene_filters", acc_filters)
logger.debug("Setting account filters to %r", acc_filters)
self.session.post("https://u.subscene.com/filter", acc_filters, allow_redirects=False)
logger.debug("Filter created: '%s'" % self.filters)
def _enable_filters(self):
self.session.cookies.update(self.filters)
logger.debug("Filters applied")
def list_subtitles(self, video, languages):
if not video.original_name:
logger.info("Skipping search because we don't know the original release name")
return []
self._create_filters(languages)
self._enable_filters()
if isinstance(video, Episode):
international_titles = list(set([video.series] + video.alternative_series[:1]))
subtitles = [s for s in self.query(video, international_titles) if s.language in languages]
if not len(subtitles):
us_titles = [x + ' (US)' for x in international_titles]
subtitles = [s for s in self.query(video, us_titles) if s.language in languages]
return subtitles
else:
titles = list(set([video.title] + video.alternative_titles[:1]))
return [s for s in self.query(video, titles) if s.language in languages]
def download_subtitle(self, subtitle):
if subtitle.pack_data:
logger.info("Using previously downloaded pack data")
if rarfile.is_rarfile(io.BytesIO(subtitle.pack_data)):
logger.debug('Identified rar archive')
archive = rarfile.RarFile(io.BytesIO(subtitle.pack_data))
elif zipfile.is_zipfile(io.BytesIO(subtitle.pack_data)):
logger.debug('Identified zip archive')
archive = zipfile.ZipFile(io.BytesIO(subtitle.pack_data))
else:
logger.error('Unsupported compressed format')
return
subtitle.pack_data = None
try:
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
return
except ProviderError:
pass
# open the archive
r = self.session.get(subtitle.get_download_link(self.session), timeout=10)
r.raise_for_status()
archive_stream = io.BytesIO(r.content)
if rarfile.is_rarfile(archive_stream):
logger.debug('Identified rar archive')
archive = rarfile.RarFile(archive_stream)
elif zipfile.is_zipfile(archive_stream):
logger.debug('Identified zip archive')
archive = zipfile.ZipFile(archive_stream)
else:
logger.error('Unsupported compressed format')
return
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
# store archive as pack_data for later caching
subtitle.pack_data = r.content
def parse_results(self, video, film):
subtitles = []
for s in film.subtitles:
try:
subtitle = SubsceneSubtitle.from_api(s)
except NotImplementedError as e:
logger.info(e)
continue
subtitle.asked_for_release_group = video.release_group
if isinstance(video, Episode):
subtitle.asked_for_episode = video.episode
if self.only_foreign:
subtitle.language = Language.rebuild(subtitle.language, forced=True)
# set subtitle language to hi if it's hearing_impaired
if subtitle.hearing_impaired:
subtitle.language = Language.rebuild(subtitle.language, hi=True)
subtitles.append(subtitle)
logger.debug('Found subtitle %r', subtitle)
return subtitles
def do_search(self, *args, **kwargs):
try:
return search(*args, **kwargs)
except requests.HTTPError:
region.delete("subscene_cookies2")
raise
@reinitialize_on_error((RequestException,), attempts=1)
def query(self, video, titles):
subtitles = []
if isinstance(video, Episode):
more_than_one = len(titles) > 1
for series in titles:
term = u"%s - %s Season" % (series, p.number_to_words("%sth" % video.season).capitalize())
logger.debug('Searching with series and season: %s', term)
film = self.do_search(term, session=self.session, release=False, throttle=self.search_throttle,
limit_to=SearchTypes.TvSerie)
if not film and video.season == 1:
logger.debug('Searching with series name: %s', series)
film = self.do_search(series, session=self.session, release=False, throttle=self.search_throttle,
limit_to=SearchTypes.TvSerie)
if film and film.subtitles:
logger.debug('Searching found: %s', len(film.subtitles))
subtitles += self.parse_results(video, film)
else:
logger.debug('No results found')
if more_than_one:
time.sleep(self.search_throttle)
else:
more_than_one = len(titles) > 1
for title in titles:
logger.debug('Searching for movie results: %r', title)
film = self.do_search(title, year=video.year, session=self.session, limit_to=None, release=False,
throttle=self.search_throttle)
if film and film.subtitles:
subtitles += self.parse_results(video, film)
if more_than_one:
time.sleep(self.search_throttle)
logger.info("%s subtitles found" % len(subtitles))
return subtitles

@ -1,410 +0,0 @@
# -*- coding: utf-8 -*-
from difflib import SequenceMatcher
import functools
import logging
import re
import time
import urllib.parse
from bs4 import BeautifulSoup as bso
import cloudscraper
from guessit import guessit
from requests import Session
from requests.exceptions import HTTPError
from subliminal.exceptions import ProviderError
from subliminal_patch.core import Episode
from subliminal_patch.core import Movie
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.providers.utils import get_archive_from_bytes
from subliminal_patch.providers.utils import get_subtitle_from_archive
from subliminal_patch.providers.utils import update_matches
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
logger = logging.getLogger(__name__)
class SubsceneSubtitle(Subtitle):
provider_name = "subscene_cloudscraper"
hash_verifiable = False
def __init__(self, language, page_link, release_info, episode_number=None):
super().__init__(language, page_link=page_link)
self.release_info = release_info
self.episode_number = episode_number
self.episode_title = None
self._matches = set(
("title", "year")
if episode_number is None
else ("title", "series", "year", "season", "episode")
)
def get_matches(self, video):
update_matches(self._matches, video, self.release_info)
return self._matches
@property
def id(self):
return self.page_link
_BASE_URL = "https://subscene.com"
# TODO: add more seasons and languages
_SEASONS = (
"First",
"Second",
"Third",
"Fourth",
"Fifth",
"Sixth",
"Seventh",
"Eighth",
"Ninth",
"Tenth",
"Eleventh",
"Twelfth",
"Thirdteenth",
"Fourthteenth",
"Fifteenth",
"Sixteenth",
"Seventeenth",
"Eightheenth",
"Nineteenth",
"Tweentieth",
)
_LANGUAGE_MAP = {
"english": "eng",
"farsi_persian": "per",
"arabic": "ara",
"spanish": "spa",
"portuguese": "por",
"italian": "ita",
"dutch": "dut",
"hebrew": "heb",
"indonesian": "ind",
"danish": "dan",
"norwegian": "nor",
"bengali": "ben",
"bulgarian": "bul",
"croatian": "hrv",
"swedish": "swe",
"vietnamese": "vie",
"czech": "cze",
"finnish": "fin",
"french": "fre",
"german": "ger",
"greek": "gre",
"hungarian": "hun",
"icelandic": "ice",
"japanese": "jpn",
"macedonian": "mac",
"malay": "may",
"polish": "pol",
"romanian": "rum",
"russian": "rus",
"serbian": "srp",
"thai": "tha",
"turkish": "tur",
}
class SubsceneProvider(Provider):
provider_name = "subscene_cloudscraper"
_movie_title_regex = re.compile(r"^(.+?)( \((\d{4})\))?$")
_tv_show_title_regex = re.compile(
r"^(.+?) [-\(]\s?(.*?) (season|series)\)?( \((\d{4})\))?$"
)
_supported_languages = {}
_supported_languages["brazillian-portuguese"] = Language("por", "BR")
for key, val in _LANGUAGE_MAP.items():
_supported_languages[key] = Language.fromalpha3b(val)
_supported_languages_reversed = {
val: key for key, val in _supported_languages.items()
}
languages = set(_supported_languages.values())
video_types = (Episode, Movie)
subtitle_class = SubsceneSubtitle
def initialize(self):
pass
def terminate(self):
pass
def _scraper_call(self, url, retry=7, method="GET", sleep=5, **kwargs):
last_exc = None
for n in range(retry):
# Creating an instance for every try in order to avoid dropped connections.
# This could probably be improved!
scraper = cloudscraper.create_scraper()
if method == "GET":
req = scraper.get(url, **kwargs)
elif method == "POST":
req = scraper.post(url, **kwargs)
else:
raise NotImplementedError(f"{method} not allowed")
try:
req.raise_for_status()
except HTTPError as error:
logger.debug(
"'%s' returned. Trying again [%d] in %s", error, n + 1, sleep
)
last_exc = error
time.sleep(sleep)
else:
return req
raise ProviderError("403 Retry count exceeded") from last_exc
def _gen_results(self, query):
url = (
f"{_BASE_URL}/subtitles/searchbytitle?query={urllib.parse.quote(query)}&l="
)
result = self._scraper_call(url, method="POST")
soup = bso(result.content, "html.parser")
for title in soup.select("li div[class='title'] a"):
yield title
def _search_movie(self, title, year):
title = title.lower()
year = str(year)
found_movie = None
results = []
for result in self._gen_results(title):
text = result.text.lower()
match = self._movie_title_regex.match(text)
if not match:
continue
match_title = match.group(1)
match_year = match.group(3)
if year == match_year:
results.append(
{
"href": result.get("href"),
"similarity": SequenceMatcher(None, title, match_title).ratio(),
}
)
if results:
results.sort(key=lambda x: x["similarity"], reverse=True)
found_movie = results[0]["href"]
logger.debug("Movie found: %s", results[0])
return found_movie
def _search_tv_show_season(self, title, season, year=None):
try:
season_str = _SEASONS[season - 1].lower()
except IndexError:
logger.debug("Season number not supported: %s", season)
return None
found_tv_show_season = None
results = []
for result in self._gen_results(title):
text = result.text.lower()
match = self._tv_show_title_regex.match(text)
if not match:
logger.debug("Series title not matched: %s", text)
continue
else:
logger.debug("Series title matched: %s", text)
match_title = match.group(1)
match_season = match.group(2)
# Match "complete series" titles as they usually contain season packs
if season_str == match_season or "complete" in match_season:
plus = 0.1 if year and str(year) in text else 0
results.append(
{
"href": result.get("href"),
"similarity": SequenceMatcher(None, title, match_title).ratio()
+ plus,
}
)
if results:
results.sort(key=lambda x: x["similarity"], reverse=True)
found_tv_show_season = results[0]["href"]
logger.debug("TV Show season found: %s", results[0])
return found_tv_show_season
def _find_movie_subtitles(self, path, language):
soup = self._get_subtitle_page_soup(path, language)
subtitles = []
for item in soup.select("tr"):
subtitle = _get_subtitle_from_item(item, language)
if subtitle is None:
continue
logger.debug("Found subtitle: %s", subtitle)
subtitles.append(subtitle)
return subtitles
def _find_episode_subtitles(
self, path, season, episode, language, episode_title=None
):
soup = self._get_subtitle_page_soup(path, language)
subtitles = []
for item in soup.select("tr"):
valid_item = None
clean_text = " ".join(item.text.split())
if not clean_text:
continue
# It will return list values
guess = _memoized_episode_guess(clean_text)
if "season" not in guess:
if "complete series" in clean_text.lower():
logger.debug("Complete series pack found: %s", clean_text)
guess["season"] = [season]
else:
logger.debug("Nothing guessed from release: %s", clean_text)
continue
if season in guess["season"] and episode in guess.get("episode", []):
logger.debug("Episode match found: %s - %s", guess, clean_text)
valid_item = item
elif season in guess["season"] and not "episode" in guess:
logger.debug("Season pack found: %s", clean_text)
valid_item = item
if valid_item is None:
continue
subtitle = _get_subtitle_from_item(item, language, episode)
if subtitle is None:
continue
subtitle.episode_title = episode_title
logger.debug("Found subtitle: %s", subtitle)
subtitles.append(subtitle)
return subtitles
def _get_subtitle_page_soup(self, path, language):
language_path = self._supported_languages_reversed[language]
result = self._scraper_call(f"{_BASE_URL}{path}/{language_path}")
return bso(result.content, "html.parser")
def list_subtitles(self, video, languages):
is_episode = isinstance(video, Episode)
if is_episode:
result = self._search_tv_show_season(video.series, video.season, video.year)
else:
result = self._search_movie(video.title, video.year)
if result is None:
logger.debug("No results")
return []
subtitles = []
for language in languages:
if is_episode:
subtitles.extend(
self._find_episode_subtitles(
result, video.season, video.episode, language, video.title
)
)
else:
subtitles.extend(self._find_movie_subtitles(result, language))
return subtitles
def download_subtitle(self, subtitle):
# TODO: add MustGetBlacklisted support
result = self._scraper_call(subtitle.page_link)
soup = bso(result.content, "html.parser")
try:
download_url = _BASE_URL + str(
soup.select_one("a[id='downloadButton']")["href"] # type: ignore
)
except (AttributeError, KeyError, TypeError):
raise APIThrottled(f"Couldn't get download url from {subtitle.page_link}")
downloaded = self._scraper_call(download_url)
archive = get_archive_from_bytes(downloaded.content)
if archive is None:
raise APIThrottled(f"Invalid archive: {subtitle.page_link}")
subtitle.content = get_subtitle_from_archive(
archive,
episode=subtitle.episode_number,
episode_title=subtitle.episode_title,
)
@functools.lru_cache(2048)
def _memoized_episode_guess(content):
# Use include to save time from unnecessary checks
return guessit(
content,
{
"type": "episode",
# Add codec keys to avoid matching x264, 5.1, etc as episode info
"includes": ["season", "episode", "video_codec", "audio_codec"],
"enforce_list": True,
},
)
def _get_subtitle_from_item(item, language, episode_number=None):
release_infos = []
try:
release_infos.append(item.find("td", {"class": "a6"}).text.strip())
except (AttributeError, KeyError):
pass
try:
release_infos.append(
item.find("td", {"class": "a1"}).find_all("span")[-1].text.strip()
)
except (AttributeError, KeyError):
pass
release_info = "".join(r_info for r_info in release_infos if r_info)
try:
path = item.find("td", {"class": "a1"}).find("a")["href"]
except (AttributeError, KeyError):
logger.debug("Couldn't get path: %s", item)
return None
return SubsceneSubtitle(language, _BASE_URL + path, release_info, episode_number)

@ -110,7 +110,7 @@ class SubsSabBzSubtitle(Subtitle):
            guess_filename = guessit(self.filename, video.hints)
            matches |= guess_matches(video, guess_filename)

-           if isinstance(video, Movie) and (self.num_cds > 1 or 'cd' in guess_filename):
+           if isinstance(video, Movie) and ((isinstance(self.num_cds, int) and self.num_cds > 1) or 'cd' in guess_filename):
                # reduce score of subtitles for multi-disc movie releases
                return set()

@ -108,7 +108,7 @@ class SubsUnacsSubtitle(Subtitle):
            guess_filename = guessit(self.filename, video.hints)
            matches |= guess_matches(video, guess_filename)

-           if isinstance(video, Movie) and (self.num_cds > 1 or 'cd' in guess_filename):
+           if isinstance(video, Movie) and ((isinstance(self.num_cds, int) and self.num_cds > 1) or 'cd' in guess_filename):
                # reduce score of subtitles for multi-disc movie releases
                return set()

@ -169,7 +169,7 @@ def whisper_get_language_reverse(alpha3):
lan = whisper_get_language(wl, whisper_languages[wl]) lan = whisper_get_language(wl, whisper_languages[wl])
if lan.alpha3 == alpha3: if lan.alpha3 == alpha3:
return wl return wl
raise ValueError return None
def language_from_alpha3(lang): def language_from_alpha3(lang):
name = Language(lang).name name = Language(lang).name
@@ -326,11 +326,23 @@ class WhisperAIProvider(Provider):
         else:
             output_language = "eng"

+        input_language = whisper_get_language_reverse(subtitle.audio_language)
+        if input_language is None:
+            if output_language == "eng":
+                # guess that audio track is mislabelled English and let whisper try to transcribe it
+                input_language = "en"
+                subtitle.task = "transcribe"
+                logger.info(f"Whisper treating unsupported audio track language: '{subtitle.audio_language}' as English")
+            else:
+                logger.info(f"Whisper cannot process {subtitle.video.original_path} because of unsupported audio track language: '{subtitle.audio_language}'")
+                subtitle.content = None
+                return
+
         logger.info(f'Starting WhisperAI {subtitle.task} to {language_from_alpha3(output_language)} for {subtitle.video.original_path}')
         startTime = time.time()

         r = self.session.post(f"{self.endpoint}/asr",
-                              params={'task': subtitle.task, 'language': whisper_get_language_reverse(subtitle.audio_language), 'output': 'srt', 'encode': 'false'},
+                              params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false'},
                               files={'audio_file': out},
                               timeout=(self.response, self.timeout))
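
A minimal standalone sketch of the fallback decision added above, with the language lookup passed in as a parameter (resolve_whisper_input is a hypothetical helper, not part of the provider):

def resolve_whisper_input(audio_language, output_language, lookup):
    """Return (input_language, forced_task); (None, None) means skip the file.

    lookup maps an alpha3 code to a Whisper language code, or None when
    Whisper does not support that language.
    """
    input_language = lookup(audio_language)
    if input_language is not None:
        return input_language, None  # keep the task that was already chosen
    if output_language == "eng":
        # assume a mislabelled English track and let Whisper transcribe it
        return "en", "transcribe"
    return None, None  # unsupported language and no English fallback

# e.g. resolve_whisper_input("zul", "eng", lambda _: None) == ("en", "transcribe")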

@@ -13,7 +13,7 @@ import pysrt
 import pysubs2
 from bs4 import UnicodeDammit
 from pysubs2 import SSAStyle
-from pysubs2.subrip import parse_tags, MAX_REPRESENTABLE_TIME
+from pysubs2.formats.subrip import parse_tags, MAX_REPRESENTABLE_TIME
 from pysubs2.time import ms_to_times
 from subzero.modification import SubtitleModifications
 from subzero.language import Language
@@ -62,7 +62,7 @@ class Subtitle(Subtitle_):
     _guessed_encoding = None
     _is_valid = False
     use_original_format = False
-    format = "srt"  # default format is srt
+    # format = "srt"  # default format is srt

     def __init__(self, language, hearing_impaired=False, page_link=None, encoding=None, mods=None, original_format=False):
         # set subtitle language to hi if it's hearing_impaired
@@ -74,6 +74,21 @@ class Subtitle(Subtitle_):
         self.mods = mods
         self._is_valid = False
         self.use_original_format = original_format
+        self._og_format = None
+
+    @property
+    def format(self):
+        if self.use_original_format and self._og_format is not None:
+            logger.debug("Original format requested [%s]", self._og_format)
+            return self._og_format
+
+        logger.debug("Will assume srt format")
+        return "srt"
+
+    # Compatibility
+    @format.setter
+    def format(self, val):
+        self._og_format = val

     def __repr__(self):
         r_info = str(self.release_info or "").replace("\n", " | ").strip()
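
The change above replaces the class-level format attribute with a computed property while keeping a setter for backward compatibility. A self-contained sketch of the pattern (FormatCarrier is a hypothetical stand-in, not Bazarr code):

class FormatCarrier:
    def __init__(self, use_original_format=False):
        self.use_original_format = use_original_format
        self._og_format = None

    @property
    def format(self):
        # fall back to srt unless an original format was detected or assigned
        if self.use_original_format and self._og_format is not None:
            return self._og_format
        return "srt"

    @format.setter
    def format(self, val):
        # compatibility shim for call sites that assigned .format directly
        self._og_format = val

sub = FormatCarrier(use_original_format=True)
sub.format = "ass"                      # legacy assignment still works
assert sub.format == "ass"
assert FormatCarrier().format == "srt"  # default remains srt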
@@ -292,10 +307,12 @@ class Subtitle(Subtitle_):
                 logger.info("Got FPS from MicroDVD subtitle: %s", subs.fps)
             else:
                 logger.info("Got format: %s", subs.format)
-                if self.use_original_format:
-                    self.format = subs.format
-                    self._is_valid = True
-                    logger.debug("Using original format")
+                self._og_format = subs.format
+                self._is_valid = True
+                # if self.use_original_format:
+                #     self.format = subs.format
+                #     self._is_valid = True
+                #     logger.debug("Using original format")

             return True
         except pysubs2.UnknownFPSError:
@@ -340,7 +357,7 @@ class Subtitle(Subtitle_):
             fragment = fragment.replace(r"\n", u"\n")
             fragment = fragment.replace(r"\N", u"\n")
             if sty.drawing:
-                raise pysubs2.ContentNotUsable
+                return None

         if format == "srt":
             if sty.italic:
@@ -373,9 +390,10 @@ class Subtitle(Subtitle_):
         for i, line in enumerate(visible_lines, 1):
             start = ms_to_timestamp(line.start, mssep=mssep)
             end = ms_to_timestamp(line.end, mssep=mssep)
-            try:
-                text = prepare_text(line.text, sub.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
-            except pysubs2.ContentNotUsable:
+            text = prepare_text(line.text, sub.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
+
+            if text is None:
                 continue

             out.append(u"%d\n" % i)

@@ -33,6 +33,8 @@ class Video(Video_):
         edition=None,
         other=None,
         info_url=None,
+        series_anidb_id=None,
+        series_anidb_episode_id=None,
         **kwargs
     ):
         super(Video, self).__init__(
@@ -57,3 +59,5 @@ class Video(Video_):
         self.original_path = name
         self.other = other
         self.info_url = info_url
+        self.series_anidb_series_id = series_anidb_id
+        self.series_anidb_episode_id = series_anidb_episode_id

@@ -1,299 +0,0 @@
-# -*- coding: utf-8 -*-
-# vim: fenc=utf-8 ts=4 et sw=4 sts=4
-# This file is part of Subscene-API.
-#
-# Subscene-API is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Subscene-API is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Python wrapper for Subscene subtitle database.
-
-since Subscene doesn't provide an official API, I wrote
-this script that does the job by parsing the website"s pages.
-"""
-
-# imports
-import re
-import enum
-import sys
-import requests
-import time
-import logging
-
-is_PY2 = sys.version_info[0] < 3
-if is_PY2:
-    from contextlib2 import suppress
-    from urllib2 import Request, urlopen
-else:
-    from contextlib import suppress
-    from urllib.request import Request, urlopen
-
-from dogpile.cache.api import NO_VALUE
-from subliminal.cache import region
-from bs4 import BeautifulSoup, NavigableString
-
-logger = logging.getLogger(__name__)
-
-# constants
-HEADERS = {
-}
-SITE_DOMAIN = "https://subscene.com"
-
-DEFAULT_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWeb"\
-                     "Kit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
-
-ENDPOINT_RE = re.compile(r'(?uis)<form.+?action="/subtitles/(.+)">.*?<input type="text"')
-
-
-class NewEndpoint(Exception):
-    pass
-
-
-# utils
-def soup_for(url, data=None, session=None, user_agent=DEFAULT_USER_AGENT):
-    url = re.sub("\s", "+", url)
-    if not session:
-        r = Request(url, data=None, headers=dict(HEADERS, **{"User-Agent": user_agent}))
-        html = urlopen(r).read().decode("utf-8")
-    else:
-        ret = session.post(url, data=data)
-        ret.raise_for_status()
-        html = ret.text
-    return BeautifulSoup(html, "html.parser")
-
-
-class AttrDict(object):
-    def __init__(self, *attrs):
-        self._attrs = attrs
-        for attr in attrs:
-            setattr(self, attr, "")
-
-    def to_dict(self):
-        return {k: getattr(self, k) for k in self._attrs}
-
-
-# models
-@enum.unique
-class SearchTypes(enum.Enum):
-    Exact = 1
-    TvSerie = 2
-    Popular = 3
-    Close = 4
-
-
-SectionsParts = {
-    SearchTypes.Exact: "Exact",
-    SearchTypes.TvSerie: "TV-Series",
-    SearchTypes.Popular: "Popular",
-    SearchTypes.Close: "Close"
-}
-
-
-class Subtitle(object):
-    def __init__(self, title, url, language, owner_username, owner_url,
-                 description, hearing_impaired):
-        self.title = title
-        self.url = url
-        self.language = language
-        self.owner_username = owner_username
-        self.owner_url = owner_url
-        self.description = description
-        self.hearing_impaired = hearing_impaired
-        self._zipped_url = None
-
-    def __str__(self):
-        return self.title
-
-    @classmethod
-    def from_rows(cls, rows):
-        subtitles = []
-        for row in rows:
-            if row.td.a is not None and row.td.get("class", ["lazy"])[0] != "empty":
-                subtitles.append(cls.from_row(row))
-        return subtitles
-
-    @classmethod
-    def from_row(cls, row):
-        attrs = AttrDict("title", "url", "language", "owner_username",
-                         "owner_url", "description", "hearing_impaired")
-
-        with suppress(Exception):
-            attrs.title = row.find("td", "a1").a.find_all("span")[1].text \
-                .strip()
-
-        with suppress(Exception):
-            attrs.url = SITE_DOMAIN + row.find("td", "a1").a.get("href")
-
-        with suppress(Exception):
-            attrs.language = row.find("td", "a1").a.find_all("span")[0].text \
-                .strip()
-
-        with suppress(Exception):
-            attrs.owner_username = row.find("td", "a5").a.text.strip()
-
-        with suppress(Exception):
-            attrs.owner_page = SITE_DOMAIN + row.find("td", "a5").a \
-                .get("href").strip()
-
-        with suppress(Exception):
-            attrs.description = row.find("td", "a6").div.text.strip()
-
-        with suppress(Exception):
-            attrs.hearing_impaired = bool(row.find("td", "a41"))
-
-        return cls(**attrs.to_dict())
-
-    @classmethod
-    def get_zipped_url(cls, url, session=None):
-        soup = soup_for(url, session=session)
-        return SITE_DOMAIN + soup.find("div", "download").a.get("href")
-
-    @property
-    def zipped_url(self):
-        if self._zipped_url:
-            return self._zipped_url
-        self._zipped_url = Subtitle.get_zipped_url(self.url)
-        return self._zipped_url
-
-
-class Film(object):
-    def __init__(self, title, year=None, imdb=None, cover=None,
-                 subtitles=None):
-        self.title = title
-        self.year = year
-        self.imdb = imdb
-        self.cover = cover
-        self.subtitles = subtitles
-
-    def __str__(self):
-        return self.title
-
-    @classmethod
-    def from_url(cls, url, session=None):
-        soup = soup_for(url, session=session)
-        content = soup.find("div", "subtitles")
-        header = content.find("div", "box clearfix")
-
-        cover = None
-        try:
-            cover = header.find("div", "poster").img.get("src")
-        except AttributeError:
-            pass
-
-        title = header.find("div", "header").h2.text[:-12].strip()
-        imdb = header.find("div", "header").h2.find("a", "imdb").get("href")
-
-        year = header.find("div", "header").ul.li.text
-        year = int(re.findall(r"[0-9]+", year)[0])
-
-        rows = content.find("table").tbody.find_all("tr")
-        subtitles = Subtitle.from_rows(rows)
-
-        return cls(title, year, imdb, cover, subtitles)
-
-
-# functions
-def section_exists(soup, section):
-    tag_part = SectionsParts[section]
-
-    try:
-        headers = soup.find("div", "search-result").find_all("h2")
-    except AttributeError:
-        return False
-
-    for header in headers:
-        if tag_part in header.text:
-            return True
-
-    return False
-
-
-def get_first_film(soup, section, year=None, session=None):
-    tag_part = SectionsParts[section]
-    tag = None
-
-    headers = soup.find("div", "search-result").find_all("h2")
-    for header in headers:
-        if tag_part in header.text:
-            tag = header
-            break
-
-    if not tag:
-        return
-
-    url = None
-    url = SITE_DOMAIN + tag.findNext("ul").find("li").div.a.get("href")
-    for t in tag.findNext("ul").findAll("li"):
-        if isinstance(t, NavigableString) or not t.div:
-            continue
-        if str(year) in t.div.a.string:
-            url = SITE_DOMAIN + t.div.a.get("href")
-            break
-
-    return Film.from_url(url, session=session)
-
-
-def find_endpoint(session, content=None):
-    endpoint = region.get("subscene_endpoint2")
-    if endpoint is NO_VALUE:
-        if not content:
-            content = session.get(SITE_DOMAIN).text
-
-        m = ENDPOINT_RE.search(content)
-        if m:
-            endpoint = m.group(1).strip()
-            logger.debug("Switching main endpoint to %s", endpoint)
-            region.set("subscene_endpoint2", endpoint)
-
-    return endpoint
-
-
-def search(term, release=True, session=None, year=None, limit_to=SearchTypes.Exact, throttle=0):
-    # note to subscene: if you actually start to randomize the endpoint, we'll have to query your server even more
-
-    if release:
-        endpoint = "release"
-    else:
-        endpoint = find_endpoint(session)
-        time.sleep(throttle)
-
-    if not endpoint:
-        logger.error("Couldn't find endpoint, exiting")
-        return
-
-    soup = soup_for("%s/subtitles/%s" % (SITE_DOMAIN, endpoint), data={"query": term},
-                    session=session)
-
-    if soup:
-        if "Subtitle search by" in str(soup):
-            rows = soup.find("table").tbody.find_all("tr")
-            subtitles = Subtitle.from_rows(rows)
-            return Film(term, subtitles=subtitles)
-
-        for junk, search_type in SearchTypes.__members__.items():
-            if section_exists(soup, search_type):
-                return get_first_film(soup, search_type, year=year, session=session)
-
-            if limit_to == search_type:
-                return

@@ -2,6 +2,13 @@
   "rules": {
     "no-console": "error",
     "camelcase": "warn",
+    "no-restricted-imports": [
+      "error",
+      {
+        "patterns": ["..*"]
+      }
+    ],
+    "simple-import-sort/imports": "error",
     "@typescript-eslint/explicit-module-boundary-types": "off",
     "@typescript-eslint/no-empty-function": "warn",
     "@typescript-eslint/no-empty-interface": "off",
@@ -13,7 +20,7 @@
     "eslint:recommended",
     "plugin:@typescript-eslint/recommended"
   ],
-  "plugins": ["testing-library"],
+  "plugins": ["testing-library", "simple-import-sort"],
   "overrides": [
     {
       "files": [
@@ -21,6 +28,43 @@
         "**/?(*.)+(spec|test).[jt]s?(x)"
       ],
       "extends": ["plugin:testing-library/react"]
+    },
+    {
+      "files": ["*.ts", "*.tsx"],
+      "rules": {
+        "simple-import-sort/imports": [
+          "error",
+          {
+            "groups": [
+              [
+                // React Packages
+                "^react",
+                // Mantine Packages
+                "^@mantine/",
+                // Vendor Packages
+                "^(\\w|@\\w)",
+                // Side Effect Imports
+                "^\\u0000",
+                // Internal Packages
+                "^@/\\w",
+                // Parent Imports
+                "^\\.\\.(?!/?$)",
+                "^\\.\\./?$",
+                // Relative Imports
+                "^\\./(?=.*/)(?!/?$)",
+                "^\\.(?!/?$)",
+                "^\\./?$",
+                // Style Imports
+                "^.+\\.?(css)$"
+              ]
+            ]
+          }
+        ]
+      }
-    }
-  ]
-}
+    }
+  ],
+  "parserOptions": {
+    "sourceType": "module",
+    "ecmaVersion": "latest"
+  }
+}

@@ -1,7 +1,7 @@
-node_modules
-dist
 *.local
+*.tsbuildinfo
 build
 coverage
+dev-dist
-*.tsbuildinfo
+dist
+node_modules

@@ -2,9 +2,12 @@
 ## Dependencies

-- [Node.js](https://nodejs.org/)
+- Either [Node.js](https://nodejs.org/) installed manually or using [Node Version Manager](https://github.com/nvm-sh/nvm)
 - npm (included in Node.js)

+> The recommended and maintained Node version is managed in the `.nvmrc` file. You can either install it manually
+> or use `nvm install` followed by `nvm use`.
+
 ## Getting Started

 1. Clone or download this repository

@@ -1,10 +1,11 @@
+// eslint-disable-next-line no-restricted-imports
 import { dependencies } from "../package.json";

 const vendors = [
   "react",
   "react-router-dom",
   "react-dom",
-  "react-query",
+  "@tanstack/react-query",
   "axios",
   "socket.io-client",
 ];

@@ -5,7 +5,17 @@
     <base href="{{baseUrl}}" />
     <meta charset="utf-8" />
     <link rel="icon" type="image/x-icon" href="./images/favicon.ico" />
-    <link rel="manifest" href="manifest.json" />
+    <link
+      rel="apple-touch-icon"
+      href="./images/apple-touch-icon-180x180.png"
+      sizes="180x180"
+    />
+    <link
+      rel="mask-icon"
+      href="./images/maskable-icon-512x512.png"
+      color="#FFFFFF"
+    />
+    <meta name="theme-color" content="#be4bdb" />
     <meta
       name="viewport"
       content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1"

File diff suppressed because it is too large.

@@ -13,71 +13,79 @@
   },
   "private": true,
   "dependencies": {
-    "@mantine/core": "^6.0.21",
-    "@mantine/dropzone": "^6.0.21",
-    "@mantine/form": "^6.0.21",
-    "@mantine/hooks": "^6.0.21",
-    "@mantine/modals": "^6.0.21",
-    "@mantine/notifications": "^6.0.21",
-    "axios": "^1.6.7",
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0",
-    "react-query": "^3.39.3",
-    "react-router-dom": "^6.22.3",
-    "socket.io-client": "^4.7.4"
+    "@mantine/core": "^7.11.0",
+    "@mantine/dropzone": "^7.11.0",
+    "@mantine/form": "^7.11.0",
+    "@mantine/hooks": "^7.11.0",
+    "@mantine/modals": "^7.11.0",
+    "@mantine/notifications": "^7.11.0",
+    "@tanstack/react-query": "^5.40.1",
+    "@tanstack/react-table": "^8.19.2",
+    "axios": "^1.6.8",
+    "braces": "^3.0.3",
+    "react": "^18.3.1",
+    "react-dom": "^18.3.1",
+    "react-router-dom": "^6.23.1",
+    "socket.io-client": "^4.7.5"
   },
   "devDependencies": {
     "@fontsource/roboto": "^5.0.12",
-    "@fortawesome/fontawesome-svg-core": "^6.5.1",
-    "@fortawesome/free-brands-svg-icons": "^6.5.1",
-    "@fortawesome/free-regular-svg-icons": "^6.5.1",
-    "@fortawesome/free-solid-svg-icons": "^6.5.1",
-    "@fortawesome/react-fontawesome": "^0.2.0",
+    "@fortawesome/fontawesome-svg-core": "^6.5.2",
+    "@fortawesome/free-brands-svg-icons": "^6.5.2",
+    "@fortawesome/free-regular-svg-icons": "^6.5.2",
+    "@fortawesome/free-solid-svg-icons": "^6.5.2",
+    "@fortawesome/react-fontawesome": "^0.2.2",
+    "@tanstack/react-query-devtools": "^5.40.1",
     "@testing-library/jest-dom": "^6.4.2",
-    "@testing-library/react": "^14.2.1",
+    "@testing-library/react": "^15.0.5",
     "@testing-library/user-event": "^14.5.2",
     "@types/jest": "^29.5.12",
-    "@types/lodash": "^4.17.0",
-    "@types/node": "^20.11.26",
-    "@types/react": "^18.2.65",
-    "@types/react-dom": "^18.2.21",
-    "@types/react-table": "^7.7.19",
+    "@types/lodash": "^4.17.1",
+    "@types/node": "^20.12.6",
+    "@types/react": "^18.3.3",
+    "@types/react-dom": "^18.3.0",
+    "@vite-pwa/assets-generator": "^0.2.4",
     "@vitejs/plugin-react": "^4.2.1",
-    "@vitest/coverage-v8": "^1.3.1",
+    "@vitest/coverage-v8": "^1.4.0",
     "@vitest/ui": "^1.2.2",
     "clsx": "^2.1.0",
     "eslint": "^8.57.0",
     "eslint-config-react-app": "^7.0.1",
     "eslint-plugin-react-hooks": "^4.6.0",
+    "eslint-plugin-simple-import-sort": "^12.1.0",
     "eslint-plugin-testing-library": "^6.2.0",
     "husky": "^9.0.11",
     "jsdom": "^24.0.0",
     "lodash": "^4.17.21",
-    "moment": "^2.30.1",
+    "postcss-preset-mantine": "^1.14.4",
+    "postcss-simple-vars": "^7.0.1",
     "prettier": "^3.2.5",
     "prettier-plugin-organize-imports": "^3.2.4",
     "pretty-quick": "^4.0.0",
-    "react-table": "^7.8.0",
-    "recharts": "^2.12.2",
-    "sass": "^1.71.1",
-    "typescript": "^5.4.2",
-    "vite": "^5.1.6",
+    "recharts": "^2.12.6",
+    "sass": "^1.74.1",
+    "typescript": "^5.4.4",
+    "vite": "^5.2.8",
     "vite-plugin-checker": "^0.6.4",
+    "vite-plugin-pwa": "^0.20.0",
     "vitest": "^1.2.2",
     "yaml": "^2.4.1"
   },
   "scripts": {
-    "start": "vite",
     "build": "vite build",
     "build:ci": "vite build -m development",
     "check": "eslint --ext .ts,.tsx src",
+    "check:fix": "eslint --ext .ts,.tsx src --fix",
     "check:ts": "tsc --noEmit --incremental false",
     "check:fmt": "prettier -c .",
-    "test": "vitest",
-    "test:ui": "vitest --ui",
     "coverage": "vitest run --coverage",
     "format": "prettier -w .",
-    "prepare": "cd .. && husky install frontend/.husky"
+    "pwa-assets:generate": "pwa-assets-generator --preset minimal-2023 public/images/logo128.png",
+    "prepare": "cd .. && husky frontend/.husky",
+    "preview": "vite preview",
+    "start": "vite",
+    "test": "vitest",
+    "test:ui": "vitest --ui"
   },
   "browserslist": {
     "production": [

@@ -0,0 +1,14 @@
+module.exports = {
+  plugins: {
+    "postcss-preset-mantine": {},
+    "postcss-simple-vars": {
+      variables: {
+        "mantine-breakpoint-xs": "36em",
+        "mantine-breakpoint-sm": "48em",
+        "mantine-breakpoint-md": "62em",
+        "mantine-breakpoint-lg": "75em",
+        "mantine-breakpoint-xl": "88em",
+      },
+    },
+  },
+};

[Binary image assets changed in this diff are not shown: one image removed (4.6 KiB) and eight added (866 B to 132 KiB). Some files were omitted because too many files changed in this diff.]