Merge development into master

committed by github-actions[bot]
commit 6ec304d13d

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Execute
uses: benc-uk/workflow-dispatch@v1.2.3
uses: benc-uk/workflow-dispatch@v1.2.4
with:
workflow: "release_beta_to_dev"
token: ${{ secrets.WF_GITHUB_TOKEN }}

@ -25,8 +25,8 @@ def check_python_version():
print("Python " + minimum_py3_str + " or greater required. "
"Current version is " + platform.python_version() + ". Please upgrade Python.")
exit_program(EXIT_PYTHON_UPGRADE_NEEDED)
elif int(python_version[0]) == 3 and int(python_version[1]) > 11:
print("Python version greater than 3.11.x is unsupported. Current version is " + platform.python_version() +
elif int(python_version[0]) == 3 and int(python_version[1]) > 12:
print("Python version greater than 3.12.x is unsupported. Current version is " + platform.python_version() +
". Keep in mind that even if it works, you're on your own.")
elif (int(python_version[0]) == minimum_py3_tuple[0] and int(python_version[1]) < minimum_py3_tuple[1]) or \
(int(python_version[0]) != minimum_py3_tuple[0]):

@ -95,13 +95,10 @@ class EpisodesHistory(Resource):
TableHistory.matched,
TableHistory.not_matched,
TableEpisodes.subtitles.label('external_subtitles'),
upgradable_episodes_not_perfect.c.id.label('upgradable'),
blacklisted_subtitles.c.subs_id.label('blacklisted')) \
.select_from(TableHistory) \
.join(TableShows, onclause=TableHistory.sonarrSeriesId == TableShows.sonarrSeriesId) \
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId) \
.join(upgradable_episodes_not_perfect, onclause=TableHistory.id == upgradable_episodes_not_perfect.c.id,
isouter=True) \
.join(blacklisted_subtitles, onclause=TableHistory.subs_id == blacklisted_subtitles.c.subs_id,
isouter=True) \
.where(reduce(operator.and_, query_conditions)) \
@ -120,6 +117,7 @@ class EpisodesHistory(Resource):
'sonarrSeriesId': x.sonarrSeriesId,
'path': x.path,
'language': x.language,
'profileId': x.profileId,
'score': x.score,
'tags': x.tags,
'action': x.action,
@ -130,24 +128,29 @@ class EpisodesHistory(Resource):
'matches': x.matched,
'dont_matches': x.not_matched,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
'upgradable': bool(x.upgradable) if _language_still_desired(x.language, x.profileId) else False,
'blacklisted': bool(x.blacklisted),
} for x in database.execute(stmt).all()]
for item in episode_history:
original_video_path = item['path']
original_subtitle_path = item['subtitles_path']
# is this language still desired or should we simply skip this subtitles from upgrade logic?
still_desired = _language_still_desired(item['language'], item['profileId'])
item.update(postprocess(item))
# Mark not upgradable if score is perfect or if video/subtitles file doesn't exist anymore
# Mark upgradable and get original_id
item.update({'original_id': upgradable_episodes_not_perfect.get(item['id'])})
item.update({'upgradable': bool(item['original_id'])})
# Mark not upgradable if video/subtitles file doesn't exist anymore or if language isn't desired anymore
if item['upgradable']:
if original_subtitle_path not in item['external_subtitles'] or \
not item['video_path'] == original_video_path:
if (item['subtitles_path'] not in item['external_subtitles'] or item['video_path'] != item['path'] or
not still_desired):
item.update({"upgradable": False})
del item['path']
del item['video_path']
del item['external_subtitles']
del item['profileId']
if item['score']:
item['score'] = f"{round((int(item['score']) * 100 / 360), 2)}%"

@ -48,7 +48,8 @@ class EpisodesWanted(Resource):
args = self.get_request_parser.parse_args()
episodeid = args.get('episodeid[]')
wanted_conditions = [(TableEpisodes.missing_subtitles != '[]')]
wanted_conditions = [(TableEpisodes.missing_subtitles.is_not(None)),
(TableEpisodes.missing_subtitles != '[]')]
if len(episodeid) > 0:
wanted_conditions.append((TableEpisodes.sonarrEpisodeId in episodeid))
start = 0
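
The wanted queries (here and in the movie and series counterparts below) now also filter out rows whose missing_subtitles column is still NULL, i.e. not yet indexed, on top of the empty-list string. The conditions are combined the way the rest of the codebase does it, with reduce(operator.and_, ...); a standalone sketch using a simplified model, not Bazarr's actual table classes:

import operator
from functools import reduce
from sqlalchemy import Integer, Text, select
from sqlalchemy.orm import DeclarativeBase, mapped_column

class Base(DeclarativeBase):
    pass

class Episode(Base):
    __tablename__ = 'episode'
    sonarrEpisodeId = mapped_column(Integer, primary_key=True)
    missing_subtitles = mapped_column(Text)

wanted_conditions = [Episode.missing_subtitles.is_not(None),  # skip rows not indexed yet
                     Episode.missing_subtitles != '[]']       # skip rows with nothing missing
print(select(Episode.sonarrEpisodeId).where(reduce(operator.and_, wanted_conditions)))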

@ -90,12 +90,9 @@ class MoviesHistory(Resource):
TableHistoryMovie.not_matched,
TableMovies.profileId,
TableMovies.subtitles.label('external_subtitles'),
upgradable_movies_not_perfect.c.id.label('upgradable'),
blacklisted_subtitles.c.subs_id.label('blacklisted')) \
.select_from(TableHistoryMovie) \
.join(TableMovies) \
.join(upgradable_movies_not_perfect, onclause=TableHistoryMovie.id == upgradable_movies_not_perfect.c.id,
isouter=True) \
.join(blacklisted_subtitles, onclause=TableHistoryMovie.subs_id == blacklisted_subtitles.c.subs_id,
isouter=True) \
.where(reduce(operator.and_, query_conditions)) \
@ -112,6 +109,7 @@ class MoviesHistory(Resource):
'monitored': x.monitored,
'path': x.path,
'language': x.language,
'profileId': x.profileId,
'tags': x.tags,
'score': x.score,
'subs_id': x.subs_id,
@ -121,24 +119,29 @@ class MoviesHistory(Resource):
'matches': x.matched,
'dont_matches': x.not_matched,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
'upgradable': bool(x.upgradable) if _language_still_desired(x.language, x.profileId) else False,
'blacklisted': bool(x.blacklisted),
} for x in database.execute(stmt).all()]
for item in movie_history:
original_video_path = item['path']
original_subtitle_path = item['subtitles_path']
# is this language still desired or should we simply skip this subtitles from upgrade logic?
still_desired = _language_still_desired(item['language'], item['profileId'])
item.update(postprocess(item))
# Mark not upgradable if score or if video/subtitles file doesn't exist anymore
# Mark upgradable and get original_id
item.update({'original_id': upgradable_movies_not_perfect.get(item['id'])})
item.update({'upgradable': bool(item['original_id'])})
# Mark not upgradable if video/subtitles file doesn't exist anymore or if language isn't desired anymore
if item['upgradable']:
if original_subtitle_path not in item['external_subtitles'] or \
not item['video_path'] == original_video_path:
if (item['subtitles_path'] not in item['external_subtitles'] or item['video_path'] != item['path'] or
not still_desired):
item.update({"upgradable": False})
del item['path']
del item['video_path']
del item['external_subtitles']
del item['profileId']
if item['score']:
item['score'] = f"{round((int(item['score']) * 100 / 120), 2)}%"

@ -45,7 +45,8 @@ class MoviesWanted(Resource):
args = self.get_request_parser.parse_args()
radarrid = args.get("radarrid[]")
wanted_conditions = [(TableMovies.missing_subtitles != '[]')]
wanted_conditions = [(TableMovies.missing_subtitles.is_not(None)),
(TableMovies.missing_subtitles != '[]')]
if len(radarrid) > 0:
wanted_conditions.append((TableMovies.radarrId.in_(radarrid)))
start = 0

@ -11,7 +11,7 @@ from subtitles.manual import manual_search, manual_download_subtitle
from sonarr.history import history_log
from app.config import settings
from app.notifier import send_notifications
from subtitles.indexer.series import store_subtitles
from subtitles.indexer.series import store_subtitles, list_missing_subtitles
from subtitles.processing import ProcessSubtitlesResult
from ..utils import authenticate
@ -50,18 +50,27 @@ class ProviderEpisodes(Resource):
"""Search manually for an episode subtitles"""
args = self.get_request_parser.parse_args()
sonarrEpisodeId = args.get('episodeid')
episodeInfo = database.execute(
select(TableEpisodes.path,
TableEpisodes.sceneName,
TableShows.title,
TableShows.profileId)
.select_from(TableEpisodes)
.join(TableShows)
.where(TableEpisodes.sonarrEpisodeId == sonarrEpisodeId)) \
.first()
stmt = select(TableEpisodes.path,
TableEpisodes.sceneName,
TableShows.title,
TableShows.profileId,
TableEpisodes.subtitles,
TableEpisodes.missing_subtitles) \
.select_from(TableEpisodes) \
.join(TableShows) \
.where(TableEpisodes.sonarrEpisodeId == sonarrEpisodeId)
episodeInfo = database.execute(stmt).first()
if not episodeInfo:
return 'Episode not found', 404
elif episodeInfo.subtitles is None:
# subtitles indexing for this episode is incomplete, we'll do it again
store_subtitles(episodeInfo.path, path_mappings.path_replace(episodeInfo.path))
episodeInfo = database.execute(stmt).first()
elif episodeInfo.missing_subtitles is None:
# missing subtitles calculation for this episode is incomplete, we'll do it again
list_missing_subtitles(epno=sonarrEpisodeId)
episodeInfo = database.execute(stmt).first()
title = episodeInfo.title
episodePath = path_mappings.path_replace(episodeInfo.path)
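
The manual-search endpoints now repair incomplete indexing on the fly: if subtitles or missing_subtitles is still NULL, the relevant indexer is re-run and the same statement is executed again. A hedged sketch of that flow, where fetch, reindex and recalculate stand in for database.execute(stmt).first(), store_subtitles() and list_missing_subtitles():

def fetch_with_repair(fetch, reindex, recalculate):
    row = fetch()
    if row is None:
        return None                                # the endpoint answers 404 here
    if row.get('subtitles') is None:               # subtitles indexing incomplete: rebuild, re-read
        reindex()
        row = fetch()
    elif row.get('missing_subtitles') is None:     # missing-subtitles calculation incomplete
        recalculate()
        row = fetch()
    return row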

@ -11,7 +11,7 @@ from subtitles.manual import manual_search, manual_download_subtitle
from radarr.history import history_log_movie
from app.config import settings
from app.notifier import send_notifications_movie
from subtitles.indexer.movies import store_subtitles_movie
from subtitles.indexer.movies import store_subtitles_movie, list_missing_subtitles_movies
from subtitles.processing import ProcessSubtitlesResult
from ..utils import authenticate
@ -51,16 +51,25 @@ class ProviderMovies(Resource):
"""Search manually for a movie subtitles"""
args = self.get_request_parser.parse_args()
radarrId = args.get('radarrid')
movieInfo = database.execute(
select(TableMovies.title,
TableMovies.path,
TableMovies.sceneName,
TableMovies.profileId)
.where(TableMovies.radarrId == radarrId)) \
.first()
stmt = select(TableMovies.title,
TableMovies.path,
TableMovies.sceneName,
TableMovies.profileId,
TableMovies.subtitles,
TableMovies.missing_subtitles) \
.where(TableMovies.radarrId == radarrId)
movieInfo = database.execute(stmt).first()
if not movieInfo:
return 'Movie not found', 404
elif movieInfo.subtitles is None:
# subtitles indexing for this movie is incomplete, we'll do it again
store_subtitles_movie(movieInfo.path, path_mappings.path_replace_movie(movieInfo.path))
movieInfo = database.execute(stmt).first()
elif movieInfo.missing_subtitles is None:
# missing subtitles calculation for this movie is incomplete, we'll do it again
list_missing_subtitles_movies(no=radarrId)
movieInfo = database.execute(stmt).first()
title = movieInfo.title
moviePath = path_mappings.path_replace_movie(movieInfo.path)

@ -34,9 +34,11 @@ class Series(Resource):
'alternativeTitles': fields.List(fields.String),
'audio_language': fields.Nested(get_audio_language_model),
'episodeFileCount': fields.Integer(default=0),
'ended': fields.Boolean(),
'episodeMissingCount': fields.Integer(default=0),
'fanart': fields.String(),
'imdbId': fields.String(),
'lastAired': fields.String(),
'monitored': fields.Boolean(),
'overview': fields.String(),
'path': fields.String(),
@ -73,7 +75,8 @@ class Series(Resource):
.group_by(TableShows.sonarrSeriesId)\
.subquery()
episodes_missing_conditions = [(TableEpisodes.missing_subtitles != '[]')]
episodes_missing_conditions = [(TableEpisodes.missing_subtitles.is_not(None)),
(TableEpisodes.missing_subtitles != '[]')]
episodes_missing_conditions += get_exclusion_clause('series')
episodeMissingCount = select(TableShows.sonarrSeriesId,
@ -99,6 +102,8 @@ class Series(Resource):
TableShows.tags,
TableShows.title,
TableShows.year,
TableShows.ended,
TableShows.lastAired,
episodeFileCount.c.episodeFileCount,
episodeMissingCount.c.episodeMissingCount) \
.select_from(TableShows) \
@ -127,6 +132,8 @@ class Series(Resource):
'tags': x.tags,
'title': x.title,
'year': x.year,
'ended': x.ended,
'lastAired': x.lastAired,
'episodeFileCount': x.episodeFileCount,
'episodeMissingCount': x.episodeMissingCount,
}) for x in database.execute(stmt).all()]

@ -3,7 +3,7 @@
from flask_restx import Resource, Namespace, reqparse
from unidecode import unidecode
from app.config import settings
from app.config import base_url, settings
from app.database import TableShows, TableMovies, database, select
from ..utils import authenticate
@ -34,6 +34,7 @@ class Searches(Resource):
search_list += database.execute(
select(TableShows.title,
TableShows.sonarrSeriesId,
TableShows.poster,
TableShows.year)
.order_by(TableShows.title)) \
.all()
@ -43,6 +44,7 @@ class Searches(Resource):
search_list += database.execute(
select(TableMovies.title,
TableMovies.radarrId,
TableMovies.poster,
TableMovies.year)
.order_by(TableMovies.title)) \
.all()
@ -58,8 +60,11 @@ class Searches(Resource):
if hasattr(x, 'sonarrSeriesId'):
result['sonarrSeriesId'] = x.sonarrSeriesId
result['poster'] = f"{base_url}/images/series{x.poster}" if x.poster else None
else:
result['radarrId'] = x.radarrId
result['poster'] = f"{base_url}/images/movies{x.poster}" if x.poster else None
results.append(result)

@ -134,10 +134,21 @@ def postprocess(item):
if item.get('path'):
item['path'] = path_replace(item['path'])
if item.get('video_path'):
# Provide mapped video path for history
item['video_path'] = path_replace(item['video_path'])
if item.get('subtitles_path'):
# Provide mapped subtitles path
item['subtitles_path'] = path_replace(item['subtitles_path'])
if item.get('external_subtitles'):
# Provide mapped external subtitles paths for history
if isinstance(item['external_subtitles'], str):
item['external_subtitles'] = ast.literal_eval(item['external_subtitles'])
for i, subs in enumerate(item['external_subtitles']):
item['external_subtitles'][i] = path_replace(subs)
# map poster and fanart to server proxy
if item.get('poster') is not None:
poster = item['poster']

@ -239,6 +239,10 @@ validators = [
Validator('opensubtitlescom.use_hash', must_exist=True, default=True, is_type_of=bool),
Validator('opensubtitlescom.include_ai_translated', must_exist=True, default=False, is_type_of=bool),
# napiprojekt section
Validator('napiprojekt.only_authors', must_exist=True, default=False, is_type_of=bool),
Validator('napiprojekt.only_real_names', must_exist=True, default=False, is_type_of=bool),
# addic7ed section
Validator('addic7ed.username', must_exist=True, default='', is_type_of=str, cast=str),
Validator('addic7ed.password', must_exist=True, default='', is_type_of=str, cast=str),
@ -274,6 +278,7 @@ validators = [
Validator('whisperai.endpoint', must_exist=True, default='http://127.0.0.1:9000', is_type_of=str),
Validator('whisperai.response', must_exist=True, default=5, is_type_of=int, gte=1),
Validator('whisperai.timeout', must_exist=True, default=3600, is_type_of=int, gte=1),
Validator('whisperai.pass_video_name', must_exist=True, default=False, is_type_of=bool),
Validator('whisperai.loglevel', must_exist=True, default='INFO', is_type_of=str,
is_in=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']),
@ -328,6 +333,7 @@ validators = [
Validator('titulky.username', must_exist=True, default='', is_type_of=str, cast=str),
Validator('titulky.password', must_exist=True, default='', is_type_of=str, cast=str),
Validator('titulky.approved_only', must_exist=True, default=False, is_type_of=bool),
Validator('titulky.skip_wrong_fps', must_exist=True, default=False, is_type_of=bool),
# embeddedsubtitles section
Validator('embeddedsubtitles.included_codecs', must_exist=True, default=[], is_type_of=list),

@ -136,6 +136,7 @@ class TableEpisodes(Base):
audio_codec = mapped_column(Text)
audio_language = mapped_column(Text)
created_at_timestamp = mapped_column(DateTime)
episode = mapped_column(Integer, nullable=False)
episode_file_id = mapped_column(Integer)
failedAttempts = mapped_column(Text)
@ -152,6 +153,7 @@ class TableEpisodes(Base):
sonarrSeriesId = mapped_column(Integer, ForeignKey('table_shows.sonarrSeriesId', ondelete='CASCADE'))
subtitles = mapped_column(Text)
title = mapped_column(Text, nullable=False)
updated_at_timestamp = mapped_column(DateTime)
video_codec = mapped_column(Text)
@ -213,6 +215,7 @@ class TableMovies(Base):
alternativeTitles = mapped_column(Text)
audio_codec = mapped_column(Text)
audio_language = mapped_column(Text)
created_at_timestamp = mapped_column(DateTime)
failedAttempts = mapped_column(Text)
fanart = mapped_column(Text)
ffprobe_cache = mapped_column(LargeBinary)
@ -234,6 +237,7 @@ class TableMovies(Base):
tags = mapped_column(Text)
title = mapped_column(Text, nullable=False)
tmdbId = mapped_column(Text, nullable=False, unique=True)
updated_at_timestamp = mapped_column(DateTime)
video_codec = mapped_column(Text)
year = mapped_column(Text)
@ -271,8 +275,11 @@ class TableShows(Base):
tvdbId = mapped_column(Integer)
alternativeTitles = mapped_column(Text)
audio_language = mapped_column(Text)
created_at_timestamp = mapped_column(DateTime)
ended = mapped_column(Text)
fanart = mapped_column(Text)
imdbId = mapped_column(Text)
lastAired = mapped_column(Text)
monitored = mapped_column(Text)
overview = mapped_column(Text)
path = mapped_column(Text, nullable=False, unique=True)
@ -283,6 +290,7 @@ class TableShows(Base):
sortTitle = mapped_column(Text)
tags = mapped_column(Text)
title = mapped_column(Text, nullable=False)
updated_at_timestamp = mapped_column(DateTime)
year = mapped_column(Text)

@ -30,7 +30,6 @@ from radarr.blacklist import blacklist_log_movie
from sonarr.blacklist import blacklist_log
from utilities.analytics import event_tracker
_TRACEBACK_RE = re.compile(r'File "(.*?providers[\\/].*?)", line (\d+)')
@ -41,7 +40,7 @@ def time_until_midnight(timezone):
"""
now_in_tz = datetime.datetime.now(tz=timezone)
midnight = now_in_tz.replace(hour=0, minute=0, second=0, microsecond=0) + \
datetime.timedelta(days=1)
return midnight - now_in_tz
@ -91,7 +90,7 @@ def provider_throttle_map():
},
"opensubtitlescom": {
TooManyRequests: (datetime.timedelta(minutes=1), "1 minute"),
DownloadLimitExceeded: (datetime.timedelta(hours=24), "24 hours"),
DownloadLimitExceeded: (datetime.timedelta(hours=6), "6 hours"),
},
"addic7ed": {
DownloadLimitExceeded: (datetime.timedelta(hours=3), "3 hours"),
@ -101,6 +100,9 @@ def provider_throttle_map():
"titlovi": {
TooManyRequests: (datetime.timedelta(minutes=5), "5 minutes"),
},
"titrari": {
TooManyRequests: (datetime.timedelta(minutes=10), "10 minutes"),
},
"titulky": {
DownloadLimitExceeded: (
titulky_limit_reset_timedelta(),
@ -254,6 +256,8 @@ def get_providers_auth():
'include_ai_translated': settings.opensubtitlescom.include_ai_translated,
'api_key': 's38zmzVlW7IlYruWi7mHwDYl2SfMQoC1'
},
'napiprojekt': {'only_authors': settings.napiprojekt.only_authors,
'only_real_names': settings.napiprojekt.only_real_names},
'podnapisi': {
'only_foreign': False, # fixme
'also_foreign': False, # fixme
@ -284,6 +288,7 @@ def get_providers_auth():
'username': settings.titulky.username,
'password': settings.titulky.password,
'approved_only': settings.titulky.approved_only,
'skip_wrong_fps': settings.titulky.skip_wrong_fps,
},
'titlovi': {
'username': settings.titlovi.username,
@ -329,6 +334,7 @@ def get_providers_auth():
'timeout': settings.whisperai.timeout,
'ffmpeg_path': _FFMPEG_BINARY,
'loglevel': settings.whisperai.loglevel,
'pass_video_name': settings.whisperai.pass_video_name,
},
"animetosho": {
'search_threshold': settings.animetosho.search_threshold,
@ -367,7 +373,7 @@ def provider_throttle(name, exception, ids=None, language=None):
cls = valid_cls
throttle_data = provider_throttle_map().get(name, provider_throttle_map()["default"]).get(cls, None) or \
provider_throttle_map()["default"].get(cls, None)
if throttle_data:
throttle_delta, throttle_description = throttle_data
@ -377,7 +383,8 @@ def provider_throttle(name, exception, ids=None, language=None):
throttle_until = datetime.datetime.now() + throttle_delta
if cls_name not in VALID_COUNT_EXCEPTIONS or throttled_count(name):
if cls_name == 'ValueError' and isinstance(exception.args, tuple) and len(exception.args) and exception.args[0].startswith('unsupported pickle protocol'):
if cls_name == 'ValueError' and isinstance(exception.args, tuple) and len(exception.args) and exception.args[
0].startswith('unsupported pickle protocol'):
for fn in subliminal_cache_region.backend.all_filenames:
try:
os.remove(fn)

@ -49,12 +49,12 @@ class Server:
threads=100)
self.connected = True
except OSError as error:
if error.errno == errno.EADDRNOTAVAIL:
if error.errno == 49:
logging.exception("BAZARR cannot bind to specified IP, trying with 0.0.0.0")
self.address = '0.0.0.0'
self.connected = False
super(Server, self).__init__()
elif error.errno == errno.EADDRINUSE:
elif error.errno == 48:
if self.port != '6767':
logging.exception("BAZARR cannot bind to specified TCP port, trying with default (6767)")
self.port = '6767'
@ -64,6 +64,11 @@ class Server:
logging.exception("BAZARR cannot bind to default TCP port (6767) because it's already in use, "
"exiting...")
self.shutdown(EXIT_PORT_ALREADY_IN_USE_ERROR)
elif error.errno == 97:
logging.exception("BAZARR cannot bind to IPv6 (*), trying with 0.0.0.0")
self.address = '0.0.0.0'
self.connected = False
super(Server, self).__init__()
else:
logging.exception("BAZARR cannot start because of unhandled exception.")
self.shutdown()
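
The bind-failure handling above now matches numeric errno values (49, 48 and the new 97) instead of the errno constants. For reference, 48 and 49 are the BSD/macOS numbers for EADDRINUSE and EADDRNOTAVAIL, while 97 is Linux's EAFNOSUPPORT, the error raised when binding an IPv6 wildcard on a host without IPv6 support; a quick check:

import errno
# Linux:     EAFNOSUPPORT == 97, EADDRINUSE == 98, EADDRNOTAVAIL == 99
# macOS/BSD: EADDRINUSE == 48, EADDRNOTAVAIL == 49
print(errno.EAFNOSUPPORT, errno.EADDRINUSE, errno.EADDRNOTAVAIL)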

@ -6,7 +6,7 @@ from app.database import TableHistoryMovie, database, insert
from app.event_handler import event_stream
def history_log_movie(action, radarr_id, result, fake_provider=None, fake_score=None):
def history_log_movie(action, radarr_id, result, fake_provider=None, fake_score=None, upgraded_from_id=None):
description = result.message
video_path = result.path
language = result.language_code
@ -31,6 +31,7 @@ def history_log_movie(action, radarr_id, result, fake_provider=None, fake_score=
subs_id=subs_id,
subtitles_path=subtitles_path,
matched=str(matched) if matched else None,
not_matched=str(not_matched) if not_matched else None
not_matched=str(not_matched) if not_matched else None,
upgradedFromId=upgraded_from_id,
))
event_stream(type='movie-history')

@ -5,6 +5,7 @@ import logging
from constants import MINIMUM_VIDEO_SIZE
from sqlalchemy.exc import IntegrityError
from datetime import datetime
from app.config import settings
from utilities.path_mappings import path_mappings
@ -49,6 +50,7 @@ def get_movie_file_size_from_db(movie_path):
# Update movies in DB
def update_movie(updated_movie, send_event):
try:
updated_movie['updated_at_timestamp'] = datetime.now()
database.execute(
update(TableMovies).values(updated_movie)
.where(TableMovies.tmdbId == updated_movie['tmdbId']))
@ -75,6 +77,7 @@ def get_movie_monitored_status(movie_id):
# Insert new movies in DB
def add_movie(added_movie, send_event):
try:
added_movie['created_at_timestamp'] = datetime.now()
database.execute(
insert(TableMovies)
.values(added_movie))
@ -203,7 +206,11 @@ def update_movies(send_event=True):
files_missing += 1
if send_event:
hide_progress(id='movies_progress')
show_progress(id='movies_progress',
header='Syncing movies...',
name='',
value=movies_count,
count=movies_count)
trace(f"Skipped {files_missing} file missing movies out of {movies_count}")
if sync_monitored:
@ -296,6 +303,7 @@ def update_one_movie(movie_id, action, defer_search=False):
# Update existing movie in DB
elif movie and existing_movie:
try:
movie['updated_at_timestamp'] = datetime.now()
database.execute(
update(TableMovies)
.values(movie)
@ -312,6 +320,7 @@ def update_one_movie(movie_id, action, defer_search=False):
# Insert new movie in DB
elif movie and not existing_movie:
try:
movie['created_at_timestamp'] = datetime.now()
database.execute(
insert(TableMovies)
.values(movie))

@ -6,7 +6,8 @@ from app.database import TableHistory, database, insert
from app.event_handler import event_stream
def history_log(action, sonarr_series_id, sonarr_episode_id, result, fake_provider=None, fake_score=None):
def history_log(action, sonarr_series_id, sonarr_episode_id, result, fake_provider=None, fake_score=None,
upgraded_from_id=None):
description = result.message
video_path = result.path
language = result.language_code
@ -32,6 +33,7 @@ def history_log(action, sonarr_series_id, sonarr_episode_id, result, fake_provid
subs_id=subs_id,
subtitles_path=subtitles_path,
matched=str(matched) if matched else None,
not_matched=str(not_matched) if not_matched else None
not_matched=str(not_matched) if not_matched else None,
upgradedFromId=upgraded_from_id,
))
event_stream(type='episode-history')

@ -5,6 +5,7 @@ import logging
from constants import MINIMUM_VIDEO_SIZE
from sqlalchemy.exc import IntegrityError
from datetime import datetime
from app.database import database, TableShows, TableEpisodes, delete, update, insert, select
from app.config import settings
@ -145,10 +146,27 @@ def sync_episodes(series_id, send_event=True):
if send_event:
event_stream(type='episode', action='delete', payload=removed_episode)
# Insert new episodes in DB
if len(episodes_to_add):
for added_episode in episodes_to_add:
try:
added_episode['created_at_timestamp'] = datetime.now()
database.execute(insert(TableEpisodes).values(added_episode))
except IntegrityError as e:
logging.error(f"BAZARR cannot insert episodes because of {e}. We'll try to update it instead.")
del added_episode['created_at_timestamp']
episodes_to_update.append(added_episode)
else:
store_subtitles(added_episode['path'], path_mappings.path_replace(added_episode['path']))
if send_event:
event_stream(type='episode', payload=added_episode['sonarrEpisodeId'])
# Update existing episodes in DB
if len(episodes_to_update):
for updated_episode in episodes_to_update:
try:
updated_episode['updated_at_timestamp'] = datetime.now()
database.execute(update(TableEpisodes)
.values(updated_episode)
.where(TableEpisodes.sonarrEpisodeId == updated_episode['sonarrEpisodeId']))
@ -160,19 +178,6 @@ def sync_episodes(series_id, send_event=True):
if send_event:
event_stream(type='episode', action='update', payload=updated_episode['sonarrEpisodeId'])
# Insert new episodes in DB
if len(episodes_to_add):
for added_episode in episodes_to_add:
try:
database.execute(insert(TableEpisodes).values(added_episode))
except IntegrityError as e:
logging.error(f"BAZARR cannot insert episodes because of {e}")
else:
store_subtitles(added_episode['path'], path_mappings.path_replace(added_episode['path']))
if send_event:
event_stream(type='episode', payload=added_episode['sonarrEpisodeId'])
logging.debug(f'BAZARR All episodes from series ID {series_id} synced from Sonarr into database.')
@ -225,6 +230,7 @@ def sync_one_episode(episode_id, defer_search=False):
# Update existing episodes in DB
elif episode and existing_episode:
try:
episode['updated_at_timestamp'] = datetime.now()
database.execute(
update(TableEpisodes)
.values(episode)
@ -240,6 +246,7 @@ def sync_one_episode(episode_id, defer_search=False):
# Insert new episodes in DB
elif episode and not existing_episode:
try:
episode['created_at_timestamp'] = datetime.now()
database.execute(
insert(TableEpisodes)
.values(episode))
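
Episode sync now inserts new rows before running updates and, if the insert raises IntegrityError because the row already exists, drops the freshly added created_at_timestamp and queues the row for the update pass instead. Roughly, as a sketch (db_insert stands in for database.execute(insert(TableEpisodes).values(...))):

from datetime import datetime
from sqlalchemy.exc import IntegrityError

def insert_or_queue_update(row, update_queue, db_insert):
    row['created_at_timestamp'] = datetime.now()
    try:
        db_insert(row)
    except IntegrityError:
        del row['created_at_timestamp']  # the update pass stamps updated_at_timestamp instead
        update_queue.append(row)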

@ -2,6 +2,8 @@
import os
from dateutil import parser
from app.config import settings
from app.database import TableShows, database, select
from constants import MINIMUM_VIDEO_SIZE
@ -45,6 +47,10 @@ def seriesParser(show, action, tags_dict, language_profiles, serie_default_profi
imdbId = show['imdbId'] if 'imdbId' in show else None
ended = 'True' if 'ended' in show and show['ended'] else 'False'
lastAired = parser.parse(show['lastAired']).strftime("%Y-%m-%d") if 'lastAired' in show and show['lastAired'] else None
audio_language = []
if not settings.general.parse_embedded_audio_track:
if get_sonarr_info.is_legacy():
@ -56,22 +62,24 @@ def seriesParser(show, action, tags_dict, language_profiles, serie_default_profi
audio_language = []
parsed_series = {
'title': show["title"],
'path': show["path"],
'tvdbId': int(show["tvdbId"]),
'sonarrSeriesId': int(show["id"]),
'overview': overview,
'poster': poster,
'fanart': fanart,
'audio_language': str(audio_language),
'sortTitle': show['sortTitle'],
'year': str(show['year']),
'alternativeTitles': str(alternate_titles),
'tags': str(tags),
'seriesType': show['seriesType'],
'imdbId': imdbId,
'monitored': str(bool(show['monitored']))
}
'title': show["title"],
'path': show["path"],
'tvdbId': int(show["tvdbId"]),
'sonarrSeriesId': int(show["id"]),
'overview': overview,
'poster': poster,
'fanart': fanart,
'audio_language': str(audio_language),
'sortTitle': show['sortTitle'],
'year': str(show['year']),
'alternativeTitles': str(alternate_titles),
'tags': str(tags),
'seriesType': show['seriesType'],
'imdbId': imdbId,
'monitored': str(bool(show['monitored'])),
'ended': ended,
'lastAired': lastAired,
}
if action == 'insert':
parsed_series['profileId'] = serie_default_profile

@ -3,6 +3,7 @@
import logging
from sqlalchemy.exc import IntegrityError
from datetime import datetime
from app.config import settings
from subtitles.indexer.series import list_missing_subtitles
@ -127,6 +128,7 @@ def update_series(send_event=True):
.first():
try:
trace(f"Updating {show['title']}")
updated_series['updated_at_timestamp'] = datetime.now()
database.execute(
update(TableShows)
.values(updated_series)
@ -145,6 +147,7 @@ def update_series(send_event=True):
try:
trace(f"Inserting {show['title']}")
added_series['created_at_timestamp'] = datetime.now()
database.execute(
insert(TableShows)
.values(added_series))
@ -175,7 +178,11 @@ def update_series(send_event=True):
event_stream(type='series', action='delete', payload=series)
if send_event:
hide_progress(id='series_progress')
show_progress(id='series_progress',
header='Syncing series...',
name='',
value=series_count,
count=series_count)
if sync_monitored:
trace(f"skipped {skipped_count} unmonitored series out of {i}")
@ -238,6 +245,7 @@ def update_one_series(series_id, action):
# Update existing series in DB
if action == 'updated' and existing_series:
try:
series['updated_at_timestamp'] = datetime.now()
database.execute(
update(TableShows)
.values(series)
@ -252,6 +260,7 @@ def update_one_series(series_id, action):
# Insert new series in DB
elif action == 'updated' and not existing_series:
try:
series['created_at_timestamp'] = datetime.now()
database.execute(
insert(TableShows)
.values(series))

@ -13,7 +13,7 @@ from subliminal_patch.core_persistent import download_best_subtitles
from subliminal_patch.score import ComputeScore
from app.config import settings, get_scores, get_array_from
from app.database import TableEpisodes, TableMovies, database, select
from app.database import TableEpisodes, TableMovies, database, select, get_profiles_list
from utilities.path_mappings import path_mappings
from utilities.helper import get_target_folder, force_unicode
from languages.get_languages import alpha3_from_alpha2
@ -24,8 +24,8 @@ from .processing import process_subtitle
@update_pools
def generate_subtitles(path, languages, audio_language, sceneName, title, media_type, forced_minimum_score=None,
is_upgrade=False, profile_id=None, check_if_still_required=False,
def generate_subtitles(path, languages, audio_language, sceneName, title, media_type, profile_id,
forced_minimum_score=None, is_upgrade=False, check_if_still_required=False,
previous_subtitles_to_delete=None):
if not languages:
return None
@ -41,6 +41,8 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
providers = pool.providers
language_set = _get_language_obj(languages=languages)
profile = get_profiles_list(profile_id=profile_id)
original_format = profile['originalFormat']
hi_required = "force HI" if any([x.hi for x in language_set]) else False
also_forced = any([x.forced for x in language_set])
forced_required = all([x.forced for x in language_set])
@ -72,7 +74,8 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
pool_instance=pool,
min_score=int(min_score),
hearing_impaired=hi_required,
compute_score=ComputeScore(get_scores()))
compute_score=ComputeScore(get_scores()),
use_original_format=original_format in (1, "1", "True", True))
if downloaded_subtitles:
for video, subtitles in downloaded_subtitles.items():
@ -100,7 +103,7 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
tags=None, # fixme
directory=fld,
chmod=chmod,
formats=tuple(subtitle_formats),
formats=subtitle_formats,
path_decoder=force_unicode
)
except Exception as e:

@ -292,7 +292,11 @@ def movies_full_scan_subtitles(use_cache=None):
count=count_movies)
store_subtitles_movie(movie.path, path_mappings.path_replace_movie(movie.path), use_cache=use_cache)
hide_progress(id='movies_disk_scan')
show_progress(id='movies_disk_scan',
header='Full disk scan...',
name='Movies subtitles',
value=count_movies,
count=count_movies)
gc.collect()

@ -294,7 +294,11 @@ def series_full_scan_subtitles(use_cache=None):
count=count_episodes)
store_subtitles(episode.path, path_mappings.path_replace(episode.path), use_cache=use_cache)
hide_progress(id='episodes_disk_scan')
show_progress(id='episodes_disk_scan',
header='Full disk scan...',
name='Episodes subtitles',
value=count_episodes,
count=count_episodes)
gc.collect()

@ -135,7 +135,9 @@ def guess_external_subtitles(dest_folder, subtitles, media_type, previously_inde
continue
text = text.decode(encoding)
if core.parse_for_hi_regex(subtitle_text=text,
alpha3_language=language.alpha3 if hasattr(language, 'alpha3') else None):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
if os.path.splitext(subtitle_path)[1] == 'srt':
if core.parse_for_hi_regex(subtitle_text=text,
alpha3_language=language.alpha3 if hasattr(language, 'alpha3') else
None):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
return subtitles

@ -158,7 +158,7 @@ def manual_download_subtitle(path, audio_language, hi, forced, subtitle, provide
subtitle.language.forced = True
else:
subtitle.language.forced = False
if use_original_format in ("1", "True"):
if use_original_format in (1, "1", "True", True):
subtitle.use_original_format = True
subtitle.mods = get_array_from(settings.general.subzero_mods)
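
Both generate_subtitles() and manual_download_subtitle() now accept the profile's originalFormat value in any of the representations it may take (1, "1", True or "True"). The same normalization as a tiny, illustrative helper (the name is hypothetical):

def normalize_original_format(value):
    return value in (1, "1", "True", True)

assert normalize_original_format("1") and normalize_original_format(True)
assert not normalize_original_format(0) and not normalize_original_format("False")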

@ -9,7 +9,7 @@ import os
from functools import reduce
from utilities.path_mappings import path_mappings
from subtitles.indexer.movies import store_subtitles_movie
from subtitles.indexer.movies import store_subtitles_movie, list_missing_subtitles_movies
from radarr.history import history_log_movie
from app.notifier import send_notifications_movie
from app.get_providers import get_providers
@ -22,20 +22,30 @@ from ..download import generate_subtitles
def movies_download_subtitles(no):
conditions = [(TableMovies.radarrId == no)]
conditions += get_exclusion_clause('movie')
movie = database.execute(
select(TableMovies.path,
TableMovies.missing_subtitles,
TableMovies.audio_language,
TableMovies.radarrId,
TableMovies.sceneName,
TableMovies.title,
TableMovies.tags,
TableMovies.monitored)
.where(reduce(operator.and_, conditions))) \
.first()
stmt = select(TableMovies.path,
TableMovies.missing_subtitles,
TableMovies.audio_language,
TableMovies.radarrId,
TableMovies.sceneName,
TableMovies.title,
TableMovies.tags,
TableMovies.monitored,
TableMovies.profileId,
TableMovies.subtitles) \
.where(reduce(operator.and_, conditions))
movie = database.execute(stmt).first()
if not movie:
logging.debug("BAZARR no movie with that radarrId can be found in database:", str(no))
logging.debug(f"BAZARR no movie with that radarrId can be found in database: {no}")
return
elif movie.subtitles is None:
# subtitles indexing for this movie is incomplete, we'll do it again
store_subtitles_movie(movie.path, path_mappings.path_replace_movie(movie.path))
movie = database.execute(stmt).first()
elif movie.missing_subtitles is None:
# missing subtitles calculation for this movie is incomplete, we'll do it again
list_missing_subtitles_movies(no=no)
movie = database.execute(stmt).first()
moviePath = path_mappings.path_replace_movie(movie.path)
@ -79,6 +89,7 @@ def movies_download_subtitles(no):
str(movie.sceneName),
movie.title,
'movie',
movie.profileId,
check_if_still_required=True):
if result:
@ -88,4 +99,8 @@ def movies_download_subtitles(no):
history_log_movie(1, no, result)
send_notifications_movie(no, result.message)
hide_progress(id=f'movie_search_progress_{no}')
show_progress(id=f'movie_search_progress_{no}',
header='Searching missing subtitles...',
name=movie.title,
value=count_movie,
count=count_movie)

@ -9,7 +9,7 @@ import os
from functools import reduce
from utilities.path_mappings import path_mappings
from subtitles.indexer.series import store_subtitles
from subtitles.indexer.series import store_subtitles, list_missing_subtitles
from sonarr.history import history_log
from app.notifier import send_notifications
from app.get_providers import get_providers
@ -32,18 +32,12 @@ def series_download_subtitles(no):
(TableEpisodes.missing_subtitles != '[]')]
conditions += get_exclusion_clause('series')
episodes_details = database.execute(
select(TableEpisodes.path,
TableEpisodes.missing_subtitles,
TableEpisodes.monitored,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.sceneName,
TableShows.tags,
TableShows.seriesType,
TableEpisodes.audio_language,
select(TableEpisodes.sonarrEpisodeId,
TableShows.title,
TableEpisodes.season,
TableEpisodes.episode,
TableEpisodes.title.label('episodeTitle'))
TableEpisodes.title.label('episodeTitle'),
TableEpisodes.missing_subtitles)
.select_from(TableEpisodes)
.join(TableShows)
.where(reduce(operator.and_, conditions))) \
@ -65,110 +59,100 @@ def series_download_subtitles(no):
value=i,
count=count_episodes_details)
audio_language_list = get_audio_profile_languages(episode.audio_language)
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
audio_language = 'None'
languages = []
for language in ast.literal_eval(episode.missing_subtitles):
if language is not None:
hi_ = "True" if language.endswith(':hi') else "False"
forced_ = "True" if language.endswith(':forced') else "False"
languages.append((language.split(":")[0], hi_, forced_))
if not languages:
continue
for result in generate_subtitles(path_mappings.path_replace(episode.path),
languages,
audio_language,
str(episode.sceneName),
episode.title,
'series',
check_if_still_required=True):
if result:
if isinstance(result, tuple) and len(result):
result = result[0]
store_subtitles(episode.path, path_mappings.path_replace(episode.path))
history_log(1, no, episode.sonarrEpisodeId, result)
send_notifications(no, episode.sonarrEpisodeId, result.message)
episode_download_subtitles(no=episode.sonarrEpisodeId, send_progress=False, providers_list=providers_list)
else:
logging.info("BAZARR All providers are throttled")
break
hide_progress(id=f'series_search_progress_{no}')
show_progress(id=f'series_search_progress_{no}',
header='Searching missing subtitles...',
name='',
value=count_episodes_details,
count=count_episodes_details)
def episode_download_subtitles(no, send_progress=False):
def episode_download_subtitles(no, send_progress=False, providers_list=None):
conditions = [(TableEpisodes.sonarrEpisodeId == no)]
conditions += get_exclusion_clause('series')
episodes_details = database.execute(
select(TableEpisodes.path,
TableEpisodes.missing_subtitles,
TableEpisodes.monitored,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.sceneName,
TableShows.tags,
TableShows.title,
TableShows.sonarrSeriesId,
TableEpisodes.audio_language,
TableShows.seriesType,
TableEpisodes.title.label('episodeTitle'),
TableEpisodes.season,
TableEpisodes.episode)
.select_from(TableEpisodes)
.join(TableShows)
.where(reduce(operator.and_, conditions))) \
.all()
if not episodes_details:
stmt = select(TableEpisodes.path,
TableEpisodes.missing_subtitles,
TableEpisodes.monitored,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.sceneName,
TableShows.tags,
TableShows.title,
TableShows.sonarrSeriesId,
TableEpisodes.audio_language,
TableShows.seriesType,
TableEpisodes.title.label('episodeTitle'),
TableEpisodes.season,
TableEpisodes.episode,
TableShows.profileId,
TableEpisodes.subtitles) \
.select_from(TableEpisodes) \
.join(TableShows) \
.where(reduce(operator.and_, conditions))
episode = database.execute(stmt).first()
if not episode:
logging.debug("BAZARR no episode with that sonarrEpisodeId can be found in database:", str(no))
return
for episode in episodes_details:
elif episode.subtitles is None:
# subtitles indexing for this episode is incomplete, we'll do it again
store_subtitles(episode.path, path_mappings.path_replace_movie(episode.path))
episode = database.execute(stmt).first()
elif episode.missing_subtitles is None:
# missing subtitles calculation for this episode is incomplete, we'll do it again
list_missing_subtitles(epno=no)
episode = database.execute(stmt).first()
if not providers_list:
providers_list = get_providers()
if providers_list:
if send_progress:
show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=0,
count=1)
audio_language_list = get_audio_profile_languages(episode.audio_language)
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
audio_language = 'None'
languages = []
for language in ast.literal_eval(episode.missing_subtitles):
if language is not None:
hi_ = "True" if language.endswith(':hi') else "False"
forced_ = "True" if language.endswith(':forced') else "False"
languages.append((language.split(":")[0], hi_, forced_))
if not languages:
continue
for result in generate_subtitles(path_mappings.path_replace(episode.path),
languages,
audio_language,
str(episode.sceneName),
episode.title,
'series',
check_if_still_required=True):
if result:
if isinstance(result, tuple) and len(result):
result = result[0]
store_subtitles(episode.path, path_mappings.path_replace(episode.path))
history_log(1, episode.sonarrSeriesId, episode.sonarrEpisodeId, result)
send_notifications(episode.sonarrSeriesId, episode.sonarrEpisodeId, result.message)
if send_progress:
hide_progress(id=f'episode_search_progress_{no}')
if providers_list:
if send_progress:
show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=0,
count=1)
audio_language_list = get_audio_profile_languages(episode.audio_language)
if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name']
else:
logging.info("BAZARR All providers are throttled")
break
audio_language = 'None'
languages = []
for language in ast.literal_eval(episode.missing_subtitles):
if language is not None:
hi_ = "True" if language.endswith(':hi') else "False"
forced_ = "True" if language.endswith(':forced') else "False"
languages.append((language.split(":")[0], hi_, forced_))
if not languages:
return
for result in generate_subtitles(path_mappings.path_replace(episode.path),
languages,
audio_language,
str(episode.sceneName),
episode.title,
'series',
episode.profileId,
check_if_still_required=True):
if result:
if isinstance(result, tuple) and len(result):
result = result[0]
store_subtitles(episode.path, path_mappings.path_replace(episode.path))
history_log(1, episode.sonarrSeriesId, episode.sonarrEpisodeId, result)
send_notifications(episode.sonarrSeriesId, episode.sonarrEpisodeId, result.message)
if send_progress:
show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=1,
count=1)
else:
logging.info("BAZARR All providers are throttled")

@ -91,11 +91,11 @@ class AniDBClient(object):
return None, None, None
is_special_entry = True
for special_entry in special_entries:
mapping_list = special_entry.findall(f".//mapping[@tvdbseason='{tvdb_series_season}']")
if len(mapping_list) > 0:
anidb_id = int(special_entry.attrib.get('anidbid'))
offset = int(mapping_list[0].attrib.get('offset', 0))
anidb_id = int(special_entry.attrib.get('anidbid'))
offset = int(mapping_list[0].attrib.get('offset', 0)) if len(mapping_list) > 0 else 0
if not is_special_entry:
# Sort the anime by offset in ascending order
@ -111,7 +111,7 @@ class AniDBClient(object):
mapping_list = anime.find('mapping-list')
# Handle mapping list for Specials
if mapping_list:
if mapping_list is not None:
for mapping in mapping_list.findall("mapping"):
if mapping.text is None:
continue
@ -176,7 +176,7 @@ class AniDBClient(object):
episode_elements = xml_root.find('episodes')
if not episode_elements:
if episode_elements is None:
raise ValueError
return etree.tostring(episode_elements, encoding='utf8', method='xml')
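
The AniDB client changes above swap truthiness tests on ElementTree elements for explicit None checks: an element that exists but has no children is falsy, so "if mapping_list:" and "if not episode_elements:" misbehave on empty elements. A quick demonstration:

import xml.etree.ElementTree as etree

mapping_list = etree.fromstring("<mapping-list/>")  # present in the XML, but has no children
print(mapping_list is None)     # False: the element exists
print(len(mapping_list) == 0)   # True: no children, so a plain truthiness test treats it as falsy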

@ -43,10 +43,15 @@ class AniListClient(object):
logger.debug(f"Based on '{mapped_tag}': '{candidate_id_value}', anime-list matched: {obj}")
if len(obj) > 0:
return obj[0]["anilist_id"]
anilist_id = obj[0].get("anilist_id")
if not anilist_id:
logger.error("This entry does not have an AniList ID")
return anilist_id
else:
logger.debug(f"Could not find corresponding AniList ID with '{mapped_tag}': {candidate_id_value}")
return None
return None
def refine_from_anilist(path, video):

@ -40,10 +40,8 @@ def refine_from_db(path, video):
if data:
video.series = _TITLE_RE.sub('', data.seriesTitle)
if not video.season and data.season:
video.season = int(data.season)
if not video.episode and data.episode:
video.episode = int(data.episode)
video.season = int(data.season)
video.episode = int(data.episode)
video.title = data.episodeTitle
# Only refine year as a fallback

@ -2,8 +2,10 @@
# fmt: off
import logging
import json
from subliminal import Movie
from guessit.jsonutils import GuessitEncoder
from utilities.path_mappings import path_mappings
from app.database import TableEpisodes, TableMovies, database, select
@ -37,10 +39,12 @@ def refine_from_ffprobe(path, video):
return video
if data['ffprobe']:
logging.debug('FFprobe found: %s', data['ffprobe'])
logging.debug('FFprobe found: %s', json.dumps(data['ffprobe'], cls=GuessitEncoder, indent=4,
ensure_ascii=False))
parser_data = data['ffprobe']
elif data['mediainfo']:
logging.debug('Mediainfo found: %s', data['mediainfo'])
logging.debug('Mediainfo found: %s', json.dumps(data['mediainfo'], cls=GuessitEncoder, indent=4,
ensure_ascii=False))
parser_data = data['mediainfo']
else:
parser_data = {}

@ -3,8 +3,10 @@
import logging
import gc
import os
from app.config import settings
from app.event_handler import show_progress, hide_progress
from subtitles.tools.subsyncer import SubSyncer
@ -40,7 +42,22 @@ def sync_subtitles(video_path, srt_path, srt_lang, forced, hi, percent_score, so
'sonarr_episode_id': sonarr_episode_id,
'radarr_id': radarr_id,
}
subsync.sync(**sync_kwargs)
subtitles_filename = os.path.basename(srt_path)
show_progress(id=f'subsync_{subtitles_filename}',
header='Syncing Subtitle',
name=srt_path,
value=0,
count=1)
try:
subsync.sync(**sync_kwargs)
except Exception:
hide_progress(id=f'subsync_{subtitles_filename}')
else:
show_progress(id=f'subsync_{subtitles_filename}',
header='Syncing Subtitle',
name=srt_path,
value=1,
count=1)
del subsync
gc.collect()
return True

@ -94,12 +94,20 @@ def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, fo
for i, line in enumerate(translated_lines):
lines_list[line['id']] = line['line']
hide_progress(id=f'translate_progress_{dest_srt_file}')
show_progress(id=f'translate_progress_{dest_srt_file}',
header=f'Translating subtitles lines to {language_from_alpha3(to_lang)}...',
name='',
value=lines_list_len,
count=lines_list_len)
logging.debug(f'BAZARR saving translated subtitles to {dest_srt_file}')
for i, line in enumerate(subs):
try:
line.plaintext = lines_list[i]
if lines_list[i]:
line.plaintext = lines_list[i]
else:
# we assume that there was nothing to translate if Google returns None. ex.: "♪♪"
continue
except IndexError:
logging.error(f'BAZARR is unable to translate malformed subtitles: {source_srt_file}')
return False

@ -44,8 +44,8 @@ def upgrade_subtitles():
'sonarrSeriesId': x.sonarrSeriesId,
'subtitles_path': x.subtitles_path,
'path': x.path,
'profileId': x.profileId,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
'upgradable': bool(x.upgradable),
} for x in database.execute(
select(TableHistory.id,
TableShows.title.label('seriesTitle'),
@ -62,22 +62,30 @@ def upgrade_subtitles():
TableHistory.subtitles_path,
TableEpisodes.path,
TableShows.profileId,
TableEpisodes.subtitles.label('external_subtitles'),
episodes_to_upgrade.c.id.label('upgradable'))
TableEpisodes.subtitles.label('external_subtitles'))
.select_from(TableHistory)
.join(TableShows, onclause=TableHistory.sonarrSeriesId == TableShows.sonarrSeriesId)
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId)
.join(episodes_to_upgrade, onclause=TableHistory.id == episodes_to_upgrade.c.id, isouter=True)
.where(episodes_to_upgrade.c.id.is_not(None)))
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId))
.all() if _language_still_desired(x.language, x.profileId) and
x.subtitles_path in x.external_subtitles and
x.video_path == x.path
]
for item in episodes_data:
# do not consider subtitles that do not exist on disk anymore
if item['subtitles_path'] not in item['external_subtitles']:
continue
# Mark upgradable and get original_id
item.update({'original_id': episodes_to_upgrade.get(item['id'])})
item.update({'upgradable': bool(item['original_id'])})
# cleanup the unused attributes
del item['path']
del item['external_subtitles']
# Make sure to keep only upgradable episode subtitles
episodes_data = [x for x in episodes_data if 'upgradable' in x and x['upgradable']]
count_episode_to_upgrade = len(episodes_data)
for i, episode in enumerate(episodes_data):
@ -94,6 +102,8 @@ def upgrade_subtitles():
return
language, is_forced, is_hi = parse_language_string(episode['language'])
if is_hi and not _is_hi_required(language, episode['profileId']):
is_hi = 'False'
audio_language_list = get_audio_profile_languages(episode['audio_language'])
if len(audio_language_list) > 0:
@ -107,6 +117,7 @@ def upgrade_subtitles():
str(episode['sceneName']),
episode['seriesTitle'],
'series',
episode['profileId'],
forced_minimum_score=int(episode['score']),
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace(
@ -118,14 +129,20 @@ def upgrade_subtitles():
if isinstance(result, tuple) and len(result):
result = result[0]
store_subtitles(episode['video_path'], path_mappings.path_replace(episode['video_path']))
history_log(3, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], result)
history_log(3, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], result,
upgraded_from_id=episode['original_id'])
send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], result.message)
hide_progress(id='upgrade_episodes_progress')
show_progress(id='upgrade_episodes_progress',
header='Upgrading episodes subtitles...',
name='',
value=count_episode_to_upgrade,
count=count_episode_to_upgrade)
if use_radarr:
movies_to_upgrade = get_upgradable_movies_subtitles()
movies_data = [{
'id': x.id,
'title': x.title,
'language': x.language,
'audio_language': x.audio_language,
@ -134,11 +151,12 @@ def upgrade_subtitles():
'score': x.score,
'radarrId': x.radarrId,
'path': x.path,
'profileId': x.profileId,
'subtitles_path': x.subtitles_path,
'external_subtitles': [y[1] for y in ast.literal_eval(x.external_subtitles) if y[1]],
'upgradable': bool(x.upgradable),
} for x in database.execute(
select(TableMovies.title,
select(TableHistoryMovie.id,
TableMovies.title,
TableHistoryMovie.language,
TableMovies.audio_language,
TableHistoryMovie.video_path,
@ -148,21 +166,29 @@ def upgrade_subtitles():
TableHistoryMovie.subtitles_path,
TableMovies.path,
TableMovies.profileId,
TableMovies.subtitles.label('external_subtitles'),
movies_to_upgrade.c.id.label('upgradable'))
TableMovies.subtitles.label('external_subtitles'))
.select_from(TableHistoryMovie)
.join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId)
.join(movies_to_upgrade, onclause=TableHistoryMovie.id == movies_to_upgrade.c.id, isouter=True)
.where(movies_to_upgrade.c.id.is_not(None)))
.join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId))
.all() if _language_still_desired(x.language, x.profileId) and
x.subtitles_path in x.external_subtitles and
x.video_path == x.path
]
for item in movies_data:
# do not consider subtitles that do not exist on disk anymore
if item['subtitles_path'] not in item['external_subtitles']:
continue
# Mark upgradable and get original_id
item.update({'original_id': movies_to_upgrade.get(item['id'])})
item.update({'upgradable': bool(item['original_id'])})
# cleanup the unused attributes
del item['path']
del item['external_subtitles']
# Make sure to keep only upgradable movie subtitles
movies_data = [x for x in movies_data if 'upgradable' in x and x['upgradable']]
count_movie_to_upgrade = len(movies_data)
for i, movie in enumerate(movies_data):
@ -179,6 +205,8 @@ def upgrade_subtitles():
return
language, is_forced, is_hi = parse_language_string(movie['language'])
if is_hi and not _is_hi_required(language, movie['profileId']):
is_hi = 'False'
audio_language_list = get_audio_profile_languages(movie['audio_language'])
if len(audio_language_list) > 0:
@ -192,6 +220,7 @@ def upgrade_subtitles():
str(movie['sceneName']),
movie['title'],
'movie',
movie['profileId'],
forced_minimum_score=int(movie['score']),
is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie(
@ -203,10 +232,14 @@ def upgrade_subtitles():
result = result[0]
store_subtitles_movie(movie['video_path'],
path_mappings.path_replace_movie(movie['video_path']))
history_log_movie(3, movie['radarrId'], result)
history_log_movie(3, movie['radarrId'], result, upgraded_from_id=movie['original_id'])
send_notifications_movie(movie['radarrId'], result.message)
hide_progress(id='upgrade_movies_progress')
show_progress(id='upgrade_movies_progress',
header='Upgrading movies subtitles...',
name='',
value=count_movie_to_upgrade,
count=count_movie_to_upgrade)
logging.info('BAZARR Finished searching for Subtitles to upgrade. Check History for more information.')
@ -243,10 +276,10 @@ def parse_language_string(language_string):
def get_upgradable_episode_subtitles():
if not settings.general.upgrade_subs:
# return an empty set of rows
return select(TableHistory.id) \
.where(TableHistory.id.is_(None)) \
.subquery()
logging.debug("Subtitles upgrade is disabled so we wont go further.")
return {}
logging.debug("Determining upgradable episode subtitles")
max_id_timestamp = select(TableHistory.video_path,
TableHistory.language,
func.max(TableHistory.timestamp).label('timestamp')) \
@ -255,31 +288,76 @@ def get_upgradable_episode_subtitles():
.subquery()
minimum_timestamp, query_actions = get_queries_condition_parameters()
logging.debug(f"Minimum timestamp used for subtitles upgrade: {minimum_timestamp}")
logging.debug(f"These actions are considered for subtitles upgrade: {query_actions}")
upgradable_episodes_conditions = [(TableHistory.action.in_(query_actions)),
(TableHistory.timestamp > minimum_timestamp),
TableHistory.score.is_not(None),
(TableHistory.score < 357)]
upgradable_episodes_conditions += get_exclusion_clause('series')
return select(TableHistory.id)\
.select_from(TableHistory) \
subtitles_to_upgrade = database.execute(
select(TableHistory.id,
TableHistory.video_path,
TableHistory.language,
TableHistory.upgradedFromId)
.select_from(TableHistory)
.join(TableShows, onclause=TableHistory.sonarrSeriesId == TableShows.sonarrSeriesId)
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId)
.join(max_id_timestamp, onclause=and_(TableHistory.video_path == max_id_timestamp.c.video_path,
TableHistory.language == max_id_timestamp.c.language,
max_id_timestamp.c.timestamp == TableHistory.timestamp)) \
.join(TableShows, onclause=TableHistory.sonarrSeriesId == TableShows.sonarrSeriesId) \
.join(TableEpisodes, onclause=TableHistory.sonarrEpisodeId == TableEpisodes.sonarrEpisodeId) \
.where(reduce(operator.and_, upgradable_episodes_conditions)) \
.order_by(TableHistory.timestamp.desc())\
.subquery()
max_id_timestamp.c.timestamp == TableHistory.timestamp))
.where(reduce(operator.and_, upgradable_episodes_conditions))
.order_by(TableHistory.timestamp.desc())) \
.all()
logging.debug(f"{len(subtitles_to_upgrade)} subtitles are candidates and we've selected the latest timestamp for "
f"each of them.")
query_actions_without_upgrade = [x for x in query_actions if x != 3]
upgradable_episode_subtitles = {}
for subtitle_to_upgrade in subtitles_to_upgrade:
# check if we have the original subtitles id in database and use it instead of guessing
if subtitle_to_upgrade.upgradedFromId:
upgradable_episode_subtitles.update({subtitle_to_upgrade.id: subtitle_to_upgrade.upgradedFromId})
logging.debug(f"The original subtitles ID for TableHistory ID {subtitle_to_upgrade.id} stored in DB is: "
f"{subtitle_to_upgrade.upgradedFromId}")
continue
# if not, we have to try to guess the original subtitles id
logging.debug("We don't have the original subtitles ID for this subtitle so we'll have to guess it.")
potential_parents = database.execute(
select(TableHistory.id, TableHistory.action)
.where(TableHistory.video_path == subtitle_to_upgrade.video_path,
TableHistory.language == subtitle_to_upgrade.language,)
.order_by(TableHistory.timestamp.desc())
).all()
logging.debug(f"The potential original subtitles IDs for TableHistory ID {subtitle_to_upgrade.id} are: "
f"{[x.id for x in potential_parents]}")
confirmed_parent = None
for potential_parent in potential_parents:
if potential_parent.action in query_actions_without_upgrade:
confirmed_parent = potential_parent.id
logging.debug(f"This ID is the first one to match selected query actions so it's been selected as "
f"original subtitles ID: {potential_parent.id}")
break
if confirmed_parent not in upgradable_episode_subtitles.values():
logging.debug("We haven't defined this ID as original subtitles ID for any other ID so we'll add it to "
"upgradable episode subtitles.")
upgradable_episode_subtitles.update({subtitle_to_upgrade.id: confirmed_parent})
logging.debug(f"We've found {len(upgradable_episode_subtitles)} episode subtitles IDs to be upgradable")
return upgradable_episode_subtitles
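The loop above maps each upgraded history row to its original ("parent") row: it prefers the stored upgradedFromId and otherwise walks older rows for the same video/language until the newest non-upgrade action is found. A minimal standalone sketch of that idea, using plain dicts and invented names (`history_rows`, `map_to_parents`) rather than Bazarr's SQLAlchemy rows:

```python
# Sketch only: plain dicts stand in for TableHistory rows; action 3 is assumed to mean "upgraded".
UPGRADE_ACTION = 3

def map_to_parents(history_rows):
    """Return {row_id: parent_id} for rows that can be traced back to an original download."""
    parents = {}
    for row in history_rows:
        if row.get("upgradedFromId"):
            parents[row["id"]] = row["upgradedFromId"]
            continue
        # Guess: newest other row for the same file/language whose action is not an upgrade.
        candidates = sorted(
            (r for r in history_rows
             if r["video_path"] == row["video_path"]
             and r["language"] == row["language"]
             and r["id"] != row["id"]),
            key=lambda r: r["timestamp"],
            reverse=True,
        )
        parent = next((r["id"] for r in candidates if r["action"] != UPGRADE_ACTION), None)
        if parent is not None and parent not in parents.values():
            parents[row["id"]] = parent
    return parents
```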
def get_upgradable_movies_subtitles():
if not settings.general.upgrade_subs:
# return an empty set of rows
return select(TableHistoryMovie.id) \
.where(TableHistoryMovie.id.is_(None)) \
.subquery()
logging.debug("Subtitles upgrade is disabled so we won't go further.")
return {}
logging.debug("Determining upgradable movie subtitles")
max_id_timestamp = select(TableHistoryMovie.video_path,
TableHistoryMovie.language,
func.max(TableHistoryMovie.timestamp).label('timestamp')) \
@ -288,21 +366,66 @@ def get_upgradable_movies_subtitles():
.subquery()
minimum_timestamp, query_actions = get_queries_condition_parameters()
logging.debug(f"Minimum timestamp used for subtitles upgrade: {minimum_timestamp}")
logging.debug(f"These actions are considered for subtitles upgrade: {query_actions}")
upgradable_movies_conditions = [(TableHistoryMovie.action.in_(query_actions)),
(TableHistoryMovie.timestamp > minimum_timestamp),
TableHistoryMovie.score.is_not(None),
(TableHistoryMovie.score < 117)]
upgradable_movies_conditions += get_exclusion_clause('movie')
return select(TableHistoryMovie.id) \
.select_from(TableHistoryMovie) \
subtitles_to_upgrade = database.execute(
select(TableHistoryMovie.id,
TableHistoryMovie.video_path,
TableHistoryMovie.language,
TableHistoryMovie.upgradedFromId)
.select_from(TableHistoryMovie)
.join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId)
.join(max_id_timestamp, onclause=and_(TableHistoryMovie.video_path == max_id_timestamp.c.video_path,
TableHistoryMovie.language == max_id_timestamp.c.language,
max_id_timestamp.c.timestamp == TableHistoryMovie.timestamp)) \
.join(TableMovies, onclause=TableHistoryMovie.radarrId == TableMovies.radarrId) \
.where(reduce(operator.and_, upgradable_movies_conditions)) \
.order_by(TableHistoryMovie.timestamp.desc()) \
.subquery()
max_id_timestamp.c.timestamp == TableHistoryMovie.timestamp))
.where(reduce(operator.and_, upgradable_movies_conditions))
.order_by(TableHistoryMovie.timestamp.desc())) \
.all()
logging.debug(f"{len(subtitles_to_upgrade)} subtitles are candidates and we've selected the latest timestamp for "
f"each of them.")
query_actions_without_upgrade = [x for x in query_actions if x != 3]
upgradable_movie_subtitles = {}
for subtitle_to_upgrade in subtitles_to_upgrade:
# check if we have the original subtitles id in database and use it instead of guessing
if subtitle_to_upgrade.upgradedFromId:
upgradable_movie_subtitles.update({subtitle_to_upgrade.id: subtitle_to_upgrade.upgradedFromId})
logging.debug(f"The original subtitles ID for TableHistoryMovie ID {subtitle_to_upgrade.id} stored in DB "
f"is: {subtitle_to_upgrade.upgradedFromId}")
continue
# if not, we have to try to guess the original subtitles id
logging.debug("We don't have the original subtitles ID for this subtitle so we'll have to guess it.")
potential_parents = database.execute(
select(TableHistoryMovie.id, TableHistoryMovie.action)
.where(TableHistoryMovie.video_path == subtitle_to_upgrade.video_path,
TableHistoryMovie.language == subtitle_to_upgrade.language, )
.order_by(TableHistoryMovie.timestamp.desc())
).all()
logging.debug(f"The potential original subtitles IDs for TableHistoryMovie ID {subtitle_to_upgrade.id} are: "
f"{[x.id for x in potential_parents]}")
confirmed_parent = None
for potential_parent in potential_parents:
if potential_parent.action in query_actions_without_upgrade:
confirmed_parent = potential_parent.id
logging.debug(f"This ID is the newest one to match selected query actions so it's been selected as "
f"original subtitles ID: {potential_parent.id}")
break
if confirmed_parent not in upgradable_movie_subtitles.values():
logging.debug("We haven't defined this ID as original subtitles ID for any other ID so we'll add it to "
"upgradable episode subtitles.")
upgradable_movie_subtitles.update({subtitle_to_upgrade.id: confirmed_parent})
logging.debug(f"We've found {len(upgradable_movie_subtitles)} movie subtitles IDs to be upgradable")
return upgradable_movie_subtitles
def _language_still_desired(language, profile_id):
@ -327,3 +450,11 @@ def _language_from_items(items):
results.append(item['language'])
results.append(f'{item["language"]}:hi')
return results
def _is_hi_required(language, profile_id):
profile = get_profiles_list(profile_id=profile_id)
for item in profile['items']:
if language.split(':')[0] == item['language'] and item['hi'] == 'True':
return True
return False
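For reference, a hedged illustration of the profile payload this helper iterates over; the sample data is made up, but the field names and the string 'True' comparison mirror the snippet above:

```python
# Illustrative only: a fabricated languages profile shaped like the items checked by _is_hi_required.
sample_profile = {
    'profileId': 1,
    'items': [
        {'language': 'en', 'hi': 'True', 'forced': 'False'},
        {'language': 'fr', 'hi': 'False', 'forced': 'False'},
    ],
}

def is_hi_required(language, profile):
    # Same comparison as above: strip any ':hi'/':forced' suffix before matching the profile item.
    for item in profile['items']:
        if language.split(':')[0] == item['language'] and item['hi'] == 'True':
            return True
    return False

assert is_hi_required('en:hi', sample_profile) is True
assert is_hi_required('fr', sample_profile) is False
```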

@ -3,9 +3,11 @@
import logging
import os
import json
from subzero.language import Language
from subzero.video import parse_video
from guessit.jsonutils import GuessitEncoder
from app.config import settings
from languages.custom_lang import CustomLanguage
@ -26,33 +28,31 @@ def get_video(path, title, sceneName, providers=None, media_type="movie"):
:return: `Video` instance
"""
hints = {"title": title, "type": "movie" if media_type == "movie" else "episode"}
used_scene_name = False
original_path = path
original_name = os.path.basename(path)
hash_from = None
if sceneName != "None":
# use the sceneName but keep the folder structure for better guessing
path = os.path.join(os.path.dirname(path), sceneName + os.path.splitext(path)[1])
used_scene_name = True
hash_from = original_path
try:
logging.debug(f'BAZARR guessing video object using video file path: {path}')
skip_hashing = settings.general.skip_hashing
video = parse_video(path, hints=hints, skip_hashing=skip_hashing, dry_run=used_scene_name, providers=providers,
hash_from=hash_from)
video.used_scene_name = used_scene_name
video.original_name = original_name
video.original_path = original_path
video = parse_video(path, hints=hints, skip_hashing=skip_hashing, dry_run=False, providers=providers)
if sceneName != "None":
# refine the video object using the sceneName and update the video object accordingly
scenename_with_extension = sceneName + os.path.splitext(path)[1]
logging.debug(f'BAZARR guessing video object using scene name: {scenename_with_extension}')
scenename_video = parse_video(scenename_with_extension, hints=hints, dry_run=True)
refine_video_with_scenename(initial_video=video, scenename_video=scenename_video)
logging.debug('BAZARR resulting video object once refined using scene name: %s',
json.dumps(vars(video), cls=GuessitEncoder, indent=4, ensure_ascii=False))
for key, refiner in registered_refiners.items():
logging.debug("Running refiner: %s", key)
refiner(original_path, video)
refiner(path, video)
logging.debug('BAZARR is using these video object properties: %s', vars(video))
logging.debug('BAZARR is using these video object properties: %s', json.dumps(vars(video),
cls=GuessitEncoder, indent=4,
ensure_ascii=False))
return video
except Exception as error:
logging.exception("BAZARR Error (%s) trying to get video information for this file: %s", error, original_path)
logging.exception("BAZARR Error (%s) trying to get video information for this file: %s", error, path)
def _get_download_code3(subtitle):
@ -100,3 +100,10 @@ def _set_forced_providers(pool, also_forced=False, forced_required=False):
"opensubtitles": {'also_foreign': also_forced, "only_foreign": forced_required}
}
)
def refine_video_with_scenename(initial_video, scenename_video):
for key, value in vars(scenename_video).items():
if value and getattr(initial_video, key) in [None, (), {}, []]:
setattr(initial_video, key, value)
return initial_video
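The helper above only copies attributes that are still empty on the path-parsed video from the scene-name-parsed one. A small self-contained illustration of that merge rule, using a throwaway class instead of subliminal's Video object:

```python
# Standalone illustration of the merge rule in refine_video_with_scenename (FakeVideo is invented).
class FakeVideo:
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

path_guess = FakeVideo(title="Show", season=None, release_group=None, resolution="720p")
scene_guess = FakeVideo(title="Show", season=2, release_group="GRP", resolution="1080p")

for key, value in vars(scene_guess).items():
    # Only fill attributes that are still empty on the initial guess.
    if value and getattr(path_guess, key) in [None, (), {}, []]:
        setattr(path_guess, key, value)

assert path_guess.season == 2 and path_guess.release_group == "GRP"
assert path_guess.resolution == "720p"  # existing value is kept
```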

@ -8,7 +8,7 @@ import operator
from functools import reduce
from utilities.path_mappings import path_mappings
from subtitles.indexer.movies import store_subtitles_movie
from subtitles.indexer.movies import store_subtitles_movie, list_missing_subtitles_movies
from radarr.history import history_log_movie
from app.notifier import send_notifications_movie
from app.get_providers import get_providers
@ -50,6 +50,7 @@ def _wanted_movie(movie):
str(movie.sceneName),
movie.title,
'movie',
movie.profileId,
check_if_still_required=True):
if result:
@ -62,29 +63,41 @@ def _wanted_movie(movie):
def wanted_download_subtitles_movie(radarr_id):
movies_details = database.execute(
select(TableMovies.path,
TableMovies.missing_subtitles,
TableMovies.radarrId,
TableMovies.audio_language,
TableMovies.sceneName,
TableMovies.failedAttempts,
TableMovies.title)
.where(TableMovies.radarrId == radarr_id)) \
.all()
for movie in movies_details:
providers_list = get_providers()
if providers_list:
_wanted_movie(movie)
else:
logging.info("BAZARR All providers are throttled")
break
stmt = select(TableMovies.path,
TableMovies.missing_subtitles,
TableMovies.radarrId,
TableMovies.audio_language,
TableMovies.sceneName,
TableMovies.failedAttempts,
TableMovies.title,
TableMovies.profileId,
TableMovies.subtitles) \
.where(TableMovies.radarrId == radarr_id)
movie = database.execute(stmt).first()
if not movie:
logging.debug(f"BAZARR no movie with that radarrId can be found in database: {radarr_id}")
return
elif movie.subtitles is None:
# subtitles indexing for this movie is incomplete, we'll do it again
store_subtitles_movie(movie.path, path_mappings.path_replace_movie(movie.path))
movie = database.execute(stmt).first()
elif movie.missing_subtitles is None:
# missing subtitles calculation for this movie is incomplete, we'll do it again
list_missing_subtitles_movies(no=radarr_id)
movie = database.execute(stmt).first()
providers_list = get_providers()
if providers_list:
_wanted_movie(movie)
else:
logging.info("BAZARR All providers are throttled")
def wanted_search_missing_subtitles_movies():
conditions = [(TableMovies.missing_subtitles != '[]')]
conditions = [(TableMovies.missing_subtitles.is_not(None)),
(TableMovies.missing_subtitles != '[]')]
conditions += get_exclusion_clause('movie')
movies = database.execute(
select(TableMovies.radarrId,
@ -109,6 +122,10 @@ def wanted_search_missing_subtitles_movies():
logging.info("BAZARR All providers are throttled")
break
hide_progress(id='wanted_movies_progress')
show_progress(id='wanted_movies_progress',
header='Searching subtitles...',
name="",
value=count_movies,
count=count_movies)
logging.info('BAZARR Finished searching for missing Movies Subtitles. Check History for more information.')

@ -8,6 +8,7 @@ import operator
from functools import reduce
from utilities.path_mappings import path_mappings
from subtitles.indexer.series import store_subtitles, list_missing_subtitles
from subtitles.indexer.series import store_subtitles
from sonarr.history import history_log
from app.notifier import send_notifications
@ -51,6 +52,7 @@ def _wanted_episode(episode):
str(episode.sceneName),
episode.title,
'series',
episode.profileId,
check_if_still_required=True):
if result:
if isinstance(result, tuple) and len(result):
@ -63,32 +65,44 @@ def _wanted_episode(episode):
def wanted_download_subtitles(sonarr_episode_id):
episodes_details = database.execute(
select(TableEpisodes.path,
TableEpisodes.missing_subtitles,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.sonarrSeriesId,
TableEpisodes.audio_language,
TableEpisodes.sceneName,
TableEpisodes.failedAttempts,
TableShows.title)
.select_from(TableEpisodes)
.join(TableShows)
.where((TableEpisodes.sonarrEpisodeId == sonarr_episode_id))) \
.all()
for episode in episodes_details:
providers_list = get_providers()
if providers_list:
_wanted_episode(episode)
else:
logging.info("BAZARR All providers are throttled")
break
stmt = select(TableEpisodes.path,
TableEpisodes.missing_subtitles,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.sonarrSeriesId,
TableEpisodes.audio_language,
TableEpisodes.sceneName,
TableEpisodes.failedAttempts,
TableShows.title,
TableShows.profileId,
TableEpisodes.subtitles) \
.select_from(TableEpisodes) \
.join(TableShows) \
.where((TableEpisodes.sonarrEpisodeId == sonarr_episode_id))
episode_details = database.execute(stmt).first()
if not episode_details:
logging.debug(f"BAZARR no episode with that sonarrId can be found in database: {sonarr_episode_id}")
return
elif episode_details.subtitles is None:
# subtitles indexing for this episode is incomplete, we'll do it again
store_subtitles(episode_details.path, path_mappings.path_replace(episode_details.path))
episode_details = database.execute(stmt).first()
elif episode_details.missing_subtitles is None:
# missing subtitles calculation for this episode is incomplete, we'll do it again
list_missing_subtitles(epno=sonarr_episode_id)
episode_details = database.execute(stmt).first()
providers_list = get_providers()
if providers_list:
_wanted_episode(episode_details)
else:
logging.info("BAZARR All providers are throttled")
def wanted_search_missing_subtitles_series():
conditions = [(TableEpisodes.missing_subtitles != '[]')]
conditions = [(TableEpisodes.missing_subtitles.is_not(None)),
(TableEpisodes.missing_subtitles != '[]')]
conditions += get_exclusion_clause('series')
episodes = database.execute(
select(TableEpisodes.sonarrSeriesId,
@ -120,6 +134,10 @@ def wanted_search_missing_subtitles_series():
logging.info("BAZARR All providers are throttled")
break
hide_progress(id='wanted_episodes_progress')
show_progress(id='wanted_episodes_progress',
header='Searching subtitles...',
name='',
value=count_episodes,
count=count_episodes)
logging.info('BAZARR Finished searching for missing Series Subtitles. Check History for more information.')

@ -2,8 +2,11 @@
import json
from sqlalchemy import func
from app.config import settings
from app.database import TableShowsRootfolder, TableMoviesRootfolder, TableLanguagesProfiles, database, select
from app.database import (TableShowsRootfolder, TableMoviesRootfolder, TableLanguagesProfiles, database, select,
TableShows, TableMovies)
from app.event_handler import event_stream
from .path_mappings import path_mappings
from sonarr.rootfolder import check_sonarr_rootfolder
@ -66,4 +69,19 @@ def get_health_issues():
else:
languages_profile_ids.append(items['id'])
# check if there's at least one languages profile created
languages_profiles_count = database.execute(select(func.count(TableLanguagesProfiles.profileId))).scalar()
series_with_profile = database.execute(select(func.count(TableShows.sonarrSeriesId))
.where(TableShows.profileId.is_not(None))).scalar()
movies_with_profile = database.execute(select(func.count(TableMovies.radarrId))
.where(TableMovies.profileId.is_not(None))).scalar()
if languages_profiles_count == 0:
health_issues.append({'object': 'Missing languages profile',
'issue': 'You must create at least one languages profile and assign it to your content.'})
elif languages_profiles_count > 0 and ((settings.general.use_sonarr and series_with_profile == 0) or
(settings.general.use_radarr and movies_with_profile == 0)):
health_issues.append({'object': 'No assigned languages profile',
'issue': 'Although you have created at least one languages profile, you must assign it '
'to your content.'})
return health_issues

@ -11,7 +11,9 @@ from app.config import settings
def check_credentials(user, pw, request, log_success=True):
ip_addr = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)
forwarded_for_ip_addr = request.environ.get('HTTP_X_FORWARDED_FOR')
real_ip_addr = request.environ.get('HTTP_X_REAL_IP')
ip_addr = forwarded_for_ip_addr or real_ip_addr or request.remote_addr
username = settings.auth.username
password = settings.auth.password
if hashlib.md5(f"{pw}".encode('utf-8')).hexdigest() == password and user == username:

@ -67,8 +67,10 @@ class NapiProjektProvider(Provider):
server_url = 'http://napiprojekt.pl/unit_napisy/dl.php'
subtitle_class = NapiProjektSubtitle
def __init__(self):
def __init__(self, only_authors=None, only_real_names=None):
self.session = None
self.only_authors = only_authors
self.only_real_names = only_real_names
def initialize(self):
self.session = Session()
@ -78,6 +80,8 @@ class NapiProjektProvider(Provider):
self.session.close()
def query(self, language, hash):
if self.only_authors or self.only_real_names:
return None
params = {
'v': 'dreambox',
'kolejka': 'false',

@ -524,7 +524,7 @@ class SZProviderPool(ProviderPool):
return True
def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False,
compute_score=None):
compute_score=None, use_original_format=False):
"""Download the best matching subtitles.
patch:
@ -543,6 +543,7 @@ class SZProviderPool(ProviderPool):
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:param bool use_original_format: preserve original subtitles format
:return: downloaded subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
@ -620,6 +621,9 @@ class SZProviderPool(ProviderPool):
subtitle, score)
continue
# make sure to preserve original subtitles format if requested
subtitle.use_original_format = use_original_format
# download
logger.debug("%r: Trying to download subtitle with matches %s, score: %s; release(s): %s", subtitle,
matches, score, subtitle.release_info)
@ -1213,10 +1217,10 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
continue
# create subtitle path
if subtitle.text and parse_for_hi_regex(subtitle_text=subtitle.text,
alpha3_language=subtitle.language.alpha3 if
(hasattr(subtitle, 'language') and hasattr(subtitle.language, 'alpha3'))
else None):
if (subtitle.text and subtitle.format == 'srt' and (hasattr(subtitle.language, 'hi') and
not subtitle.language.hi) and
parse_for_hi_regex(subtitle_text=subtitle.text, alpha3_language=subtitle.language.alpha3 if
(hasattr(subtitle, 'language') and hasattr(subtitle.language, 'alpha3')) else None)):
subtitle.language.hi = True
subtitle_path = get_subtitle_path(file_path, None if single else subtitle.language,
forced_tag=subtitle.language.forced,

@ -50,6 +50,7 @@ def download_best_subtitles(
hearing_impaired=False,
only_one=False,
compute_score=None,
use_original_format=False,
**kwargs
):
downloaded_subtitles = defaultdict(list)
@ -77,6 +78,7 @@ def download_best_subtitles(
hearing_impaired=hearing_impaired,
only_one=only_one,
compute_score=compute_score,
use_original_format=use_original_format,
)
logger.info("Downloaded %d subtitle(s)", len(subtitles))
downloaded_subtitles[video].extend(subtitles)

@ -11,7 +11,7 @@ from time import sleep
from math import ceil
from subliminal import Movie, Episode
from subliminal.exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, ProviderError
from subliminal.exceptions import ConfigurationError, ProviderError
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
@ -104,7 +104,7 @@ class AssrtSubtitle(Subtitle):
if 'subtitle_language' in guess:
langs.update(guess['subtitle_language'])
if self.language in langs:
self._defail = f
self._detail = f
return f
# second pass: keyword matching
@ -112,7 +112,7 @@ class AssrtSubtitle(Subtitle):
for f in files:
langs = set([Language.fromassrt(k) for k in codes if k in f['f']])
if self.language in langs:
self._defail = f
self._detail = f
return f
# fallback: pick up first file if nothing matches

@ -255,8 +255,6 @@ class EmbeddedSubtitlesProvider(Provider):
class _MemoizedFFprobeVideoContainer(FFprobeVideoContainer):
    # 128 is the default value for maxsize since Python 3.8. We set it here for previous versions.
@functools.lru_cache(maxsize=128)
def get_subtitles(self, *args, **kwargs):
return super().get_subtitles(*args, **kwargs)
@ -287,7 +285,7 @@ def _check_hi_fallback(streams, languages):
logger.debug("Checking HI fallback for '%r' language", language)
streams_ = [
stream for stream in streams if stream.language.alpha3 == language.alpha3
stream for stream in streams if stream.language.alpha3 == language.alpha3 and stream.language.forced == language.forced
]
if len(streams_) == 1 and streams_[0].disposition.hearing_impaired:
stream_ = streams_[0]

@ -1,6 +1,7 @@
# coding=utf-8
from __future__ import absolute_import
import logging
import re
from subliminal.providers.napiprojekt import NapiProjektProvider as _NapiProjektProvider, \
NapiProjektSubtitle as _NapiProjektSubtitle, get_subhash
@ -40,6 +41,11 @@ class NapiProjektProvider(_NapiProjektProvider):
video_types = (Episode, Movie)
subtitle_class = NapiProjektSubtitle
def __init__(self, only_authors=None, only_real_names=None):
super().__init__()
self.only_authors = only_authors
self.only_real_names = only_real_names
def query(self, language, hash):
params = {
'v': 'dreambox',
@ -66,10 +72,23 @@ class NapiProjektProvider(_NapiProjektProvider):
return subtitle
def list_subtitles(self, video, languages):
def flatten(l):
return [item for sublist in l for item in sublist]
return [s for s in [self.query(l, video.hashes['napiprojekt']) for l in languages] if s is not None] + \
flatten([self._scrape(video, l) for l in languages])
def flatten(nested_list):
"""Flatten a nested list."""
return [item for sublist in nested_list for item in sublist]
# Determine the source of subtitles based on conditions
hash_subtitles = []
if not (self.only_authors or self.only_real_names):
hash_subtitles = [
subtitle
for language in languages
if (subtitle := self.query(language, video.hashes.get('napiprojekt'))) is not None
]
# Scrape additional subtitles
scraped_subtitles = flatten([self._scrape(video, language) for language in languages])
return hash_subtitles + scraped_subtitles
def download_subtitle(self, subtitle):
if subtitle.content is not None:
@ -80,7 +99,8 @@ class NapiProjektProvider(_NapiProjektProvider):
if language.alpha2 != 'pl':
return []
title, matches = self._find_title(video)
if title == None:
if title is None:
return []
episode = f'-s{video.season:02d}e{video.episode:02d}' if isinstance(
video, Episode) else ''
@ -89,14 +109,59 @@ class NapiProjektProvider(_NapiProjektProvider):
response.raise_for_status()
soup = BeautifulSoup(response.content, 'html.parser')
subtitles = []
for link in soup.find_all('a'):
if 'class' in link.attrs and 'tableA' in link.attrs['class']:
hash = link.attrs['href'][len('napiprojekt:'):]
subtitles.append(
NapiProjektSubtitle(language,
hash,
release_info=str(link.contents[0]),
matches=matches | ({'season', 'episode'} if episode else set())))
# Find all rows with titles and napiprojekt links
rows = soup.find_all("tr", title=True)
for row in rows:
for link in row.find_all('a'):
if 'class' in link.attrs and 'tableA' in link.attrs['class']:
title = row['title']
hash = link.attrs['href'][len('napiprojekt:'):]
data = row.find_all('p')
size = data[1].contents[0] if len(data) > 1 and data[1].contents else ""
length = data[3].contents[0] if len(data) > 3 and data[3].contents else ""
author = data[4].contents[0] if len(data) > 4 and data[4].contents else ""
added = data[5].contents[0] if len(data) > 5 and data[5].contents else ""
if author == "":
match = re.search(r"<b>Autor:</b> (.*?)\(", title)
if match:
author = match.group(1).strip()
else:
author = ""
if self.only_authors:
if author.lower() in ["brak", "automat", "si", "chatgpt", "ai", "robot", "maszynowe", "tłumaczenie maszynowe"]:
continue
if self.only_real_names:
                        # Keep only authors whose name looks real: at least two uppercase letters plus a lowercase letter, or two separate words
if not (re.match(r'^(?=(?:.*[A-Z]){2})(?=.*[a-z]).*$', author) or
re.match(r'^\w+\s\w+$', author)):
continue
match = re.search(r"<b>Video rozdzielczość:</b> (.*?)<", title)
if match:
resolution = match.group(1).strip()
else:
resolution = ""
match = re.search(r"<b>Video FPS:</b> (.*?)<", title)
if match:
fps = match.group(1).strip()
else:
fps = ""
added_lenght = "Autor: " + author + " | " + resolution + " | " + fps + " | " + size + " | " + added + " | " + length
subtitles.append(
NapiProjektSubtitle(language,
hash,
                                            release_info=added_length,
matches=matches | ({'season', 'episode'} if episode else set())))
logger.debug(f'Found subtitles {subtitles}')
return subtitles
@ -114,15 +179,17 @@ class NapiProjektProvider(_NapiProjektProvider):
video, Episode) else video.imdb_id
def match_title_tag(
tag): return tag.name == 'a' and 'class' in tag.attrs and 'movieTitleCat' in tag.attrs['class'] and 'href' in tag.attrs
tag):
return tag.name == 'a' and 'class' in tag.attrs and 'movieTitleCat' in tag.attrs[
'class'] and 'href' in tag.attrs
if imdb_id:
for entry in soup.find_all(lambda tag: tag.name == 'div' and 'greyBoxCatcher' in tag['class']):
if entry.find_all(href=lambda href: href and href.startswith(f'https://www.imdb.com/title/{imdb_id}')):
for link in entry.find_all(match_title_tag):
return link.attrs['href'][len('napisy-'):], \
{'series', 'year', 'series_imdb_id'} if isinstance(
video, Episode) else {'title', 'year', 'imdb_id'}
{'series', 'year', 'series_imdb_id'} if isinstance(
video, Episode) else {'title', 'year', 'imdb_id'}
type = 'episode' if isinstance(video, Episode) else 'movie'
for link in soup.find_all(match_title_tag):

@ -3,7 +3,6 @@ from __future__ import absolute_import
import base64
import logging
import os
import traceback
import re
import zlib
import time
@ -411,6 +410,8 @@ def checked(fn, raise_api_limit=False):
except requests.RequestException as e:
status_code = e.response.status_code
if status_code == 503 and "Server under maintenance" in e.response.text:
status_code = 506
else:
status_code = int(response['status'][:3])
except:
@ -437,6 +438,8 @@ def checked(fn, raise_api_limit=False):
raise APIThrottled
if status_code == 503:
raise ServiceUnavailable(str(status_code))
if status_code == 506:
raise ServiceUnavailable("Server under maintenance")
if status_code != 200:
if response and "status" in response:
raise OpenSubtitlesError(response['status'])

@ -54,6 +54,7 @@ def fix_movie_naming(title):
custom_languages = {
'pt': 'pt-PT',
'zh': 'zh-CN',
'es-MX': 'ea',
}
@ -156,9 +157,10 @@ class OpenSubtitlesComProvider(ProviderRetryMixin, Provider):
"""OpenSubtitlesCom Provider"""
server_hostname = 'api.opensubtitles.com'
languages = {Language.fromopensubtitles(lang) for lang in language_converters['szopensubtitles'].codes}
languages = ({Language.fromietf("es-MX")} |
{Language.fromopensubtitles(lang) for lang in language_converters['szopensubtitles'].codes})
languages.update(set(Language.rebuild(lang, forced=True) for lang in languages))
languages.update(set(Language.rebuild(l, hi=True) for l in languages))
languages.update(set(Language.rebuild(lang, hi=True) for lang in languages))
video_types = (Episode, Movie)

@ -209,7 +209,7 @@ class PodnapisiProvider(_PodnapisiProvider, ProviderSubtitleArchiveMixin):
break
# exit if no results
if (not xml.find('pagination/results') or not xml.find('pagination/results').text or not
if (xml.find('pagination/results') is None or not xml.find('pagination/results').text or not
int(xml.find('pagination/results').text)):
logger.debug('No subtitles found')
break

@ -92,17 +92,19 @@ class RegieLiveProvider(Provider):
data=payload, headers=self.headers)
subtitles = []
if response.json()['cod'] == 200:
results_subs = response.json()['rezultate']
for film in results_subs:
for sub in results_subs[film]['subtitrari']:
subtitles.append(
RegieLiveSubtitle(
results_subs[film]['subtitrari'][sub]['titlu'],
video,
results_subs[film]['subtitrari'][sub]['url'],
results_subs[film]['subtitrari'][sub]['rating']['nota'],
language))
if response.status_code == 200:
results = response.json()
if len(results) > 0:
results_subs = results['rezultate']
for film in results_subs:
for sub in results_subs[film]['subtitrari']:
subtitles.append(
RegieLiveSubtitle(
results_subs[film]['subtitrari'][sub]['titlu'],
video,
results_subs[film]['subtitrari'][sub]['url'],
results_subs[film]['subtitrari'][sub]['rating']['nota'],
language))
return subtitles
def list_subtitles(self, video, languages):

@ -39,6 +39,7 @@ _SEASON_NUM_RE = re.compile(
)
_EPISODE_YEAR_RE = re.compile(r"\((?P<x>(19\d{2}|20[0-2]\d))\)")
_UNSUPPORTED_RE = re.compile(r"(extras|forzado(s)?|forced)\s?$", flags=re.IGNORECASE)
_VERSION_RESOLUTION = re.compile(r'id="vs">([^<]+)<\/div>')
logger = logging.getLogger(__name__)
@ -161,6 +162,16 @@ class SubdivxSubtitlesProvider(Provider):
return subtitles
def _get_vs(self):
# t["buscar" + $("#vs").html().replace(".", "").replace("v", "")] = $("#buscar").val(),
res = self.session.get('https://subdivx.com/')
results = _VERSION_RESOLUTION.findall(res.text)
        if not results:
            return -1
        version = results[0]
        version = version.replace('.', '').replace('v', '')
return version
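As the commented JavaScript hints, the site embeds a version string in the search form's field name, so the provider scrapes it before building the query payload. A hedged sketch of how that dynamic key ends up in the request (regex copied from `_VERSION_RESOLUTION` above; the HTML fragment, query text, and token are fabricated):

```python
import re

_VERSION_RESOLUTION = re.compile(r'id="vs">([^<]+)</div>')

# Fabricated homepage fragment; the real page is fetched with self.session.get('https://subdivx.com/').
homepage_html = '<div id="vs">v4.19</div>'

results = _VERSION_RESOLUTION.findall(homepage_html)
version = results[0].replace('.', '').replace('v', '') if results else -1  # e.g. "419"

# The scraped version becomes part of the search field name expected by the site.
payload = {"tabla": "resultados", "filtros": "", f"buscar{version}": "some movie title", "token": "dummy-token"}
```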
def _query_results(self, query, video):
token_link = f"{_SERVER_URL}/inc/gt.php?gt=1"
@ -180,8 +191,8 @@ class SubdivxSubtitlesProvider(Provider):
raise ProviderError("Response doesn't include a token")
search_link = f"{_SERVER_URL}/inc/ajax.php"
payload = {"tabla": "resultados", "filtros": "", "buscar393": query, "token": token}
version = self._get_vs()
payload = {"tabla": "resultados", "filtros": "", f"buscar{version}": query, "token": token}
logger.debug("Query: %s", query)

@ -188,7 +188,11 @@ class SubdlProvider(ProviderRetryMixin, Provider):
if len(result['subtitles']):
for item in result['subtitles']:
if item.get('episode_from', False) == item.get('episode_end', False): # ignore season packs
if (isinstance(self.video, Episode) and
item.get('episode_from', False) != item.get('episode_end', False)):
# ignore season packs
continue
else:
subtitle = SubdlSubtitle(
language=Language.fromsubdl(item['language']),
forced=self._is_forced(item),

@ -6,6 +6,7 @@ import os
from zipfile import ZipFile, is_zipfile
from requests import Session
from guessit import guessit
from requests.exceptions import JSONDecodeError
from subliminal import Movie
from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending
@ -91,7 +92,11 @@ class SubsynchroProvider(Provider):
result.raise_for_status()
subtitles = []
results = result.json() or {}
try:
results = result.json()
except JSONDecodeError:
results = {}
status_ = results.get("status")

@ -282,4 +282,7 @@ class SubtitrarinoiProvider(Provider, ProviderSubtitleArchiveMixin):
r.raise_for_status()
archive = get_archive_from_bytes(r.content)
subtitle.content = get_subtitle_from_archive(archive, episode=subtitle.desired_episode)
if archive:
subtitle.content = get_subtitle_from_archive(archive, episode=subtitle.desired_episode)
else:
subtitle.content = r.content

@ -56,7 +56,7 @@ class TitloviSubtitle(Subtitle):
provider_name = 'titlovi'
def __init__(self, language, download_link, sid, releases, title, alt_title=None, season=None,
episode=None, year=None, rating=None, download_count=None, asked_for_release_group=None, asked_for_episode=None):
episode=None, year=None, rating=None, download_count=None, asked_for_release_group=None, asked_for_episode=None, is_pack=False):
super(TitloviSubtitle, self).__init__(language)
self.sid = sid
self.releases = self.release_info = releases
@ -71,6 +71,7 @@ class TitloviSubtitle(Subtitle):
self.matches = None
self.asked_for_release_group = asked_for_release_group
self.asked_for_episode = asked_for_episode
self.is_pack = is_pack
def __repr__(self):
if self.season and self.episode:
@ -216,7 +217,7 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
is_episode = False
if season and episode:
is_episode = True
#search_params['season'] = season
search_params['season'] = season
#search_params['episode'] = episode
#if year:
# search_params['year'] = year
@ -238,6 +239,18 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
resp_json = response.json()
if resp_json['SubtitleResults']:
query_results.extend(resp_json['SubtitleResults'])
                # if there are more pages, loop through them; if there are more than 3 pages, stop at 3
if resp_json['PagesAvailable'] > 1:
for page in range(2, min(4, resp_json['PagesAvailable'] + 1)):
search_params['pg'] = page
response = self.get_result(self.api_search_url, search_params)
resp_json = response.json()
if resp_json['SubtitleResults']:
query_results.extend(resp_json['SubtitleResults'])
else:
break
except TooManyRequests:
raise
except Exception as e:
@ -258,15 +271,19 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
# skip if season and episode number does not match
if season and season != sub.get('Season'):
continue
elif episode and episode != sub.get('Episode'):
elif episode and episode != sub.get('Episode') and sub.get('Episode') != 0:
continue
is_pack = False
if sub.get('Episode') == 0:
is_pack = True
subtitle = self.subtitle_class(Language.fromtitlovi(sub.get('Lang')), sub.get('Link'), sub.get('Id'), sub.get('Release'), _title,
alt_title=alt_title, season=sub.get('Season'), episode=sub.get('Episode'),
alt_title=alt_title, season=sub.get('Season'), episode=episode,
year=sub.get('Year'), rating=sub.get('Rating'),
download_count=sub.get('DownloadCount'),
asked_for_release_group=video.release_group,
asked_for_episode=episode)
asked_for_episode=episode, is_pack=is_pack)
else:
subtitle = self.subtitle_class(Language.fromtitlovi(sub.get('Lang')), sub.get('Link'), sub.get('Id'), sub.get('Release'), _title,
alt_title=alt_title, year=sub.get('Year'), rating=sub.get('Rating'),
@ -321,13 +338,25 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
subs_in_archive = archive.namelist()
# if Serbian lat and cyr versions are packed together, try to find right version
if len(subs_in_archive) > 1 and (subtitle.language == 'sr' or subtitle.language == 'sr-Cyrl'):
if len(subs_in_archive) > 1 and subtitle.is_pack:
# if subtitle is a pack, try to find the right subtitle by format SSxEE or SxxEyy
self.get_subtitle_from_pack(subtitle, subs_in_archive, archive)
elif len(subs_in_archive) > 1 and (subtitle.language == 'sr' or subtitle.language == 'sr-Cyrl'):
# if Serbian lat and cyr versions are packed together, try to find right version
self.get_subtitle_from_bundled_archive(subtitle, subs_in_archive, archive)
else:
# use default method for everything else
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
def get_subtitle_from_pack(self, subtitle, subs_in_archive, archive):
        # try to find the right subtitle; its name should contain the season and episode number in the format SSxEE or SxxEyy
format1 = '%.2dx%.2d' % (subtitle.season, subtitle.episode)
format2 = 's%.2de%.2d' % (subtitle.season, subtitle.episode)
for sub_name in subs_in_archive:
if format1 in sub_name.lower() or format2 in sub_name.lower():
subtitle.content = fix_line_ending(archive.read(sub_name))
return
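A short, self-contained sketch of the pack matching above; the archive member names are invented, but the SSxEE / SxxEyy formatting mirrors `get_subtitle_from_pack`:

```python
# Illustration of the season-pack matching used by get_subtitle_from_pack.
season, episode = 1, 4
format1 = '%.2dx%.2d' % (season, episode)   # "01x04"
format2 = 's%.2de%.2d' % (season, episode)  # "s01e04"

subs_in_archive = [  # invented file names for the example
    "Show.S01E03.srt",
    "Show.S01E04.srt",
    "Show.01x05.srt",
]

wanted = next((name for name in subs_in_archive
               if format1 in name.lower() or format2 in name.lower()), None)
assert wanted == "Show.S01E04.srt"
```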
def get_subtitle_from_bundled_archive(self, subtitle, subs_in_archive, archive):
sr_lat_subs = []
sr_cyr_subs = []

@ -5,18 +5,18 @@ import os
import io
import logging
import re
import rarfile
from random import randint
from zipfile import ZipFile, is_zipfile
from rarfile import RarFile, is_rarfile
from guessit import guessit
from time import sleep
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
from subliminal.exceptions import ProviderError
from subliminal_patch.exceptions import TooManyRequests
from subliminal.providers import ParserBeautifulSoup
from subliminal.video import Episode, Movie
from subliminal.subtitle import SUBTITLE_EXTENSIONS
@ -147,6 +147,10 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
params = self.getQueryParams(imdb_id, title, language)
search_response = self.session.get(self.api_url, params=params, timeout=15)
if search_response.status_code == 404 and 'Too many requests' in search_response.content:
raise TooManyRequests(search_response.content)
search_response.raise_for_status()
if not search_response.content:
@ -215,6 +219,8 @@ class TitrariProvider(Provider, ProviderSubtitleArchiveMixin):
ordered_subs = self.order(subtitles)
sleep(5) # prevent being blocked for too many requests
return ordered_subs
@staticmethod

@ -24,6 +24,8 @@ from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal_patch.score import framerate_equal
from dogpile.cache.api import NO_VALUE
from subzero.language import Language
@ -53,6 +55,8 @@ class TitulkySubtitle(Subtitle):
approved,
page_link,
download_link,
fps,
skip_wrong_fps,
asked_for_episode=None):
super().__init__(language, page_link=page_link)
@ -67,6 +71,8 @@ class TitulkySubtitle(Subtitle):
self.page_link = page_link
self.uploader = uploader
self.download_link = download_link
self.fps = fps if skip_wrong_fps else None # This attribute should be ignored if skip_wrong_fps is false
self.skip_wrong_fps = skip_wrong_fps
self.asked_for_episode = asked_for_episode
self.matches = None
@ -78,6 +84,10 @@ class TitulkySubtitle(Subtitle):
matches = set()
media_type = 'movie' if isinstance(video, Movie) else 'episode'
if self.skip_wrong_fps and video.fps and self.fps and not framerate_equal(video.fps, self.fps):
logger.debug(f"Titulky.com: Wrong FPS (expected: {video.fps}, got: {self.fps}, lowering score massively)")
return set()
if media_type == 'episode':
# match imdb_id of a series
if video.series_imdb_id and video.series_imdb_id == self.imdb_id:
@ -120,16 +130,19 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
def __init__(self,
username=None,
password=None,
approved_only=None):
approved_only=None,
skip_wrong_fps=None):
if not all([username, password]):
raise ConfigurationError("Username and password must be specified!")
if type(approved_only) is not bool:
raise ConfigurationError(f"Approved_only {approved_only} must be a boolean!")
if type(skip_wrong_fps) is not bool:
raise ConfigurationError(f"Skip_wrong_fps {skip_wrong_fps} must be a boolean!")
self.username = username
self.password = password
self.approved_only = approved_only
self.skip_wrong_fps = skip_wrong_fps
self.session = None
@ -268,6 +281,48 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
return result
# Retrieves the fps value given subtitles id from the details page and caches it.
def retrieve_subtitles_fps(self, subtitles_id):
cache_key = f"titulky_subs-{subtitles_id}_fps"
cached_fps_value = cache.get(cache_key)
        if cached_fps_value != NO_VALUE:
logger.debug(f"Titulky.com: Reusing cached fps value {cached_fps_value} for subtitles with id {subtitles_id}")
return cached_fps_value
params = {
'action': 'detail',
'id': subtitles_id
}
browse_url = self.build_url(params)
html_src = self.fetch_page(browse_url, allow_redirects=True)
browse_page_soup = ParserBeautifulSoup(html_src, ['lxml', 'html.parser'])
fps_container = browse_page_soup.select_one("div.ulozil:has(> img[src='img/ico/Movieroll.png'])")
        if fps_container is None:
logger.debug("Titulky.com: Could not manage to find the FPS container in the details page")
cache.set(cache_key, None)
return None
fps_text_components = fps_container.get_text(strip=True).split()
# Check if the container contains valid fps data
        if len(fps_text_components) < 2 or fps_text_components[1].lower() != "fps":
logger.debug(f"Titulky.com: Could not determine FPS value for subtitles with id {subtitles_id}")
cache.set(cache_key, None)
return None
fps_text = fps_text_components[0].replace(",", ".") # Fix decimal comma to decimal point
try:
fps = float(fps_text)
logger.debug(f"Titulky.com: Retrieved FPS value {fps} from details page for subtitles with id {subtitles_id}")
cache.set(cache_key, fps)
return fps
        except ValueError:
logger.debug(f"Titulky.com: There was an error parsing FPS value string for subtitles with id {subtitles_id}")
cache.set(cache_key, None)
return None
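The method above memoizes the scraped FPS per subtitle id so repeated listings don't refetch the details page, and it caches failures (None) as well. A minimal cache-aside sketch of that pattern, using a plain dict instead of Bazarr's dogpile cache and an invented fetch function:

```python
# Cache-aside sketch of retrieve_subtitles_fps; names below are illustrative, not Bazarr APIs.
_NO_VALUE = object()
_fps_cache = {}

def fetch_fps_from_details_page(subtitles_id):
    # Placeholder for the real HTTP request + BeautifulSoup parsing; returns a float or None.
    return 23.976

def get_fps(subtitles_id):
    cached = _fps_cache.get(subtitles_id, _NO_VALUE)
    if cached is not _NO_VALUE:
        return cached  # may legitimately be None when the details page had no FPS
    fps = fetch_fps_from_details_page(subtitles_id)
    _fps_cache[subtitles_id] = fps  # cache misses too, so failures aren't retried on every listing
    return fps
```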
"""
    There are multiple ways to find subtitles on Titulky.com; however, we are
going to utilize a page that lists all available subtitles for all episodes in a season
@ -377,7 +432,8 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
'language': sub_language,
'uploader': uploader,
'details_link': details_link,
'download_link': download_link
'download_link': download_link,
'fps': self.retrieve_subtitles_fps(sub_id) if self.skip_wrong_fps else None,
}
# If this row contains the first subtitles to an episode number,
@ -413,7 +469,9 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
sub_info['approved'],
sub_info['details_link'],
sub_info['download_link'],
asked_for_episode=(media_type is SubtitlesType.EPISODE)
sub_info['fps'],
self.skip_wrong_fps,
asked_for_episode=(media_type is SubtitlesType.EPISODE),
)
subtitles.append(subtitle_instance)

@ -5,6 +5,7 @@ from datetime import timedelta
from requests import Session
from requests.exceptions import JSONDecodeError
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.providers import Provider
from subliminal import __short_version__
@ -206,7 +207,10 @@ class WhisperAISubtitle(Subtitle):
@property
def id(self):
return self.video.original_name
        # Construct a unique id, otherwise the provider pool will think the
        # subtitles are all the same and drop all except the first one.
        # This is important for language profiles with more than one language.
return f"{self.video.original_name}_{self.task}_{str(self.language)}"
def get_matches(self, video):
matches = set()
@ -229,7 +233,7 @@ class WhisperAIProvider(Provider):
video_types = (Episode, Movie)
def __init__(self, endpoint=None, response=None, timeout=None, ffmpeg_path=None, loglevel=None):
def __init__(self, endpoint=None, response=None, timeout=None, ffmpeg_path=None, pass_video_name=None, loglevel=None):
set_log_level(loglevel)
if not endpoint:
raise ConfigurationError('Whisper Web Service Endpoint must be provided')
@ -242,12 +246,16 @@ class WhisperAIProvider(Provider):
if not ffmpeg_path:
raise ConfigurationError("ffmpeg path must be provided")
if pass_video_name is None:
raise ConfigurationError('Whisper Web Service Pass Video Name option must be provided')
self.endpoint = endpoint.rstrip("/")
self.response = int(response)
self.timeout = int(timeout)
self.session = None
self.ffmpeg_path = ffmpeg_path
self.pass_video_name = pass_video_name
def initialize(self):
self.session = Session()
@ -269,10 +277,19 @@ class WhisperAIProvider(Provider):
params={'encode': 'false'},
files={'audio_file': out},
timeout=(self.response, self.timeout))
try:
results = r.json()
except JSONDecodeError:
results = {}
if len(results) == 0:
logger.info(f"Whisper returned empty response when detecting language")
return None
logger.debug(f"Whisper detected language of {path} as {r.json()['detected_language']}")
logger.debug(f"Whisper detected language of {path} as {results['detected_language']}")
return whisper_get_language(r.json()["language_code"], r.json()["detected_language"])
return whisper_get_language(results["language_code"], results["detected_language"])
def query(self, language, video):
if language not in self.languages:
@ -356,9 +373,11 @@ class WhisperAIProvider(Provider):
logger.info(f'Starting WhisperAI {subtitle.task} to {language_from_alpha3(output_language)} for {subtitle.video.original_path}')
startTime = time.time()
video_name = subtitle.video.original_path if self.pass_video_name else None
r = self.session.post(f"{self.endpoint}/asr",
params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false'},
params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false',
'video_file': {video_name}},
files={'audio_file': out},
timeout=(self.response, self.timeout))

@ -313,13 +313,10 @@ class Subtitle(Subtitle_):
logger.info("Got FPS from MicroDVD subtitle: %s", subs.fps)
else:
logger.info("Got format: %s", subs.format)
self._og_format = subs.format
self._is_valid = True
# if self.use_original_format:
# self.format = subs.format
# self._is_valid = True
# logger.debug("Using original format")
return True
if self.use_original_format:
self._og_format = subs.format
self._is_valid = True
return True
except pysubs2.UnknownFPSError:
# if parsing failed, use frame rate from provider

File diff suppressed because it is too large

@ -13,12 +13,12 @@
},
"private": true,
"dependencies": {
"@mantine/core": "^7.12.2",
"@mantine/dropzone": "^7.12.2",
"@mantine/form": "^7.12.2",
"@mantine/hooks": "^7.12.2",
"@mantine/modals": "^7.12.2",
"@mantine/notifications": "^7.12.2",
"@mantine/core": "^7.14.3",
"@mantine/dropzone": "^7.14.3",
"@mantine/form": "^7.14.3",
"@mantine/hooks": "^7.14.3",
"@mantine/modals": "^7.14.3",
"@mantine/notifications": "^7.14.3",
"@tanstack/react-query": "^5.40.1",
"@tanstack/react-table": "^8.19.2",
"axios": "^1.7.4",
@ -30,10 +30,10 @@
},
"devDependencies": {
"@fontsource/roboto": "^5.0.12",
"@fortawesome/fontawesome-svg-core": "^6.6.0",
"@fortawesome/free-brands-svg-icons": "^6.6.0",
"@fortawesome/free-regular-svg-icons": "^6.6.0",
"@fortawesome/free-solid-svg-icons": "^6.6.0",
"@fortawesome/fontawesome-svg-core": "^6.7.1",
"@fortawesome/free-brands-svg-icons": "^6.7.1",
"@fortawesome/free-regular-svg-icons": "^6.7.1",
"@fortawesome/free-solid-svg-icons": "^6.7.1",
"@fortawesome/react-fontawesome": "^0.2.2",
"@tanstack/react-query-devtools": "^5.40.1",
"@testing-library/jest-dom": "^6.4.2",
@ -42,7 +42,7 @@
"@types/jest": "^29.5.12",
"@types/lodash": "^4.17.1",
"@types/node": "^20.12.6",
"@types/react": "^18.3.5",
"@types/react": "^18.3.11",
"@types/react-dom": "^18.3.0",
"@typescript-eslint/eslint-plugin": "^7.16.0",
"@typescript-eslint/parser": "^7.16.0",
@ -67,7 +67,7 @@
"recharts": "^2.12.7",
"sass": "^1.74.1",
"typescript": "^5.4.4",
"vite": "^5.2.8",
"vite": "^5.4.8",
"vite-plugin-checker": "^0.6.4",
"vite-plugin-pwa": "^0.20.0",
"vitest": "^1.2.2",

@ -39,20 +39,20 @@ const AppHeader: FunctionComponent = () => {
<AppShell.Header p="md" className={styles.header}>
<Group justify="space-between" wrap="nowrap">
<Group wrap="nowrap">
<Anchor onClick={goHome} visibleFrom="sm">
<Avatar
alt="brand"
size={32}
src={`${Environment.baseUrl}/images/logo64.png`}
></Avatar>
</Anchor>
<Burger
opened={showed}
onClick={() => show(!showed)}
size="sm"
hiddenFrom="sm"
></Burger>
<Badge size="lg" radius="sm" variant="brand">
<Anchor onClick={goHome}>
<Avatar
alt="brand"
size={32}
src={`${Environment.baseUrl}/images/logo64.png`}
></Avatar>
</Anchor>
<Badge size="lg" radius="sm" variant="brand" visibleFrom="sm">
Bazarr
</Badge>
</Group>

@ -0,0 +1,9 @@
import { describe, it } from "vitest";
import { Search } from "@/components/index";
import { render } from "@/tests";
describe("Search Bar", () => {
it.skip("should render the closed empty state", () => {
render(<Search />);
});
});

@ -1,48 +1,53 @@
import { FunctionComponent, useMemo, useState } from "react";
import { useNavigate } from "react-router-dom";
import { Autocomplete, ComboboxItem, OptionsFilter, Text } from "@mantine/core";
import {
ComboboxItem,
em,
Flex,
Image,
OptionsFilter,
Select,
Text,
} from "@mantine/core";
import { useMediaQuery } from "@mantine/hooks";
import { faSearch } from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { chain, includes } from "lodash";
import { useServerSearch } from "@/apis/hooks";
import { useDebouncedValue } from "@/utilities";
type SearchResultItem = {
value: string;
label: string;
link: string;
poster: string;
type: string;
};
function useSearch(query: string) {
const debouncedQuery = useDebouncedValue(query, 500);
const { data } = useServerSearch(debouncedQuery, debouncedQuery.length >= 0);
const duplicates = chain(data)
.groupBy((item) => `${item.title} (${item.year})`)
.filter((group) => group.length > 1)
.map((group) => `${group[0].title} (${group[0].year})`)
.value();
return useMemo<SearchResultItem[]>(
() =>
data?.map((v) => {
const { link, displayName } = (() => {
const hasDuplicate = includes(duplicates, `${v.title} (${v.year})`);
const { link, label, poster, type, value } = (() => {
if (v.sonarrSeriesId) {
return {
poster: v.poster,
link: `/series/${v.sonarrSeriesId}`,
displayName: hasDuplicate
? `${v.title} (${v.year}) (S)`
: `${v.title} (${v.year})`,
type: "show",
label: `${v.title} (${v.year})`,
value: `s-${v.sonarrSeriesId}`,
};
}
if (v.radarrId) {
return {
poster: v.poster,
link: `/movies/${v.radarrId}`,
displayName: hasDuplicate
? `${v.title} (${v.year}) (M)`
: `${v.title} (${v.year})`,
type: "movie",
value: `m-${v.radarrId}`,
label: `${v.title} (${v.year})`,
};
}
@ -50,11 +55,14 @@ function useSearch(query: string) {
})();
return {
value: displayName,
link,
value: value,
poster: poster,
label: label,
type: type,
link: link,
};
}) ?? [],
[data, duplicates],
[data],
);
}
@ -64,8 +72,8 @@ const optionsFilter: OptionsFilter = ({ options, search }) => {
return (options as ComboboxItem[]).filter((option) => {
return (
option.value.toLowerCase().includes(lowercaseSearch) ||
option.value
option.label.toLowerCase().includes(lowercaseSearch) ||
option.label
.normalize("NFD")
.replace(/[\u0300-\u036f]/g, "")
.toLowerCase()
@ -80,23 +88,41 @@ const Search: FunctionComponent = () => {
const results = useSearch(query);
const isMobile = useMediaQuery(`(max-width: ${em(750)})`);
return (
<Autocomplete
leftSection={<FontAwesomeIcon icon={faSearch} />}
renderOption={(input) => <Text p="xs">{input.option.value}</Text>}
<Select
placeholder="Search"
withCheckIcon={false}
leftSection={<FontAwesomeIcon icon={faSearch} />}
rightSection={<></>}
size="sm"
data={results}
value={query}
searchable
scrollAreaProps={{ type: "auto" }}
maxDropdownHeight={400}
onChange={setQuery}
data={results}
value={query}
onSearchChange={(a) => {
setQuery(a);
}}
onBlur={() => setQuery("")}
filter={optionsFilter}
onOptionSubmit={(option) =>
navigate(results.find((a) => a.value === option)?.link || "/")
}
></Autocomplete>
onOptionSubmit={(option) => {
navigate(results.find((a) => a.value === option)?.link || "/");
}}
renderOption={(input) => {
const result = results.find((r) => r.value === input.option.value);
return (
<Flex>
<Image src={result?.poster} w={55} h={70} />
<Text size={isMobile ? "xs" : "md"} pl="xs" pr="xs" lineClamp={3}>
{result?.label}
</Text>
</Flex>
);
}}
/>
);
};

@ -1,12 +1,21 @@
import { FunctionComponent } from "react";
import { Group, List, Popover, Stack, Text } from "@mantine/core";
import { useHover } from "@mantine/hooks";
import {
faCheck,
Alert,
em,
Flex,
Group,
List,
Popover,
Stack,
Text,
} from "@mantine/core";
import { useDisclosure, useMediaQuery } from "@mantine/hooks";
import {
faCheckCircle,
faExclamationCircle,
faListCheck,
faTimes,
faMinus,
faPlus,
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { BuildKey } from "@/utilities";
@ -24,7 +33,9 @@ const StateIcon: FunctionComponent<StateIconProps> = ({
}) => {
const hasIssues = dont.length > 0;
const { hovered, ref } = useHover();
const [opened, { close, open }] = useDisclosure(false);
const isMobile = useMediaQuery(`(max-width: ${em(750)})`);
const PopoverTarget: FunctionComponent = () => {
if (isHistory) {
@ -41,18 +52,30 @@ const StateIcon: FunctionComponent<StateIconProps> = ({
};
return (
<Popover opened={hovered} position="top" width={360} withArrow withinPortal>
<Popover position="left" opened={opened} width={360} withArrow withinPortal>
<Popover.Target>
<Text ref={ref}>
<Text onMouseEnter={open} onMouseLeave={close}>
<PopoverTarget />
</Text>
</Popover.Target>
<Popover.Dropdown>
<Text size="xl" ta="center">
Scoring Criteria
</Text>
{isMobile ? null : (
<Alert variant="light" color="blue" mb="sm">
              Attributes that do not match will not prevent the subtitle from
              being downloaded; they are only used to score it.
</Alert>
)}
<Group justify="left" gap="xl" wrap="nowrap" grow>
<Stack align="flex-start" justify="flex-start" gap="xs" mb="auto">
<Text c="green">
<FontAwesomeIcon icon={faCheck}></FontAwesomeIcon>
</Text>
<Flex gap="sm">
<Text c="green">
<FontAwesomeIcon icon={faPlus}></FontAwesomeIcon>
</Text>
<Text c="green">Matching</Text>
</Flex>
<List>
{matches.map((v, idx) => (
<List.Item key={BuildKey(idx, v, "match")}>{v}</List.Item>
@ -60,9 +83,12 @@ const StateIcon: FunctionComponent<StateIconProps> = ({
</List>
</Stack>
<Stack align="flex-start" justify="flex-start" gap="xs" mb="auto">
<Text c="yellow">
<FontAwesomeIcon icon={faTimes}></FontAwesomeIcon>
</Text>
<Flex gap="sm">
<Text c="yellow">
<FontAwesomeIcon icon={faMinus}></FontAwesomeIcon>
</Text>
<Text c="yellow">Not Matching</Text>
</Flex>
<List>
{dont.map((v, idx) => (
<List.Item key={BuildKey(idx, v, "miss")}>{v}</List.Item>

@ -1,6 +1,5 @@
import { FunctionComponent, ReactElement } from "react";
import { Tooltip, TooltipProps } from "@mantine/core";
import { useHover } from "@mantine/hooks";
import { isNull, isUndefined } from "lodash";
interface TextPopoverProps {
@ -14,20 +13,18 @@ const TextPopover: FunctionComponent<TextPopoverProps> = ({
text,
tooltip,
}) => {
const { hovered, ref } = useHover();
if (isNull(text) || isUndefined(text)) {
return children;
}
return (
<Tooltip
opened={hovered}
label={text}
{...tooltip}
style={{ textWrap: "wrap" }}
events={{ hover: true, focus: false, touch: true }}
>
<div ref={ref}>{children}</div>
<div>{children}</div>
</Tooltip>
);
};

@ -65,7 +65,12 @@ const HistoryIcon: FunctionComponent<{
if (icon) {
return (
<Tooltip label={label} openDelay={500} position="right">
<Tooltip
label={label}
openDelay={500}
position="right"
events={{ hover: true, focus: false, touch: true }}
>
<FontAwesomeIcon
aria-label={label}
title={title}

@ -98,7 +98,11 @@ export default function BaseTable<T extends object>(props: BaseTableProps<T>) {
return (
<Box className={styles.container}>
<Table className={styles.table} striped={tableStyles?.striped ?? true}>
<Table
className={styles.table}
highlightOnHover
striped={tableStyles?.striped ?? true}
>
<Table.Thead hidden={tableStyles?.hideHeader}>
{instance.getHeaderGroups().map((headerGroup) => (
<Table.Tr key={headerGroup.id}>

@ -107,27 +107,32 @@ class TaskDispatcher {
public updateProgress(items: Site.Progress[]) {
items.forEach((item) => {
// TODO: FIX ME!
item.value += 1;
if (item.value >= item.count && this.progress[item.id]) {
updateNotification(notification.progress.end(item.id, item.header));
delete this.progress[item.id];
} else if (item.value > 1 && this.progress[item.id]) {
updateNotification(
notification.progress.update(
item.id,
item.header,
item.name,
item.value,
item.count,
),
);
} else if (item.value > 1 && this.progress[item.id] === undefined) {
if (this.progress[item.id] === undefined) {
showNotification(notification.progress.pending(item.id, item.header));
this.progress[item.id] = true;
setTimeout(() => this.updateProgress([item]), 1000);
return;
}
if (item.value >= item.count) {
updateNotification(notification.progress.end(item.id, item.header));
delete this.progress[item.id];
return;
}
item.value += 1;
updateNotification(
notification.progress.update(
item.id,
item.header,
item.name,
item.value,
item.count,
),
);
});
}

@ -13,12 +13,16 @@ import { showNotification } from "@mantine/notifications";
import {
faAdjust,
faBriefcase,
faCalendar,
faCircleChevronDown,
faCircleChevronRight,
faCloudUploadAlt,
faHdd,
faPlay,
faSearch,
faStop,
faSync,
faTriangleExclamation,
faWrench,
} from "@fortawesome/free-solid-svg-icons";
import { Table as TableInstance } from "@tanstack/table-core/build/lib/types";
@ -62,6 +66,18 @@ const SeriesEpisodesView: FunctionComponent = () => {
icon: faHdd,
text: `${series?.episodeFileCount} files`,
},
{
icon: faTriangleExclamation,
text: `${series?.episodeMissingCount} missing subtitles`,
},
{
icon: series?.ended ? faStop : faPlay,
text: series?.ended ? "Ended" : "Continuing",
},
{
icon: faCalendar,
text: `Last ${series?.ended ? "aired on" : "known airdate"}: ${series?.lastAired}`,
},
{
icon: faAdjust,
text: series?.seriesType ?? "",
@ -151,6 +167,7 @@ const SeriesEpisodesView: FunctionComponent = () => {
series.profileId === null ||
!available
}
loading={hasTask}
>
Search
</Toolbox.Button>
@ -179,7 +196,8 @@ const SeriesEpisodesView: FunctionComponent = () => {
series === undefined ||
series.episodeFileCount === 0 ||
series.profileId === null ||
!available
!available ||
hasTask
}
icon={faCloudUploadAlt}
onClick={() => openDropzone.current?.()}

@ -116,7 +116,7 @@ const MoviesHistoryView: FunctionComponent = () => {
},
},
{
header: "Upgrade",
header: "Upgradable",
accessorKey: "upgradable",
cell: ({
row: {

@ -139,7 +139,7 @@ const SeriesHistoryView: FunctionComponent = () => {
},
},
{
header: "Upgrade",
header: "Upgradable",
accessorKey: "upgradable",
cell: ({
row: {

@ -141,6 +141,7 @@ const MovieDetailView: FunctionComponent = () => {
<Toolbox.Button
icon={faSearch}
disabled={!isNumber(movie?.profileId)}
loading={hasTask}
onClick={() => {
if (movie) {
task.create(movie.title, TaskGroup.SearchSubtitle, action, {

@ -1,9 +1,14 @@
import { FunctionComponent, useMemo } from "react";
import { Link } from "react-router-dom";
import { Anchor, Container, Progress } from "@mantine/core";
import { Anchor, Container, Group, Progress } from "@mantine/core";
import { useDocumentTitle } from "@mantine/hooks";
import { faBookmark as farBookmark } from "@fortawesome/free-regular-svg-icons";
import { faBookmark, faWrench } from "@fortawesome/free-solid-svg-icons";
import {
faBookmark,
faPlay,
faStop,
faWrench,
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { ColumnDef } from "@tanstack/react-table";
import { useSeriesModification, useSeriesPagination } from "@/apis/hooks";
@ -23,16 +28,19 @@ const SeriesView: FunctionComponent = () => {
const columns = useMemo<ColumnDef<Item.Series>[]>(
() => [
{
id: "monitored",
cell: ({
row: {
original: { monitored },
},
}) => (
<FontAwesomeIcon
title={monitored ? "monitored" : "unmonitored"}
icon={monitored ? faBookmark : farBookmark}
></FontAwesomeIcon>
id: "status",
cell: ({ row: { original } }) => (
<Group gap="xs" wrap="nowrap">
<FontAwesomeIcon
title={original.monitored ? "monitored" : "unmonitored"}
icon={original.monitored ? faBookmark : farBookmark}
></FontAwesomeIcon>
<FontAwesomeIcon
title={original.ended ? "Ended" : "Continuing"}
icon={original.ended ? faStop : faPlay}
></FontAwesomeIcon>
</Group>
),
},
{

@ -161,7 +161,7 @@ const SettingsLanguagesView: FunctionComponent = () => {
empty if you don't want Bazarr to remove language profiles.
</Message>
</Section>
<Section header="Default Settings">
<Section header="Default Language Profiles For Newly Added Shows">
<Check
label="Series"
settingKey="settings-general-serie_default_enabled"

@ -1,4 +1,5 @@
import {
Fragment,
FunctionComponent,
useCallback,
useMemo,
@ -42,7 +43,7 @@ import {
} from "@/pages/Settings/utilities/SettingsProvider";
import { BuildKey, useSelectorOptions } from "@/utilities";
import { ASSERT } from "@/utilities/console";
import { ProviderInfo } from "./list";
import { ProviderInfo, ProviderList } from "./list";
type SettingsKey =
| "settings-general-enabled_providers"
@ -151,6 +152,27 @@ const SelectItem: AutocompleteProps["renderOption"] = ({ option }) => {
);
};
const validation = ProviderList.map((provider) => {
return provider.inputs
?.map((input) => {
if (input.validation === undefined) {
return null;
}
return {
[`settings-${provider.key}-${input.key}`]: input.validation?.rule,
};
})
.filter((input) => input && Object.keys(input).length > 0)
.reduce((acc, curr) => {
return { ...acc, ...curr };
}, {});
})
.filter((provider) => provider && Object.keys(provider).length > 0)
.reduce((acc, item) => {
return { ...acc, ...item };
}, {});
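For context, the reduce chain above flattens every provider input that declares a validation rule into one object keyed by setting key; a rough sketch of the resulting shape, with hypothetical provider and input keys:
// Hypothetical example of what the flattened object can contain:
const exampleValidation: Record<string, (value: string) => string | null> = {
  "settings-someprovider-username": (value) =>
    value.includes("@") ? "Do not use your e-mail." : null,
};
// Passed to useForm as validate.settings, Mantine runs each rule against the
// matching field under form.values.settings and exposes any returned message
// at form.errors["settings.settings-<provider>-<key>"], which the error
// elements rendered further below read.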
const ProviderTool: FunctionComponent<ProviderToolProps> = ({
payload,
enabledProviders,
@ -172,6 +194,9 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
settings: staged,
hooks: {},
},
validate: {
settings: validation!,
},
});
const deletePayload = useCallback(() => {
@ -188,6 +213,12 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
const submit = useCallback(
(values: FormValues) => {
const result = form.validate();
if (result.hasErrors) {
return;
}
if (info && enabledProviders) {
const changes = { ...values.settings };
const hooks = values.hooks;
@ -204,7 +235,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
modals.closeAll();
}
},
[info, enabledProviders, modals, settingsKey],
[info, enabledProviders, modals, settingsKey, form],
);
const canSave = info !== null;
@ -249,43 +280,57 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
const label = value.name ?? capitalize(value.key);
const options = value.options ?? [];
const error = form.errors[`settings.settings-${itemKey}-${key}`] ? (
<MantineText c="red" component="span" size="xs">
{form.errors[`settings.settings-${itemKey}-${key}`]}
</MantineText>
) : null;
switch (value.type) {
case "text":
elements.push(
<Text
key={BuildKey(itemKey, key)}
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Text>,
<Fragment key={BuildKey(itemKey, key)}>
<Text
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Text>
{error}
</Fragment>,
);
return;
case "password":
elements.push(
<Password
key={BuildKey(itemKey, key)}
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Password>,
<Fragment key={BuildKey(itemKey, key)}>
<Password
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Password>
{error}
</Fragment>,
);
return;
case "switch":
elements.push(
<Check
key={key}
inline
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Check>,
<Fragment key={BuildKey(itemKey, key)}>
<Check
inline
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Check>
{error}
</Fragment>,
);
return;
case "select":
elements.push(
<GlobalSelector
key={key}
label={label}
settingKey={`settings-${itemKey}-${key}`}
options={options}
></GlobalSelector>,
<Fragment key={BuildKey(itemKey, key)}>
<GlobalSelector
label={label}
settingKey={`settings-${itemKey}-${key}`}
options={options}
></GlobalSelector>
{error}
</Fragment>,
);
return;
case "testbutton":
@ -295,11 +340,13 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
return;
case "chips":
elements.push(
<Chips
key={key}
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Chips>,
<Fragment key={BuildKey(itemKey, key)}>
<Chips
label={label}
settingKey={`settings-${itemKey}-${key}`}
></Chips>
{error}
</Fragment>,
);
return;
default:
@ -308,7 +355,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
});
return <Stack gap="xs">{elements}</Stack>;
}, [info]);
}, [info, form]);
return (
<SettingsProvider value={settings}>
@ -334,7 +381,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
<Divider></Divider>
<Group justify="right">
<Button hidden={!payload} color="red" onClick={deletePayload}>
Delete
Disable
</Button>
<Button
disabled={!canSave}
@ -342,7 +389,7 @@ const ProviderTool: FunctionComponent<ProviderToolProps> = ({
submit(form.values);
}}
>
Save
Enable
</Button>
</Group>
</Stack>

@ -16,7 +16,7 @@ import { IntegrationList, ProviderList } from "./list";
const SettingsProvidersView: FunctionComponent = () => {
return (
<Layout name="Providers">
<Section header="Providers">
<Section header="Enabled Providers">
<ProviderView
availableOptions={ProviderList}
settingsKey="settings-general-enabled_providers"

@ -1,6 +1,7 @@
import { ReactText } from "react";
import { SelectorOption } from "@/components";
type Text = string | number;
type Input<T, N> = {
type: N;
key: string;
@ -8,15 +9,18 @@ type Input<T, N> = {
name?: string;
description?: string;
options?: SelectorOption<string>[];
validation?: {
rule: (value: string) => string | null;
};
};
type AvailableInput =
| Input<ReactText, "text">
| Input<Text, "text">
| Input<string, "password">
| Input<boolean, "switch">
| Input<string, "select">
| Input<string, "testbutton">
| Input<ReactText[], "chips">;
| Input<Text[], "chips">;
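As an illustration, a text input using the new optional validation field could be declared like this; the key, name, and rule are hypothetical, and real rules follow the same value-to-error-string-or-null contract defined above.
// Hypothetical entry for a provider's inputs array, using the optional
// validation field added above.
const exampleInput = {
  type: "text",
  key: "api_key",
  name: "API Key",
  validation: {
    // Return an error message to reject the value, or null to accept it.
    rule: (value: string) =>
      value.trim() === "" ? "API Key is required" : null,
  },
};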
export interface ProviderInfo {
key: string;
@ -151,7 +155,8 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
{
key: "embeddedsubtitles",
name: "Embedded Subtitles",
description: "Embedded Subtitles from your Media Files",
description:
"This provider extracts embedded subtitles from your media files. You must disable 'Treat Embedded Subtitles as Downloaded' in Settings -> Subtitles for this provider to work.",
inputs: [
{
type: "chips",
@ -320,7 +325,22 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
},
],
},
{ key: "napiprojekt", description: "Polish Subtitles Provider" },
{
key: "napiprojekt",
description: "Polish Subtitles Provider",
inputs: [
{
type: "switch",
key: "only_authors",
name: "Skip subtitles without authors or possibly AI generated",
},
{
type: "switch",
key: "only_real_names",
name: "Download subtitles with real name authors only",
},
],
},
{
key: "napisy24",
description: "Polish Subtitles Provider",
@ -375,6 +395,12 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
{
type: "text",
key: "username",
validation: {
rule: (value: string) =>
/^.\S+@\S+$/.test(value)
? "Invalid Username. Do not use your e-mail."
: null,
},
},
{
type: "password",
@ -517,6 +543,11 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
key: "approved_only",
name: "Skip unapproved subtitles",
},
{
type: "switch",
key: "skip_wrong_fps",
name: "Skip subtitles whose fps doesn't match the video's",
},
],
},
{
@ -556,6 +587,12 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
name: "Logging level",
options: logLevelOptions,
},
{
type: "switch",
key: "pass_video_name",
name: "Pass video filename to Whisper (for logging)",
defaultValue: false,
},
{
type: "testbutton",
key: "whisperai",

@ -48,7 +48,7 @@ const SettingsRadarrView: FunctionComponent = () => {
</Section>
<Section header="Options">
<Slider
label="Minimum Score"
label="Minimum Score For Movies"
settingKey="settings-general-minimum_score_movie"
></Slider>
<Chips

@ -97,7 +97,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Weekly"}
>
<Selector
label="Day of The Week"
label="Day of Week"
settingKey="settings-sonarr-full_update_day"
options={dayOptions}
></Selector>
@ -107,7 +107,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Daily" || k === "Weekly"}
>
<Selector
label="Time of The Day"
label="Time of Day"
settingKey="settings-sonarr-full_update_hour"
options={timeOptions}
></Selector>
@ -134,7 +134,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Weekly"}
>
<Selector
label="Day of The Week"
label="Day of Week"
settingKey="settings-radarr-full_update_day"
options={dayOptions}
></Selector>
@ -144,7 +144,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Daily" || k === "Weekly"}
>
<Selector
label="Time of The Day"
label="Time of Day"
settingKey="settings-radarr-full_update_hour"
options={timeOptions}
></Selector>
@ -190,7 +190,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Weekly"}
>
<Selector
label="Day of The Week"
label="Day of Week"
settingKey="settings-backup-day"
options={dayOptions}
></Selector>
@ -200,7 +200,7 @@ const SettingsSchedulerView: FunctionComponent = () => {
on={(k) => k === "Daily" || k === "Weekly"}
>
<Selector
label="Time of The Day"
label="Time of Day"
settingKey="settings-backup-hour"
options={timeOptions}
></Selector>

@ -50,7 +50,7 @@ const SettingsSonarrView: FunctionComponent = () => {
</Section>
<Section header="Options">
<Slider
label="Minimum Score"
label="Minimum Score For Episodes"
settingKey="settings-general-minimum_score"
></Slider>
<Chips

@ -129,7 +129,7 @@ const commandOptionElements: React.JSX.Element[] = commandOptions.map(
const SettingsSubtitlesView: FunctionComponent = () => {
return (
<Layout name="Subtitles">
<Section header="Basic Options">
<Section header="Subtitle File Options">
<Selector
label="Subtitle Folder"
options={folderOptions}
@ -156,14 +156,33 @@ const SettingsSubtitlesView: FunctionComponent = () => {
What file extension to use when saving hearing-impaired subtitles to
disk (e.g., video.en.sdh.srt).
</Message>
<Check
label="Encode Subtitles To UTF-8"
settingKey="settings-general-utf8_encode"
></Check>
<Message>
Re-encode downloaded subtitles to UTF-8. Should be left enabled in
most cases.
</Message>
<Check
label="Change Subtitle File Permission After Download (chmod)"
settingKey="settings-general-chmod_enabled"
></Check>
<CollapseBox indent settingKey="settings-general-chmod_enabled">
<Text placeholder="0777" settingKey="settings-general-chmod"></Text>
<Message>
Must be a 4 digit octal number. Only for non-Windows systems.
</Message>
</CollapseBox>
</Section>
<Section header="Embedded Subtitles">
<Section header="Embedded Subtitles Handling">
<Check
label="Use Embedded Subtitles"
label="Treat Embedded Subtitles as Downloaded"
settingKey="settings-general-use_embedded_subs"
></Check>
<Message>
Use embedded subtitles in media files when determining missing ones.
Treat embedded subtitles in media files as already downloaded when
determining missing ones.
</Message>
<CollapseBox indent settingKey="settings-general-use_embedded_subs">
<Selector
@ -179,21 +198,21 @@ const SettingsSubtitlesView: FunctionComponent = () => {
settingKey="settings-general-ignore_pgs_subs"
></Check>
<Message>
Ignores PGS Subtitles in Embedded Subtitles detection.
Ignore PGS Subtitles when detecting embedded subtitles.
</Message>
<Check
label="Ignore Embedded VobSub Subtitles"
settingKey="settings-general-ignore_vobsub_subs"
></Check>
<Message>
Ignores VobSub Subtitles in Embedded Subtitles detection.
Ignore VobSub Subtitles when detecting embedded subtitles.
</Message>
<Check
label="Ignore Embedded ASS Subtitles"
settingKey="settings-general-ignore_ass_subs"
></Check>
<Message>
Ignores ASS Subtitles in Embedded Subtitles detection.
Ignore ASS Subtitles when detecting embedded subtitles.
</Message>
<Check
label="Show Only Desired Languages"
@ -232,28 +251,6 @@ const SettingsSubtitlesView: FunctionComponent = () => {
</Message>
</CollapseBox>
</Section>
<Section header="Encoding">
<Check
label="Encode Subtitles To UTF-8"
settingKey="settings-general-utf8_encode"
></Check>
<Message>
Re-encode downloaded subtitles to UTF-8. Should be left enabled in
most cases.
</Message>
</Section>
<Section header="Permissions">
<Check
label="Change Subtitle File Permission (chmod)"
settingKey="settings-general-chmod_enabled"
></Check>
<CollapseBox indent settingKey="settings-general-chmod_enabled">
<Text placeholder="0777" settingKey="settings-general-chmod"></Text>
<Message>
Must be a 4 digit octal number. Only for non-Windows systems.
</Message>
</CollapseBox>
</Section>
<Section header="Performance / Optimization">
<Check
label="Adaptive Searching"
@ -303,7 +300,11 @@ const SettingsSubtitlesView: FunctionComponent = () => {
results scores.
</Message>
</Section>
<Section header="Sub-Zero Modifications">
<Section header="Sub-Zero Subtitle Content Modifications">
<Message>
After being downloaded, subtitle content will be modified based on the
options selected below.
</Message>
<Check
label="Hearing Impaired"
settingOptions={{ onLoaded: SubzeroModification("remove_HI") }}
@ -372,7 +373,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
playback devices.
</Message>
</Section>
<Section header="Synchronization / Alignment">
<Section header="Audio Synchronization / Alignment">
<Check
label="Always use Audio Track as Reference for Syncing"
settingKey="settings-subsync-force_audio"
@ -382,7 +383,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
subtitle.
</Message>
<Check
label="No Fix Framerate"
label="Do Not Fix Framerate Mismatch"
settingKey="settings-subsync-no_fix_framerate"
></Check>
<Message>
@ -407,11 +408,11 @@ const SettingsSubtitlesView: FunctionComponent = () => {
The max allowed offset seconds for any subtitle segment.
</Message>
<Check
label="Automatic Subtitles Synchronization"
label="Automatic Subtitles Audio Synchronization"
settingKey="settings-subsync-use_subsync"
></Check>
<Message>
Enable automatic synchronization after downloading subtitles.
Enable automatic audio synchronization after downloading subtitles.
</Message>
<CollapseBox indent settingKey="settings-subsync-use_subsync">
<MultiSelector
@ -428,7 +429,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
the media file.
</Message>
<Check
label="Series Score Threshold"
label="Series Score Threshold For Audio Sync"
settingKey="settings-subsync-use_subsync_threshold"
></Check>
<CollapseBox
@ -446,7 +447,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
</Message>
</CollapseBox>
<Check
label="Movies Score Threshold"
label="Movies Score Threshold For Audio Sync"
settingKey="settings-subsync-use_subsync_movie_threshold"
></Check>
<CollapseBox
@ -477,7 +478,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
<CollapseBox indent settingKey="settings-general-use_postprocessing">
<Check
settingKey="settings-general-use_postprocessing_threshold"
label="Series Score Threshold"
label="Series Score Threshold For Post-Processing"
></Check>
<CollapseBox
indent
@ -495,7 +496,7 @@ const SettingsSubtitlesView: FunctionComponent = () => {
</CollapseBox>
<Check
settingKey="settings-general-use_postprocessing_threshold_movie"
label="Movies Score Threshold"
label="Movies Score Threshold For Post-Processing"
></Check>
<CollapseBox
indent

@ -49,11 +49,20 @@ const ItemOverview: FunctionComponent<Props> = (props) => {
if (item) {
badges.push(
<ItemBadge key="file-path" icon={faFolder} title="File Path">
<ItemBadge
key="file-path"
icon={faFolder}
title="File Path"
styles={{
root: { overflow: "unset" },
label: { overflow: "hidden" },
}}
>
<Tooltip
label={item.path}
multiline
style={{ overflowWrap: "anywhere" }}
events={{ hover: true, focus: false, touch: true }}
>
<span>{item.path}</span>
</Tooltip>

@ -151,6 +151,8 @@ declare namespace Item {
SeriesIdType & {
episodeFileCount: number;
episodeMissingCount: number;
ended: boolean;
lastAired: string;
seriesType: SonarrSeriesType;
tvdbId: number;
};
@ -295,6 +297,7 @@ type ItemSearchResult = Partial<SeriesIdType> &
Partial<MovieIdType> & {
title: string;
year: string;
poster: string;
};
type BackendError = {

@ -0,0 +1,38 @@
"""empty message
Revision ID: 4274a5dfc4ad
Revises: 8baf97427327
Create Date: 2024-12-15 21:19:19.406290
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4274a5dfc4ad'
down_revision = '8baf97427327'
branch_labels = None
depends_on = None
bind = op.get_context().bind
insp = sa.inspect(bind)
def column_exists(table_name, column_name):
columns = insp.get_columns(table_name)
return any(c["name"] == column_name for c in columns)
def upgrade():
if not column_exists('table_shows', 'ended'):
with op.batch_alter_table('table_shows', schema=None) as batch_op:
batch_op.add_column(sa.Column('ended', sa.TEXT(), nullable=True))
if not column_exists('table_shows', 'lastAired'):
with op.batch_alter_table('table_shows', schema=None) as batch_op:
batch_op.add_column(sa.Column('lastAired', sa.TEXT(), nullable=True))
def downgrade():
pass

@ -0,0 +1,51 @@
"""empty message
Revision ID: 8baf97427327
Revises: 1e38aa77a491
Create Date: 2024-10-18 12:57:13.831596
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8baf97427327'
down_revision = '1e38aa77a491'
branch_labels = None
depends_on = None
bind = op.get_context().bind
insp = sa.inspect(bind)
tables = insp.get_table_names()
sqlite = bind.engine.name == 'sqlite'
def column_exists(table_name, column_name):
columns = insp.get_columns(table_name)
return any(c["name"] == column_name for c in columns)
def upgrade():
with op.batch_alter_table('table_episodes', schema=None) as batch_op:
if not column_exists('table_episodes', 'created_at_timestamp'):
batch_op.add_column(sa.Column('created_at_timestamp', sa.DateTime(), nullable=True))
if not column_exists('table_episodes', 'updated_at_timestamp'):
batch_op.add_column(sa.Column('updated_at_timestamp', sa.DateTime(), nullable=True))
with op.batch_alter_table('table_movies', schema=None) as batch_op:
if not column_exists('table_movies', 'created_at_timestamp'):
batch_op.add_column(sa.Column('created_at_timestamp', sa.DateTime(), nullable=True))
if not column_exists('table_movies', 'updated_at_timestamp'):
batch_op.add_column(sa.Column('updated_at_timestamp', sa.DateTime(), nullable=True))
with op.batch_alter_table('table_shows', schema=None) as batch_op:
if not column_exists('table_shows', 'created_at_timestamp'):
batch_op.add_column(sa.Column('created_at_timestamp', sa.DateTime(), nullable=True))
if not column_exists('table_shows', 'updated_at_timestamp'):
batch_op.add_column(sa.Column('updated_at_timestamp', sa.DateTime(), nullable=True))
def downgrade():
pass

@ -0,0 +1,6 @@
{
"ExpirationDate": "2024-10-06T19:05:13.5",
"Token": "asdf1234",
"UserId": 111,
"UserName": "user1"
}

@ -0,0 +1,172 @@
{
"ResultsFound": 11,
"PagesAvailable": 1,
"CurrentPage": 1,
"SubtitleResults": [
{
"Id": 346305,
"Title": "Nikita aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=346305",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2022-04-11T14:03:30.59",
"DownloadCount": 415,
"Rating": 0.0,
"Release": "PROPER.FRENCH.1080p.BluRay.x264.TrueHD.5.1-FGT"
},
{
"Id": 323824,
"Title": "Nikita Aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=323824",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2021-02-21T23:53:51.257",
"DownloadCount": 397,
"Rating": 0.0,
"Release": "720p BluRay x264 DTS-PRoDJi"
},
{
"Id": 120571,
"Title": "Nikita Aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=120571",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2011-02-28T22:54:45.7",
"DownloadCount": 3543,
"Rating": 0.0,
"Release": "720p.BD rip"
},
{
"Id": 91576,
"Title": "La Femme Nikita",
"Year": 1997,
"Type": 2,
"Link": "https://titlovi.com/download/?type=1&mediaid=91576",
"Season": 5,
"Episode": 0,
"Special": -1,
"Lang": "Srpski",
"Date": "2009-12-21T23:13:20.407",
"DownloadCount": 3227,
"Rating": 0.0,
"Release": ""
},
{
"Id": 81025,
"Title": "La Femme Nikita",
"Year": 1997,
"Type": 2,
"Link": "https://titlovi.com/download/?type=1&mediaid=81025",
"Season": 4,
"Episode": 0,
"Special": -1,
"Lang": "Srpski",
"Date": "2009-06-05T03:09:19.77",
"DownloadCount": 3799,
"Rating": 0.0,
"Release": ""
},
{
"Id": 81024,
"Title": "La Femme Nikita",
"Year": 1997,
"Type": 2,
"Link": "https://titlovi.com/download/?type=1&mediaid=81024",
"Season": 3,
"Episode": 0,
"Special": -1,
"Lang": "Srpski",
"Date": "2009-06-05T03:07:39.683",
"DownloadCount": 3842,
"Rating": 0.0,
"Release": ""
},
{
"Id": 81023,
"Title": "La Femme Nikita",
"Year": 1997,
"Type": 2,
"Link": "https://titlovi.com/download/?type=1&mediaid=81023",
"Season": 2,
"Episode": 0,
"Special": -1,
"Lang": "Srpski",
"Date": "2009-06-05T03:06:06.21",
"DownloadCount": 4310,
"Rating": 0.0,
"Release": ""
},
{
"Id": 81022,
"Title": "La Femme Nikita",
"Year": 1997,
"Type": 2,
"Link": "https://titlovi.com/download/?type=1&mediaid=81022",
"Season": 1,
"Episode": 0,
"Special": -1,
"Lang": "Srpski",
"Date": "2009-06-05T03:04:40.14",
"DownloadCount": 3924,
"Rating": 0.0,
"Release": ""
},
{
"Id": 69118,
"Title": "Nikita Aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=69118",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2008-12-07T18:48:22.087",
"DownloadCount": 4950,
"Rating": 5.0,
"Release": "720p.BluRay.x264-SiNNERS"
},
{
"Id": 14697,
"Title": "Nikita Aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=14697",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2006-03-14T11:29:44.45",
"DownloadCount": 2188,
"Rating": 5.0,
"Release": ""
},
{
"Id": 10582,
"Title": "Nikita Aka La Femme Nikita",
"Year": 1990,
"Type": 1,
"Link": "https://titlovi.com/download/?type=1&mediaid=10582",
"Season": -1,
"Episode": -1,
"Special": -1,
"Lang": "Srpski",
"Date": "2005-09-24T19:40:34.233",
"DownloadCount": 1214,
"Rating": 0.0,
"Release": ""
}
]
}

@ -0,0 +1,69 @@
import pytest
import subliminal
import datetime
import tempfile
import os
from subliminal_patch.providers.titlovi import TitloviProvider
from subliminal_patch.providers.titlovi import TitloviSubtitle
from dogpile.cache.region import register_backend as register_cache_backend
from subliminal_patch.core import Episode
from subzero.language import Language
from subliminal.subtitle import fix_line_ending
from zipfile import ZipFile
@pytest.fixture(scope="session")
def titlovi_episodes():
return {
"la_femme_nikita_s01e13": Episode(
"La Femme Nikita (1997) - S01E13 - Recruit [HDTV-720p][Opus 2.0][x265].mkv",
"La Femme Nikita",
1,
13,
series_imdb_id="tt21209876",
video_codec="x265",
),
}
@pytest.fixture(scope="session")
def region():
register_cache_backend("subzero.cache.file", "subzero.cache_backends.file", "SZFileBackend")
subliminal.region.configure(
"subzero.cache.file",
expiration_time=datetime.timedelta(days=30),
arguments={"appname": "sz_cache", "app_cache_dir": tempfile.gettempdir()},
replace_existing_backend=True,
)
subliminal.region.backend.sync()
def test_list_subtitles_and_download_from_pack(region, titlovi_episodes, requests_mock, data):
language = Language.fromietf('sr-Latn')
item = titlovi_episodes["la_femme_nikita_s01e13"]
with open(os.path.join(data, 'titlovi_gettoken_response.json'), "rb") as f:
response = f.read()
requests_mock.post('https://kodi.titlovi.com/api/subtitles/gettoken?username=user1&password=pass1&json=True', content=response)
with open(os.path.join(data, 'titlovi_search_response.json'), "rb") as f:
response = f.read()
requests_mock.get('https://kodi.titlovi.com/api/subtitles/search?token=asdf1234&userid=111&&query=la femme nikita&lang=Srpski&json=True', content=response)
with open(os.path.join(data, 'titlovi_some_subtitle_pack.zip'), "rb") as f:
response = f.read()
requests_mock.get('https://titlovi.com/download/?type=1&mediaid=81022', content=response)
with TitloviProvider('user1','pass1') as provider:
subtitles = provider.list_subtitles(item, languages={language})
assert len(subtitles) == 1
subtitle = subtitles[0]
provider.download_subtitle(subtitle)
with open(os.path.join(data, 'titlovi_some_subtitle_pack.zip'), "rb") as f:
archive = ZipFile(f)
# subs_in_archive = archive.namelist()
subtitle_content = fix_line_ending(archive.read('La Femme Nikita - 01x13 - Recruit.srt'))
assert subtitle.content == subtitle_content