Merge branch 'development' into subztv

pull/1148/head
Louis Vézina 4 years ago
commit 54d8c802a7

@ -66,9 +66,11 @@ If you need something that is not already part of Bazarr, feel free to create a
* Subsunacs.net
* subtitri.id.lv
* Subtitulamos.tv
* Sucha
* Supersubtitles
* Titlovi
* Titrari.ro
* TuSubtitulo
* TVSubtitles
* Wizdom
* XSubs

@ -37,7 +37,7 @@ from scheduler import scheduler
from subsyncer import subsync
from filesystem import browse_bazarr_filesystem, browse_sonarr_filesystem, browse_radarr_filesystem
from subliminal_patch.core import SUBTITLE_EXTENSIONS
from subliminal_patch.core import SUBTITLE_EXTENSIONS, guessit
from flask import Flask, jsonify, request, Response, Blueprint, url_for, make_response
@ -549,6 +549,19 @@ class Episodes(Resource):
item.update({"desired_languages": desired_languages})
return jsonify(draw=draw, recordsTotal=row_count, recordsFiltered=row_count, data=result)
class SubtitleNameInfo(Resource):
    """API endpoint that runs guessit over a subtitles file name.

    GET /subtitle_name_info?filename=<name> returns the guessit parse as
    JSON, or HTTP 400 when no filename was supplied.
    """

    @authenticate
    def get(self):
        # 'filename' is mandatory; without it there is nothing to parse.
        filename = request.args.get('filename')
        if filename is None:
            return '', 400

        guessed = guessit(filename, options={'type': 'episode'})
        # babelfish Language objects are not JSON-serializable; stringify.
        if 'subtitle_language' in guessed:
            guessed['subtitle_language'] = str(guessed['subtitle_language'])
        return jsonify(data=guessed)
class EpisodesSubtitlesDelete(Resource):
@authenticate
@ -1990,6 +2003,8 @@ api.add_resource(SystemProviders, '/systemproviders')
api.add_resource(SystemStatus, '/systemstatus')
api.add_resource(SystemReleases, '/systemreleases')
api.add_resource(SubtitleNameInfo, '/subtitle_name_info')
api.add_resource(Series, '/series')
api.add_resource(SeriesEditor, '/series_editor')
api.add_resource(SeriesEditSave, '/series_edit_save')

@ -2,6 +2,8 @@
import hashlib
import os
from subliminal.cache import region
from simpleconfigparser import simpleconfigparser
from get_args import args
@ -234,6 +236,26 @@ def save_settings(settings_items):
'settings-radarr-only_monitored']:
exclusion_updated = True
if key == 'settings-addic7ed-username':
if key != settings.addic7ed.username:
region.delete('addic7ed_data')
if key == 'settings-legendasdivx-username':
if key != settings.legendasdivx.username:
region.delete('legendasdivx_cookies2')
if key == 'settings-opensubtitles-username':
if key != settings.opensubtitles.username:
region.delete('os_token')
if key == 'settings-subscene-username':
if key != settings.subscene.username:
region.delete('subscene_cookies2')
if key == 'settings-titlovi-username':
if key != settings.titlovi.username:
region.delete('titlovi_token')
if settings_keys[0] == 'settings':
settings[settings_keys[1]][settings_keys[2]] = str(value)

@ -190,7 +190,10 @@ def sync_episodes():
"sonarrEpisodeId = ?" + get_exclusion_clause('series'), (altered_episode[0],),
only_one=True)
episode_download_subtitles(data['sonarrEpisodeId'])
if data:
episode_download_subtitles(data['sonarrEpisodeId'])
else:
logging.debug("BAZARR skipping download for this episode as it is excluded.")
else:
logging.debug("BAZARR More than 5 episodes were added during this sync then we wont search for subtitles right now.")

@ -273,7 +273,10 @@ def update_movies():
for altered_movie in altered_movies:
data = database.execute("SELECT * FROM table_movies WHERE radarrId = ?" +
get_exclusion_clause('movie'), (altered_movie[2],), only_one=True)
movies_download_subtitles(data['radarrId'])
if data:
movies_download_subtitles(data['radarrId'])
else:
logging.debug("BAZARR skipping download for this movie as it is excluded.")
else:
logging.debug("BAZARR More than 5 movies were added during this sync then we wont search for subtitles.")

@ -1033,7 +1033,7 @@ def refine_from_db(path, video):
"WHERE table_episodes.path = ?", (path_mappings.path_replace_reverse(path),), only_one=True)
if data:
video.series = re.sub(r'(\(\d\d\d\d\))', '', data['seriesTitle'])
video.series = re.sub(r'\s(\(\d\d\d\d\))', '', data['seriesTitle'])
video.season = int(data['season'])
video.episode = int(data['episode'])
video.title = data['episodeTitle']
@ -1058,7 +1058,7 @@ def refine_from_db(path, video):
(path_mappings.path_replace_reverse_movie(path),), only_one=True)
if data:
video.title = re.sub(r'(\(\d\d\d\d\))', '', data['title'])
video.title = re.sub(r'\s(\(\d\d\d\d\))', '', data['title'])
# Commented out because Radarr provided so much bad year
# if data['year']:
# if int(data['year']) > 0: video.year = int(data['year'])

@ -117,7 +117,8 @@ def pp_replace(pp_command, episode, subtitles, language, language_code2, languag
def get_subtitle_destination_folder():
    """Return the configured custom subtitles subfolder, or None.

    A custom folder applies only when ``subfolder_custom`` is set AND the
    general ``subfolder`` mode is not 'current' (subtitles stored next to
    the media file). Previously a stale duplicate assignment ignored the
    'current' mode check; only the corrected assignment is kept here.
    """
    if settings.general.subfolder_custom and settings.general.subfolder != 'current':
        return str(settings.general.subfolder_custom).strip()
    return None

@ -63,7 +63,13 @@ def store_subtitles(original_path, reversed_path):
core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
subtitles = search_external_subtitles(reversed_path, languages=get_language_set(),
only_one=settings.general.getboolean('single_language'))
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(reversed_path), subtitles)
full_dest_folder_path = os.path.dirname(reversed_path)
if dest_folder:
if settings.general.subfolder == "absolute":
full_dest_folder_path = dest_folder
elif settings.general.subfolder == "relative":
full_dest_folder_path = os.path.join(os.path.dirname(reversed_path), dest_folder)
subtitles = guess_external_subtitles(full_dest_folder_path, subtitles)
except Exception as e:
logging.exception("BAZARR unable to index external subtitles.")
pass
@ -142,7 +148,13 @@ def store_subtitles_movie(original_path, reversed_path):
dest_folder = get_subtitle_destination_folder() or ''
core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
subtitles = search_external_subtitles(reversed_path, languages=get_language_set())
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(reversed_path), subtitles)
full_dest_folder_path = os.path.dirname(reversed_path)
if dest_folder:
if settings.general.subfolder == "absolute":
full_dest_folder_path = dest_folder
elif settings.general.subfolder == "relative":
full_dest_folder_path = os.path.join(os.path.dirname(reversed_path), dest_folder)
subtitles = guess_external_subtitles(full_dest_folder_path, subtitles)
except Exception as e:
logging.exception("BAZARR unable to index external subtitles.")
pass
@ -417,9 +429,21 @@ def guess_external_subtitles(dest_folder, subtitles):
detected_language = guess_language(text)
except UnicodeDecodeError:
detector = Detector()
guess = detector.detect(text)
logging.debug('BAZARR detected encoding %r', guess)
text = text.decode(guess)
try:
guess = detector.detect(text)
except:
logging.debug("BAZARR skipping this subtitles because we can't guess the encoding. "
"It's probably a binary file: " + subtitle_path)
continue
else:
logging.debug('BAZARR detected encoding %r', guess)
try:
text = text.decode(guess)
except:
logging.debug(
"BAZARR skipping this subtitles because we can't decode the file using the "
"guessed encoding. It's probably a binary file: " + subtitle_path)
continue
detected_language = guess_language(text)
except:
logging.debug('BAZARR was unable to detect encoding for this subtitles file: %r', subtitle_path)
@ -437,6 +461,12 @@ def guess_external_subtitles(dest_folder, subtitles):
if not subtitles[subtitle].hi:
subtitle_path = os.path.join(dest_folder, subtitle)
# to improve performance, skip detection of files larger that 1M
if os.path.getsize(subtitle_path) > 1 * 1024 * 1024:
logging.debug("BAZARR subtitles file is too large to be text based. Skipping this file: " +
subtitle_path)
continue
with open(subtitle_path, 'rb') as f:
text = f.read()
@ -444,12 +474,21 @@ def guess_external_subtitles(dest_folder, subtitles):
text = text.decode('utf-8')
except UnicodeDecodeError:
detector = Detector()
guess = detector.detect(text)
logging.debug('BAZARR detected encoding %r', guess)
text = text.decode(guess)
except:
logging.debug('BAZARR was unable to detect encoding for this subtitles file: %r', subtitle_path)
finally:
if bool(re.search(hi_regex, text)):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
try:
guess = detector.detect(text)
except:
logging.debug("BAZARR skipping this subtitles because we can't guess the encoding. "
"It's probably a binary file: " + subtitle_path)
continue
else:
logging.debug('BAZARR detected encoding %r', guess)
try:
text = text.decode(guess)
except:
logging.debug("BAZARR skipping this subtitles because we can't decode the file using the "
"guessed encoding. It's probably a binary file: " + subtitle_path)
continue
if bool(re.search(hi_regex, text)):
subtitles[subtitle] = Language.rebuild(subtitles[subtitle], forced=False, hi=True)
return subtitles

@ -93,6 +93,7 @@ def configure_logging(debug=False):
else:
logging.getLogger("sqlite3worker").setLevel(logging.CRITICAL)
logging.getLogger("apscheduler").setLevel(logging.WARNING)
logging.getLogger("apprise").setLevel(logging.WARNING)
logging.getLogger("subliminal").setLevel(logging.CRITICAL)
logging.getLogger("subliminal_patch").setLevel(logging.CRITICAL)
logging.getLogger("subzero").setLevel(logging.ERROR)

@ -1,6 +1,6 @@
# coding=utf-8
bazarr_version = '0.9.0.4'
bazarr_version = '0.9.0.5'
import os
os.environ["BAZARR_VERSION"] = bazarr_version
@ -478,7 +478,11 @@ def test_url(protocol, url):
def test_notification(protocol, provider):
provider = unquote(provider)
apobj = apprise.Apprise()
asset = apprise.AppriseAsset(async_mode=False)
apobj = apprise.Apprise(asset=asset)
apobj.add(protocol + "://" + provider)
apobj.notify(

@ -67,7 +67,9 @@ def send_notifications(sonarr_series_id, sonarr_episode_id, message):
series = get_series_name(sonarr_series_id)
episode = get_episode_name(sonarr_episode_id)
apobj = apprise.Apprise()
asset = apprise.AppriseAsset(async_mode=False)
apobj = apprise.Apprise(asset=asset)
for provider in providers:
if provider['url'] is not None:
@ -83,7 +85,9 @@ def send_notifications_movie(radarr_id, message):
providers = get_notifier_providers()
movie = get_movies_name(radarr_id)
apobj = apprise.Apprise()
asset = apprise.AppriseAsset(async_mode=False)
apobj = apprise.Apprise(asset=asset)
for provider in providers:
if provider['url'] is not None:

@ -290,6 +290,8 @@ class LegendasdivxProvider(Provider):
_searchurl = self.searchurl
subtitles = []
if isinstance(video, Movie):
querytext = video.imdb_id if video.imdb_id else video.title
@ -298,79 +300,83 @@ class LegendasdivxProvider(Provider):
querytext = quote(quote(querytext))
# language query filter
if isinstance(languages, (tuple, list, set)):
language_ids = ','.join(sorted(l.opensubtitles for l in languages))
if 'por' in language_ids: # prioritize portuguese subtitles
if not isinstance(languages, (tuple, list, set)):
languages = [languages]
for language in languages:
logger.debug("Legendasdivx.pt :: searching for %s subtitles.", language)
language_id = language.opensubtitles
if 'por' in language_id:
lang_filter = '&form_cat=28'
elif 'pob' in language_ids:
elif 'pob' in language_id:
lang_filter = '&form_cat=29'
else:
lang_filter = ''
querytext = querytext + lang_filter if lang_filter else querytext
querytext = querytext + lang_filter if lang_filter else querytext
try:
# sleep for a 1 second before another request
sleep(1)
self.headers['Referer'] = self.site + '/index.php'
self.session.headers.update(self.headers)
res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
res.raise_for_status()
if (res.status_code == 200 and "A legenda não foi encontrada" in res.text):
logger.warning('Legendasdivx.pt :: query %s return no results!', querytext)
# for series, if no results found, try again just with series and season (subtitle packs)
if isinstance(video, Episode):
logger.debug("Legendasdivx.pt :: trying again with just series and season on query.")
querytext = re.sub("(e|E)(\d{2})", "", querytext)
res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
res.raise_for_status()
if (res.status_code == 200 and "A legenda não foi encontrada" in res.text):
logger.warning('Legendasdivx.pt :: query %s return no results (for series and season only).', querytext)
return []
if res.status_code == 302: # got redirected to login page.
# seems that our session cookies are no longer valid... clean them from cache
region.delete("legendasdivx_cookies2")
logger.debug("Legendasdivx.pt :: Logging in again. Cookies have expired!")
# login and try again
self.login()
res = self.session.get(_searchurl.format(query=querytext))
try:
# sleep for a 1 second before another request
sleep(1)
self.headers['Referer'] = self.site + '/index.php'
self.session.headers.update(self.headers)
res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
res.raise_for_status()
except HTTPError as e:
if "bloqueado" in res.text.lower():
logger.error("LegendasDivx.pt :: Your IP is blocked on this server.")
raise IPAddressBlocked("LegendasDivx.pt :: Your IP is blocked on this server.")
logger.error("Legendasdivx.pt :: HTTP Error %s", e)
raise TooManyRequests("Legendasdivx.pt :: HTTP Error %s", e)
except Exception as e:
logger.error("LegendasDivx.pt :: Uncaught error: %r", e)
raise ServiceUnavailable("LegendasDivx.pt :: Uncaught error: %r", e)
bsoup = ParserBeautifulSoup(res.content, ['html.parser'])
# search for more than 10 results (legendasdivx uses pagination)
# don't throttle - maximum results = 6 * 10
MAX_PAGES = 6
# get number of pages bases on results found
page_header = bsoup.find("div", {"class": "pager_bar"})
results_found = re.search(r'\((.*?) encontradas\)', page_header.text).group(1) if page_header else 0
logger.debug("Legendasdivx.pt :: Found %s subtitles", str(results_found))
num_pages = (int(results_found) // 10) + 1
num_pages = min(MAX_PAGES, num_pages)
# process first page
subtitles = self._process_page(video, bsoup)
# more pages?
if num_pages > 1:
for num_page in range(2, num_pages+1):
sleep(1) # another 1 sec before requesting...
_search_next = self.searchurl.format(query=querytext) + "&page={0}".format(str(num_page))
logger.debug("Legendasdivx.pt :: Moving on to next page: %s", _search_next)
res = self.session.get(_search_next)
next_page = ParserBeautifulSoup(res.content, ['html.parser'])
subs = self._process_page(video, next_page)
subtitles.extend(subs)
if (res.status_code == 200 and "A legenda não foi encontrada" in res.text):
logger.warning('Legendasdivx.pt :: query %s return no results!', querytext)
# for series, if no results found, try again just with series and season (subtitle packs)
if isinstance(video, Episode):
logger.debug("Legendasdivx.pt :: trying again with just series and season on query.")
querytext = re.sub("(e|E)(\d{2})", "", querytext)
res = self.session.get(_searchurl.format(query=querytext), allow_redirects=False)
res.raise_for_status()
if (res.status_code == 200 and "A legenda não foi encontrada" in res.text):
logger.warning('Legendasdivx.pt :: query %s return no results (for series and season only).', querytext)
return []
if res.status_code == 302: # got redirected to login page.
# seems that our session cookies are no longer valid... clean them from cache
region.delete("legendasdivx_cookies2")
logger.debug("Legendasdivx.pt :: Logging in again. Cookies have expired!")
# login and try again
self.login()
res = self.session.get(_searchurl.format(query=querytext))
res.raise_for_status()
except HTTPError as e:
if "bloqueado" in res.text.lower():
logger.error("LegendasDivx.pt :: Your IP is blocked on this server.")
raise IPAddressBlocked("LegendasDivx.pt :: Your IP is blocked on this server.")
logger.error("Legendasdivx.pt :: HTTP Error %s", e)
raise TooManyRequests("Legendasdivx.pt :: HTTP Error %s", e)
except Exception as e:
logger.error("LegendasDivx.pt :: Uncaught error: %r", e)
raise ServiceUnavailable("LegendasDivx.pt :: Uncaught error: %r", e)
bsoup = ParserBeautifulSoup(res.content, ['html.parser'])
# search for more than 10 results (legendasdivx uses pagination)
# don't throttle - maximum results = 6 * 10
MAX_PAGES = 6
# get number of pages bases on results found
page_header = bsoup.find("div", {"class": "pager_bar"})
results_found = re.search(r'\((.*?) encontradas\)', page_header.text).group(1) if page_header else 0
logger.debug("Legendasdivx.pt :: Found %s subtitles", str(results_found))
num_pages = (int(results_found) // 10) + 1
num_pages = min(MAX_PAGES, num_pages)
# process first page
subtitles += self._process_page(video, bsoup)
# more pages?
if num_pages > 1:
for num_page in range(2, num_pages+1):
sleep(1) # another 1 sec before requesting...
_search_next = self.searchurl.format(query=querytext) + "&page={0}".format(str(num_page))
logger.debug("Legendasdivx.pt :: Moving on to next page: %s", _search_next)
res = self.session.get(_search_next)
next_page = ParserBeautifulSoup(res.content, ['html.parser'])
subs = self._process_page(video, next_page)
subtitles.extend(subs)
return subtitles

@ -43,6 +43,7 @@ class OpenSubtitlesSubtitle(_OpenSubtitlesSubtitle):
self.release_info = movie_release_name
self.wrong_fps = False
self.skip_wrong_fps = skip_wrong_fps
self.movie_imdb_id = movie_imdb_id
def get_fps(self):
try:
@ -90,6 +91,10 @@ class OpenSubtitlesSubtitle(_OpenSubtitlesSubtitle):
self.query_parameters.get("tag", None))
matches.add("hash")
# imdb_id match so we'll consider year as matching
if self.movie_imdb_id and video.imdb_id and (self.movie_imdb_id == video.imdb_id):
matches.add("year")
return matches
@ -231,13 +236,13 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider):
else:
query = [video.title] + video.alternative_titles
return self.query(languages, hash=video.hashes.get('opensubtitles'), size=video.size, imdb_id=video.imdb_id,
query=query, season=season, episode=episode, tag=video.original_name,
return self.query(video, languages, hash=video.hashes.get('opensubtitles'), size=video.size,
imdb_id=video.imdb_id, query=query, season=season, episode=episode, tag=video.original_name,
use_tag_search=self.use_tag_search, only_foreign=self.only_foreign,
also_foreign=self.also_foreign)
def query(self, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None, tag=None,
use_tag_search=False, only_foreign=False, also_foreign=False):
def query(self, video, languages, hash=None, size=None, imdb_id=None, query=None, season=None, episode=None,
tag=None, use_tag_search=False, only_foreign=False, also_foreign=False):
# fill the search criteria
criteria = []
if hash and size:
@ -294,7 +299,10 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider):
movie_name = _subtitle_item['MovieName']
movie_release_name = _subtitle_item['MovieReleaseName']
movie_year = int(_subtitle_item['MovieYear']) if _subtitle_item['MovieYear'] else None
movie_imdb_id = 'tt' + _subtitle_item['IDMovieImdb']
if season or episode:
movie_imdb_id = 'tt' + _subtitle_item['SeriesIMDBParent']
else:
movie_imdb_id = 'tt' + _subtitle_item['IDMovieImdb']
movie_fps = _subtitle_item.get('MovieFPS')
series_season = int(_subtitle_item['SeriesSeason']) if _subtitle_item['SeriesSeason'] else None
series_episode = int(_subtitle_item['SeriesEpisode']) if _subtitle_item['SeriesEpisode'] else None
@ -321,6 +329,9 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider):
if language not in languages:
continue
if video.imdb_id and (movie_imdb_id != video.imdb_id):
continue
query_parameters = _subtitle_item.get("QueryParameters")
subtitle = self.subtitle_class(language, hearing_impaired, page_link, subtitle_id, matched_by,

@ -9,11 +9,6 @@ import zipfile
import rarfile
from subzero.language import Language
from requests import Session
from six import PY2
if PY2:
from urlparse import urlparse
else:
from urllib.parse import urlparse
from subliminal import __short_version__
from subliminal.exceptions import ServiceUnavailable
@ -74,22 +69,27 @@ class SubdivxSubtitle(Subtitle):
formats = [video.source.lower()]
if formats[0] == "web":
formats.append("webdl")
formats.append("web-dl")
formats.append("webrip")
formats.append("web ")
for frmt in formats:
if frmt.lower() in self.description:
if frmt in self.description:
matches.add('source')
break
# video_codec
if video.video_codec:
video_codecs = [video.video_codec.lower()]
if video_codecs[0] == "H.264":
formats.append("x264")
elif video_codecs[0] == "H.265":
formats.append("x265")
for vc in formats:
if vc.lower() in self.description:
if video_codecs[0] == "h.264":
video_codecs.append("h264")
video_codecs.append("x264")
elif video_codecs[0] == "h.265":
video_codecs.append("h265")
video_codecs.append("x265")
elif video_codecs[0] == "divx":
video_codecs.append("divx")
for vc in video_codecs:
if vc in self.description:
matches.add('video_codec')
break
@ -99,7 +99,7 @@ class SubdivxSubtitle(Subtitle):
class SubdivxSubtitlesProvider(Provider):
provider_name = 'subdivx'
hash_verifiable = False
languages = {Language.fromalpha2(l) for l in ['es']}
languages = {Language.fromalpha2(lang) for lang in ['es']}
subtitle_class = SubdivxSubtitle
server_url = 'https://www.subdivx.com/'
@ -117,13 +117,18 @@ class SubdivxSubtitlesProvider(Provider):
self.session.close()
def query(self, video, languages):
if isinstance(video, Episode):
query = "{} S{:02d}E{:02d}".format(video.series, video.season, video.episode)
else:
# Subdvix has problems searching foreign movies if the year is
# appended. For example: if we search "Memories of Murder 2003",
# Subdix won't return any results; but if we search "Memories of
# Murder", it will. That's because in Subdvix foreign titles have
# the year after the original title ("Salinui chueok (2003) aka
# Memories of Murder").
# A proper solution would be filtering results with the year in
# _parse_subtitles_page.
query = video.title
if video.year:
query += ' {:4d}'.format(video.year)
params = {
'q': query, # search string
@ -148,7 +153,7 @@ class SubdivxSubtitlesProvider(Provider):
subtitles += page_subtitles
if len(page_subtitles) < 20:
if len(page_subtitles) < 100:
break # this is the last page
params['pg'] += 1 # search next page
@ -179,14 +184,10 @@ class SubdivxSubtitlesProvider(Provider):
subtitle_content = self._get_subtitle_from_archive(archive, subtitle)
subtitle.content = fix_line_ending(subtitle_content)
def _check_response(self, response):
if response.status_code != 200:
raise ServiceUnavailable('Bad status code: ' + str(response.status_code))
def _parse_subtitles_page(self, video, response, language):
subtitles = []
page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
page_soup = ParserBeautifulSoup(response.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
title_soups = page_soup.find_all("div", {'id': 'menu_detalle_buscador'})
body_soups = page_soup.find_all("div", {'id': 'buscador_detalle'})
@ -195,6 +196,11 @@ class SubdivxSubtitlesProvider(Provider):
# title
title = title_soup.find("a").text.replace("Subtitulos de ", "")
# filter by year
if video.year and str(video.year) not in title:
continue
page_link = title_soup.find("a")["href"]
# description
@ -215,7 +221,7 @@ class SubdivxSubtitlesProvider(Provider):
response = self.session.get(subtitle.page_link, timeout=20)
self._check_response(response)
try:
page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
page_soup = ParserBeautifulSoup(response.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
links_soup = page_soup.find_all("a", {'class': 'detalle_link'})
for link_soup in links_soup:
if link_soup['href'].startswith('bajar'):
@ -229,7 +235,13 @@ class SubdivxSubtitlesProvider(Provider):
raise APIThrottled('Download link not found')
def _get_archive(self, content):
@staticmethod
def _check_response(response):
    """Raise ServiceUnavailable for any non-200 HTTP response."""
    status = response.status_code
    if status != 200:
        raise ServiceUnavailable('Bad status code: ' + str(status))
@staticmethod
def _get_archive(content):
# open the archive
archive_stream = io.BytesIO(content)
if rarfile.is_rarfile(archive_stream):
@ -243,35 +255,47 @@ class SubdivxSubtitlesProvider(Provider):
return archive
def _get_subtitle_from_archive(self, archive, subtitle):
_max_score = 0
_scores = get_scores (subtitle.video)
@staticmethod
def _get_subtitle_from_archive(archive, subtitle):
_valid_names = []
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
if not os.path.split(name)[-1].startswith('.') and name.lower().endswith(SUBTITLE_EXTENSIONS):
_valid_names.append(name)
# archive with only 1 subtitle
if len(_valid_names) == 1:
logger.debug("returning from archive: {} (single subtitle file)".format(_valid_names[0]))
return archive.read(_valid_names[0])
# in archives with more than 1 subtitle (season pack) we try to guess the best subtitle file
_scores = get_scores(subtitle.video)
_max_score = 0
_max_name = ""
for name in _valid_names:
_guess = guessit(name)
if 'season' not in _guess:
_guess['season'] = -1
if 'episode' not in _guess:
_guess['episode'] = -1
_guess = guessit (name)
if isinstance(subtitle.video, Episode):
logger.debug ("guessing %s" % name)
logger.debug("subtitle S{}E{} video S{}E{}".format(_guess['season'],_guess['episode'],subtitle.video.season,subtitle.video.episode))
logger.debug("guessing %s" % name)
logger.debug("subtitle S{}E{} video S{}E{}".format(
_guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode))
if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']:
logger.debug('subtitle does not match video, skipping')
continue
matches = set()
matches |= guess_matches (subtitle.video, _guess)
_score = sum ((_scores.get (match, 0) for match in matches))
matches |= guess_matches(subtitle.video, _guess)
_score = sum((_scores.get(match, 0) for match in matches))
logger.debug('srt matches: %s, score %d' % (matches, _score))
if _score > _max_score:
_max_name = name
_max_score = _score
_max_name = name
logger.debug("new max: {} {}".format(name, _score))
if _max_score > 0:

@ -310,7 +310,7 @@ class SubsceneProvider(Provider, ProviderSubtitleArchiveMixin):
# re-search for episodes without explicit release name
if isinstance(video, Episode):
titles = list(set([video.series] + video.alternative_series))[:2]
titles = list(set([video.series] + video.alternative_series[:1]))
# term = u"%s S%02iE%02i" % (video.series, video.season, video.episode)
more_than_one = len(titles) > 1
for series in titles:
@ -339,7 +339,7 @@ class SubsceneProvider(Provider, ProviderSubtitleArchiveMixin):
if more_than_one:
time.sleep(self.search_throttle)
else:
titles = list(set([video.title] + video.alternative_titles))[:2]
titles = list(set([video.title] + video.alternative_titles[:1]))
more_than_one = len(titles) > 1
for title in titles:
logger.debug('Searching for movie results: %r', title)

@ -1,75 +1,69 @@
# -*- coding: utf-8 -*-
import json
import logging
import os
import re
import io
from babelfish import language_converters
from guessit import guessit
from requests import Session
from subzero.language import Language
from subliminal import Movie, Episode, ProviderError, __short_version__
from subliminal.exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded, ProviderError
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.providers import ParserBeautifulSoup
from subliminal.subtitle import fix_line_ending, SUBTITLE_EXTENSIONS
from subliminal_patch.providers import Provider
logger = logging.getLogger(__name__)
server_url = 'https://subtitulamos.tv/'
class SubtitulamosTVSubtitle(Subtitle):
provider_name = 'subtitulamostv'
hash_verifiable = False
def __init__(self, language, page_link, download_link, description, title, matches, release_info):
super(SubtitulamosTVSubtitle, self).__init__(language, hearing_impaired=False,
page_link=page_link)
def __init__(self, language, page_link, download_link, title, release_info):
super(SubtitulamosTVSubtitle, self).__init__(language, hearing_impaired=False, page_link=page_link)
self.download_link = download_link
self.description = description.lower()
self.title = title
self.release_info = release_info
self.found_matches = matches
@property
def id(self):
return self.download_link
def get_matches(self, video):
matches = self.found_matches
matches = {'series', 'season', 'episode', 'year'}
title_lower = self.title.lower()
release_info_lower = self.release_info.lower()
# release_group
if video.release_group and video.release_group.lower() in self.description:
if video.title and video.title.lower() in title_lower:
matches.add('title')
if video.release_group and video.release_group.lower() in release_info_lower:
matches.add('release_group')
# resolution
if video.resolution and video.resolution.lower() in self.description:
if video.resolution and video.resolution.lower() in release_info_lower:
matches.add('resolution')
# source
if video.source:
formats = [video.source.lower()]
if formats[0] == "web":
formats.append("webdl")
formats.append("web-dl")
formats.append("webrip")
formats.append("web ")
for frmt in formats:
if frmt.lower() in self.description:
if frmt in release_info_lower:
matches.add('source')
break
# video_codec
if video.video_codec:
video_codecs = [video.video_codec.lower()]
if video_codecs[0] == "H.264":
formats.append("x264")
elif video_codecs[0] == "H.265":
formats.append("x265")
for vc in formats:
if vc.lower() in self.description:
if video_codecs[0] == "h.264":
video_codecs.append("h264")
video_codecs.append("x264")
elif video_codecs[0] == "h.265":
video_codecs.append("h265")
video_codecs.append("x265")
for vc in video_codecs:
if vc in release_info_lower:
matches.add('video_codec')
break
@ -78,9 +72,14 @@ class SubtitulamosTVSubtitle(Subtitle):
class SubtitulamosTVProvider(Provider):
"""Subtitulamostv Provider"""
languages = {Language.fromietf(l) for l in ['en','es']}
languages = {Language.fromietf(lang) for lang in ['en', 'es']}
video_types = (Episode,)
server_url = 'https://subtitulamos.tv'
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers = {
@ -90,58 +89,56 @@ class SubtitulamosTVProvider(Provider):
self.session.close()
def query(self, languages, video):
# query the server
result = None
year = (" (%d)" % video.year) if video.year else ""
q = "%s%s %dx%02d" % (video.series, year, video.season, video.episode)
logger.debug('Searching subtitles "%s"', q)
subtitle_name = "%s %dx%02d" % (video.series, video.season, video.episode)
logger.debug('Searching subtitles "%s"' % subtitle_name)
res = self.session.get(
server_url + 'search/query', params={'q':q}, timeout=10)
res.raise_for_status()
result = res.json()
response = self.session.get(self.server_url + '/search/query', params={'q': video.series}, timeout=10)
response.raise_for_status()
result = response.json()
subtitles = []
for s in [s for s in result if len(s['episodes'])]:
for e in s['episodes']:
res = self.session.get(
server_url + 'episodes/%d' % e['id'], timeout=10)
res.raise_for_status()
html = res.text
for lang_m in re.finditer(r"<div class=\"subtitle_language\">(.*?)<\/div>.*?(?=<div class=\"subtitle_language\">|<div id=\"subtitle-actions\">)", html, re.S):
lang = lang_m.group(1)
language = "es"
if "English" in lang:
for serie in result:
# skip non-matching series
if video.series.lower() != serie['name'].lower():
continue
response = self.session.get(self.server_url + "/shows/%d/season/%d" % (serie['id'], video.season),
timeout=10)
response.raise_for_status()
soup = ParserBeautifulSoup(response.text, ['lxml', 'html.parser'])
for episode in soup.select('div.episode'):
episode_soup = episode.find('a')
episode_name = episode_soup.text
episode_url = episode_soup['href']
# skip non-matching episodes
if subtitle_name.lower() not in episode_name.lower():
continue
for lang in episode.select("div.subtitle-language"):
if "English" in lang.text:
language = "en"
elif "Español" in lang.text:
language = "es"
else:
continue # not supported yet
logger.debug('Found subtitles in "%s" language.', language)
for subt_m in re.finditer(r"<div class=\"version_name\">(.*?)</div>.*?<a href=\"/(subtitles/\d+/download)\" rel=\"nofollow\">(?:.*?<div class=\"version_comments ?\">.*?</i>(.*?)</p>)?", lang_m.group(0), re.S):
matches = set()
if video.alternative_series is None:
if video.series.lower() == s['name'].lower():
matches.add('series')
elif s['name'].lower() in [video.series.lower()]+list(map(lambda name: name.lower(), video.alternative_series)):
matches.add('series')
if video.season == e['season']:
matches.add('season')
if video.episode == e['number']:
matches.add('episode')
if video.title == e['name']:
matches.add('title')
#if video.year is None or ("(%d)" % video.year) in s['name']:
matches.add('year')
for release in lang.find_next_sibling("div").select("div.sub"):
release_name = release.select('div.version-name')[0].text
release_url = release.select('a[href*="/download"]')[0]['href']
subtitles.append(
SubtitulamosTVSubtitle(
Language.fromietf(language),
server_url + 'episodes/%d' % e['id'],
server_url + subt_m.group(2),
subt_m.group(1)+(subt_m.group(3) if not subt_m.group(3) is None else ""),
e['name'],
matches,
'%s %dx%d,%s,%s' % (s['name'], e['season'], e['number'], subt_m.group(1), lang_m.group(1)),
Language.fromietf(language),
self.server_url + episode_url,
self.server_url + release_url,
episode_name,
release_name
)
)
return subtitles
def list_subtitles(self, video, languages):

@ -0,0 +1,215 @@
# -*- coding: utf-8 -*-
import io
import logging
import os
import zipfile
import rarfile
from requests import Session
from subliminal import Episode, Movie
from subliminal.exceptions import ServiceUnavailable
from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
logger = logging.getLogger(__name__)
server_url = "http://sapi.caretas.club/"
page_url = "https://sucha.caretas.club/"
class SuchaSubtitle(Subtitle):
    """Subtitle returned by the Sucha API (sapi.caretas.club)."""

    provider_name = "sucha"
    hash_verifiable = False

    def __init__(
        self,
        language,
        page_link,
        filename,
        guessit_dict,
        download_link,
        hearing_impaired,
        matches,
    ):
        # NOTE: the super() page_link is the provider's public home page;
        # the API-supplied per-subtitle referer (needed for downloads) is
        # kept separately in self.referer.
        super(SuchaSubtitle, self).__init__(
            language, hearing_impaired=hearing_impaired, page_link=page_url
        )
        self.download_link = download_link
        self.referer = page_link
        # Release metadata pre-guessed by the API for this subtitle.
        self.guessit = guessit_dict
        self.language = language
        self.release_info = filename
        self.filename = filename
        self.found_matches = matches

    @property
    def id(self):
        return self.download_link

    def _guessed(self, key):
        # The API is not guaranteed to return every guessit field; a missing
        # key must not raise KeyError and abort scoring, so fall back to an
        # empty string (which never matches a non-empty needle).
        return str(self.guessit.get(key, "")).lower()

    def get_matches(self, video):
        """Extend the provider-computed matches with release metadata checks.

        :param video: the subliminal video being scored.
        :return: the (mutated) set of match names.
        """
        if (
            video.release_group
            and str(video.release_group).lower() in self.filename.lower()
        ):
            self.found_matches.add("release_group")

        if video.source and video.source.lower() in self._guessed("source"):
            self.found_matches.add("source")

        if video.resolution and video.resolution.lower() in self._guessed(
            "resolution"
        ):
            self.found_matches.add("resolution")

        if video.audio_codec and video.audio_codec.lower() in self._guessed(
            "audio_codec"
        ):
            self.found_matches.add("audio_codec")

        if video.video_codec and video.video_codec.lower() in self._guessed(
            "video_codec"
        ):
            self.found_matches.add("video_codec")

        return self.found_matches
class SuchaProvider(Provider):
    """Sucha Provider.

    Searches the Sucha API (sapi.caretas.club) for Spanish subtitles and
    downloads them from zip/rar archives returned by the site.
    """

    languages = {Language.fromalpha2(lang) for lang in ["es"]}
    language_list = list(languages)
    logger.debug(languages)
    video_types = (Episode, Movie)

    def initialize(self):
        self.session = Session()
        self.session.headers = {
            "User-Agent": os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")
        }

    def terminate(self):
        self.session.close()

    def query(self, languages, video):
        """Query the API and return a list of SuchaSubtitle candidates.

        :param languages: requested languages (unused; the provider is
            Spanish-only — presumably filtered upstream, TODO confirm).
        :param video: the subliminal Episode or Movie to search for.
        """
        movie_year = video.year if video.year else None
        is_episode = isinstance(video, Episode)
        imdb_id = video.imdb_id if video.imdb_id else None
        language = self.language_list[0]

        if is_episode:
            q = {
                "query": "{} S{:02}E{:02}".format(
                    video.series, video.season, video.episode
                )
            }
        else:
            # Prefer the IMDB id for movies: it is unambiguous, unlike titles.
            if imdb_id:
                q = {"query": imdb_id}
            else:
                q = {"query": video.title, "year": movie_year}

        logger.debug("Searching subtitles: {}".format(q["query"]))
        res = self.session.get(server_url + "search", params=q, timeout=10)
        res.raise_for_status()
        result = res.json()

        try:
            subtitles = []
            for i in result["results"]:
                matches = set()
                # We use 'in' instead of '==' since Subdivx titles are
                # irregular
                if video.title.lower() in i["title"].lower():
                    matches.add("title")
                if is_episode:
                    if q["query"].lower() in i["title"].lower():
                        matches.add("title")
                        matches.add("series")
                        matches.add("imdb_id")
                        matches.add("season")
                        matches.add("episode")
                        matches.add("year")
                if i["year"] == video.year:
                    matches.add("year")
                if imdb_id:
                    matches.add("imdb_id")

                # We'll add release group info (if found) to the pseudo filename
                # in order to show it in the manual search
                filename = i["pseudo_file"]
                # FIX: lowercase BOTH sides of the comparison; the original
                # only lowercased the release group, so matches were missed
                # whenever the description used different casing.
                if (
                    video.release_group
                    and str(video.release_group).lower()
                    in i["original_description"].lower()
                ):
                    filename = i["pseudo_file"].replace(
                        ".es.srt", "-" + str(video.release_group) + ".es.srt"
                    )

                subtitles.append(
                    SuchaSubtitle(
                        language,
                        i["referer"],
                        filename,
                        i["guessit"],
                        i["download_url"],
                        i["hearing_impaired"],
                        matches,
                    )
                )
            return subtitles
        except KeyError:
            # The API omits "results" (or per-item keys) when nothing matched;
            # treat that as an empty result set rather than an error.
            logger.debug("No subtitles found")
            return []

    def list_subtitles(self, video, languages):
        return self.query(languages, video)

    def _check_response(self, response):
        # Belt-and-braces after raise_for_status(): reject any non-200 reply.
        if response.status_code != 200:
            raise ServiceUnavailable("Bad status code: " + str(response.status_code))

    def _get_archive(self, content):
        """Detect the compressed container from raw bytes and open it."""
        archive_stream = io.BytesIO(content)
        if rarfile.is_rarfile(archive_stream):
            logger.debug("Identified rar archive")
            archive = rarfile.RarFile(archive_stream)
        elif zipfile.is_zipfile(archive_stream):
            logger.debug("Identified zip archive")
            archive = zipfile.ZipFile(archive_stream)
        else:
            raise APIThrottled("Unsupported compressed format")
        return archive

    def get_file(self, archive):
        """Return the bytes of the first usable Spanish subtitle in the archive.

        :raises APIThrottled: when no acceptable subtitle file is found.
        """
        for name in archive.namelist():
            # Skip hidden/metadata entries (e.g. macOS "._" resource forks).
            if os.path.split(name)[-1].startswith("."):
                continue
            if not name.lower().endswith(SUBTITLE_EXTENSIONS):
                continue
            # Skip English entries: this provider serves Spanish only.
            if (
                "[eng]" in name.lower()
                or ".en." in name.lower()
                or ".eng." in name.lower()
            ):
                continue
            logger.debug("Returning from archive: {}".format(name))
            return archive.read(name)
        raise APIThrottled("Can not find the subtitle in the compressed file")

    def download_subtitle(self, subtitle):
        logger.info("Downloading subtitle %r", subtitle)
        response = self.session.get(
            subtitle.download_link, headers={"Referer": subtitle.page_link}, timeout=10
        )
        response.raise_for_status()
        self._check_response(response)
        archive = self._get_archive(response.content)
        subtitle_file = self.get_file(archive)
        subtitle.content = fix_line_ending(subtitle_file)

@ -1,9 +1,5 @@
# coding=utf-8
from __future__ import absolute_import
import io
import six
import os
from pkg_resources import require
import logging
import re
import os
@ -12,19 +8,20 @@ import time
from babelfish import language_converters
from subzero.language import Language
from requests import Session
import urllib.parse
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal.providers import ParserBeautifulSoup
from subliminal_patch.exceptions import ProviderError
from bs4.element import Tag, NavigableString
from subliminal.score import get_equivalent_release_groups
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.utils import sanitize, sanitize_release_group
from subliminal.video import Episode, Movie
from zipfile import ZipFile, is_zipfile
from zipfile import ZipFile
from rarfile import RarFile, is_rarfile
from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
from subliminal_patch.utils import sanitize
from guessit import guessit
@ -51,7 +48,7 @@ class SuperSubtitlesSubtitle(Subtitle):
return subtit.encode('utf-8')
def __init__(self, language, page_link, subtitle_id, series, season, episode, version,
releases, year, imdb_id, asked_for_episode=None, asked_for_release_group=None):
releases, year, imdb_id, uploader, asked_for_episode=None, asked_for_release_group=None):
super(SuperSubtitlesSubtitle, self).__init__(language, page_link=page_link)
self.subtitle_id = subtitle_id
self.series = series
@ -60,6 +57,7 @@ class SuperSubtitlesSubtitle(Subtitle):
self.version = version
self.releases = releases
self.year = year
self.uploader = uploader
if year:
self.year = int(year)
@ -69,6 +67,7 @@ class SuperSubtitlesSubtitle(Subtitle):
self.asked_for_episode = asked_for_episode
self.imdb_id = imdb_id
self.is_pack = True
self.matches = None
def numeric_id(self):
return self.subtitle_id
@ -139,16 +138,18 @@ class SuperSubtitlesSubtitle(Subtitle):
class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
"""SuperSubtitles Provider."""
languages = {Language('hun', 'HU')} | {Language(l) for l in [
languages = {Language('hun', 'HU')} | {Language(lang) for lang in [
'hun', 'eng'
]}
video_types = (Episode, Movie)
# https://www.feliratok.info/?search=&soriSorszam=&nyelv=&sorozatnev=The+Flash+%282014%29&sid=3212&complexsearch=true&knyelv=0&evad=4&epizod1=1&cimke=0&minoseg=0&rlsr=0&tab=all
server_url = 'https://www.feliratok.info/'
subtitle_class = SuperSubtitlesSubtitle
hearing_impaired_verifiable = False
multi_result_throttle = 2 # seconds
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers = {'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")}
@ -156,7 +157,8 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
def terminate(self):
self.session.close()
def get_language(self, text):
@staticmethod
def get_language(text):
if text == 'Magyar':
return Language.fromsupersubtitles('hu')
if text == 'Angol':
@ -178,8 +180,10 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
for value in links:
if "imdb.com" in str(value):
# <a alt="iMDB" href="http://www.imdb.com/title/tt2357547/" target="_blank"><img alt="iMDB" src="img/adatlap/imdb.png"/></a>
imdb_id = re.findall(r'(?<=www\.imdb\.com/title/).*(?=/")', str(value))[0]
# <a alt="iMDB" href="http://www.imdb.com/title/tt2357547/" target="_blank"><img alt="iMDB"
# src="img/adatlap/imdb.png"/></a>
imdb_id = re.search(r'(?<=www\.imdb\.com/title/).*(?=/")', str(value))
imdb_id = imdb_id.group() if imdb_id else ''
return imdb_id
return None
@ -190,8 +194,8 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
https://www.feliratok.info/index.php?term=SERIESNAME&nyelv=0&action=autoname
Where SERIESNAME is a searchable string.
The result will be something like this:
[{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)","ID":"3789"}
,{"name":"No Tomorrow (2016)","ID":"4179"}]
[{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)",
"ID":"3789"},{"name":"No Tomorrow (2016)","ID":"4179"}]
"""
@ -202,8 +206,8 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
r = self.session.get(url, timeout=10)
# r is something like this:
# [{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)","ID":"3789"}
# ,{"name":"No Tomorrow (2016)","ID":"4179"}]
# [{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)",
# "ID":"3789"},{"name":"No Tomorrow (2016)","ID":"4179"}]
results = r.json()
@ -211,13 +215,15 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
for result in results:
try:
# "name":"Miles from Tomorrowland (2015)","ID":"3789"
result_year = re.findall(r"(?<=\()\d\d\d\d(?=\))", result['name'])[0]
result_year = re.search(r"(?<=\()\d\d\d\d(?=\))", result['name'])
result_year = result_year.group() if result_year else ''
except IndexError:
result_year = ""
try:
# "name":"Miles from Tomorrowland (2015)","ID":"3789"
result_title = re.findall(r".*(?=\(\d\d\d\d\))", result['name'])[0]
result_title = re.search(r".*(?=\(\d\d\d\d\))", result['name'])
result_title = result_title.group() if result_title else ''
result_id = result['ID']
except IndexError:
continue
@ -227,26 +233,30 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
guessable = result_title.strip() + ".s01e01." + result_year
guess = guessit(guessable, {'type': "episode"})
if sanitize(original_title) == sanitize(guess['title']) and year and guess['year'] and year == guess['year']:
if sanitize(original_title) == sanitize(guess['title']) and year and guess['year'] and \
year == guess['year']:
# Return the founded id
return result_id
elif sanitize(original_title) == sanitize(guess['title']) and not year:
# Return the founded id
return result_id
return None
def query(self, series, video=None):
def query(self, series, languages, video=None):
year = video.year
subtitle = None
if isinstance(video, Episode):
series = video.series
season = video.season
episode = video.episode
#seriesa = series.replace(' ', '+')
# seriesa = series.replace(' ', '+')
# Get ID of series with original name
series_id = self.find_id(series, year, series)
if not series_id:
# If not founded try without ' char
modified_series = series.replace(' ', '+').replace('\'', '')
modified_series = urllib.parse.quote_plus(series.replace('\'', ''))
series_id = self.find_id(modified_series, year, series)
if not series_id and modified_series:
# If still not founded try with the longest word is series title
@ -257,29 +267,30 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
if not series_id:
return None
# https://www.feliratok.info/index.php?search=&soriSorszam=&nyelv=&sorozatnev=&sid=2075&complexsearch=true&knyelv=0&evad=6&epizod1=16&cimke=0&minoseg=0&rlsr=0&tab=all
# https://www.feliratok.info/index.php?search=&soriSorszam=&nyelv=&sorozatnev=&sid=2075&complexsearch=true&
# knyelv=0&evad=6&epizod1=16&cimke=0&minoseg=0&rlsr=0&tab=all
url = self.server_url + "index.php?search=&soriSorszam=&nyelv=&sorozatnev=&sid=" + \
str(series_id) + "&complexsearch=true&knyelv=0&evad=" + str(season) + "&epizod1=" + str(
episode) + "&cimke=0&minoseg=0&rlsr=0&tab=all"
subtitle = self.process_subs(series, video, url)
str(series_id) + "&complexsearch=true&knyelv=0&evad=" + str(season) + "&epizod1=" + \
str(episode) + "&cimke=0&minoseg=0&rlsr=0&tab=all"
subtitle = self.process_subs(languages, video, url)
if not subtitle:
# No Subtitle found. Maybe already archived to season pack
url = self.server_url + "index.php?search=&soriSorszam=&nyelv=&sorozatnev=&sid=" + \
str(series_id) + "&complexsearch=true&knyelv=0&evad=" + str(
season) + "&epizod1=&evadpakk=on&cimke=0&minoseg=0&rlsr=0&tab=all"
subtitle = self.process_subs(series, video, url)
str(series_id) + "&complexsearch=true&knyelv=0&evad=" + \
str(season) + "&epizod1=&evadpakk=on&cimke=0&minoseg=0&rlsr=0&tab=all"
subtitle = self.process_subs(languages, video, url)
if isinstance(video, Movie):
title = series.replace(" ", "+")
title = urllib.parse.quote_plus(series)
# https://www.feliratok.info/index.php?search=The+Hitman%27s+BodyGuard&soriSorszam=&nyelv=&tab=film
url = self.server_url + "index.php?search=" + title + "&soriSorszam=&nyelv=&tab=film"
subtitle = self.process_subs(series, video, url)
subtitle = self.process_subs(languages, video, url)
return subtitle
def process_subs(self, series, video, url):
def process_subs(self, languages, video, url):
subtitles = []
@ -293,54 +304,69 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
series_imdb_id = None
for table in tables:
if "vilagit" in str(table) and i > 1:
try:
sub_hun_name = table.findAll("div", {"class": "magyar"})[0]
if isinstance(video, Episode):
if "vad)" not in str(sub_hun_name):
# <div class="magyar">A pletykaf<61>szek (3. <20>vad)</div>
sub_hun_name = re.findall(r'(?<=<div class="magyar">).*(?= -)', str(sub_hun_name))[0]
else:
# <div class="magyar">A holnap legend<6E>i - 3x11</div>
sub_hun_name = re.findall(r'(?<=<div class="magyar">).*(?= \()', str(sub_hun_name))[0]
if isinstance(video, Movie):
sub_hun_name = re.findall(r'(?<=<div class="magyar">).*(?=</div)', str(sub_hun_name))[0]
except IndexError:
sub_hun_name = ""
asked_for_episode = None
sub_season = None
sub_episode = None
sub_english = table.findAll("div", {"class": "eredeti"})
sub_english_name = None
if isinstance(video, Episode):
asked_for_episode = video.episode
if "Season" not in str(sub_english):
# [<div class="eredeti">Gossip Girl (Season 3) (DVDRip-REWARD)</div>]
sub_english_name = re.findall(r'(?<=<div class="eredeti">).*?(?= -)', str(sub_english))[0]
sub_season = int((re.findall(r"(?<=- ).*?(?= - )", str(sub_english))[0].split('x')[0]).strip())
sub_episode = int((re.findall(r"(?<=- ).*?(?= - )", str(sub_english))[0].split('x')[1]).strip())
sub_english_name = re.search(r'(?<=<div class="eredeti">).*?(?= -)',
str(sub_english))
sub_english_name = sub_english_name.group() if sub_english_name else ''
sub_season = re.search(r"(?<=- ).*?(?= - )", str(sub_english))
sub_season = sub_season.group() if sub_season else ''
sub_season = int((sub_season.split('x')[0]).strip())
sub_episode = re.search(r"(?<=- ).*?(?= - )", str(sub_english))
sub_episode = sub_episode.group() if sub_episode else ''
sub_episode = int((sub_episode.split('x')[1]).strip())
else:
# [<div class="eredeti">DC's Legends of Tomorrow - 3x11 - Here I Go Again (HDTV-AFG, HDTV-RMX, 720p-SVA, 720p-PSA </div>]
# [<div class="eredeti">DC's Legends of Tomorrow - 3x11 - Here I Go Again (HDTV-AFG, HDTV-RMX,
# 720p-SVA, 720p-PSA </div>]
sub_english_name = \
re.findall(r'(?<=<div class="eredeti">).*?(?=\(Season)', str(sub_english))[0]
sub_season = int(re.findall(r"(?<=Season )\d+(?=\))", str(sub_english))[0])
re.search(r'(?<=<div class="eredeti">).*?(?=\(Season)', str(sub_english))
sub_english_name = sub_english_name.group() if sub_english_name else ''
sub_season = re.search(r"(?<=Season )\d+(?=\))", str(sub_english))
sub_season = int(sub_season.group()) if sub_season else None
sub_episode = int(video.episode)
if isinstance(video, Movie):
sub_english_name = re.findall(r'(?<=<div class="eredeti">).*?(?=\()', str(sub_english))[0]
sub_english_name = re.search(r'(?<=<div class="eredeti">).*?(?=</div>)', str(sub_english))
sub_english_name = sub_english_name.group() if sub_english_name else ''
sub_english_name = sub_english_name.split(' (')[0]
sub_version = (str(sub_english).split('(')[len(str(sub_english).split('(')) - 1]).split(')')[0]
sub_version = 'n/a'
if len(str(sub_english).split('(')) > 1:
sub_version = (str(sub_english).split('(')[len(str(sub_english).split('(')) - 1]).split(')')[0]
# <small>Angol</small>
lang = table.findAll("small")[0]
sub_language = self.get_language(re.findall(r"(?<=<small>).*(?=</small>)", str(lang))[0])
lang = table.find("small")
sub_language = re.search(r"(?<=<small>).*(?=</small>)", str(lang))
sub_language = sub_language.group() if sub_language else ''
sub_language = self.get_language(sub_language)
# <a href="/index.php?action=letolt&amp;fnev=DCs Legends of Tomorrow - 03x11 - Here I Go Again.SVA.English.C.orig.Addic7ed.com.srt&amp;felirat=1519162191">
# <a href="/index.php?action=letolt&amp;fnev=DCs Legends of Tomorrow - 03x11 - Here I Go Again.SVA.
# English.C.orig.Addic7ed.com.srt&amp;felirat=1519162191">
link = str(table.findAll("a")[len(table.findAll("a")) - 1]).replace("amp;", "")
sub_downloadlink = self.server_url + re.findall(r'(?<=href="/).*(?=">)', link)[0]
sub_downloadlink = re.search(r'(?<=href="/).*(?=">)', link)
sub_downloadlink = sub_downloadlink.group() if sub_downloadlink else ''
sub_downloadlink = self.server_url + sub_downloadlink
sub_id = re.findall(r"(?<=felirat\=).*(?=\"\>)", link)[0]
sub_id = re.search(r"(?<=felirat=).*(?=\">)", link)
sub_id = sub_id.group() if sub_id else ''
sub_year = video.year
sub_releases = [s.strip() for s in sub_version.split(',')]
uploader = ''
for item in table.contents[7].contents:
if isinstance(item, Tag):
uploader = item.text.lstrip('\r\n\t\t\t\t\t').rstrip('\r\n\t\t\t\t')
elif isinstance(item, NavigableString):
uploader = item.lstrip('\r\n\t\t\t\t\t').rstrip('\r\n\t\t\t\t')
# For episodes we open the series page so all subtitles imdb_id must be the same. no need to check all
if isinstance(video, Episode) and series_imdb_id is not None:
sub_imdb_id = series_imdb_id
@ -348,26 +374,33 @@ class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
sub_imdb_id = self.find_imdb_id(sub_id)
series_imdb_id = sub_imdb_id
subtitle = SuperSubtitlesSubtitle(sub_language, sub_downloadlink, sub_id, sub_english_name.strip(), sub_season,
sub_episode, sub_version, sub_releases, sub_year, sub_imdb_id,
asked_for_episode, asked_for_release_group=video.release_group )
subtitles.append(subtitle)
subtitle = SuperSubtitlesSubtitle(sub_language, sub_downloadlink, sub_id, sub_english_name.strip(),
sub_season, sub_episode, sub_version, sub_releases, sub_year,
sub_imdb_id, uploader, asked_for_episode,
asked_for_release_group=video.release_group)
if subtitle.language in languages:
subtitles.append(subtitle)
i = i + 1
return subtitles
def list_subtitles(self, video, languages):
titles = []
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
elif isinstance(video, Movie):
titles = [video.title] + video.alternative_titles
subtitles = []
for title in titles:
subs = self.query(title, video=video)
subs = self.query(title, languages, video=video)
if subs:
return subs
for item in subs:
if item.series in titles:
subtitles.append(item)
time.sleep(self.multi_result_throttle)
return []
return subtitles
def download_subtitle(self, subtitle):

@ -0,0 +1,266 @@
# -*- coding: utf-8 -*-
import logging
from urllib import parse
import re
from bs4 import BeautifulSoup as bso
from requests import Session
from subzero.language import Language
from subliminal import Episode
from subliminal.exceptions import ServiceUnavailable
from subliminal_patch.subtitle import Subtitle
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
logger = logging.getLogger(__name__)
BASE = "https://www.tusubtitulo.com/series.php?/"
class TuSubtituloSubtitle(Subtitle):
    """A subtitle scraped from tusubtitulo.com."""

    provider_name = "tusubtitulo"
    hash_verifiable = False

    def __init__(self, language, filename, download_link, page_link, matches):
        super(TuSubtituloSubtitle, self).__init__(
            language, hearing_impaired=False, page_link=page_link
        )
        self.download_link = download_link
        self.page_link = page_link
        self.language = language
        self.release_info = filename
        self.filename = filename
        self.found_matches = matches

    @property
    def id(self):
        return self.download_link

    def get_matches(self, video):
        """Add release-metadata match names inferred from the release string."""
        release = self.release_info.lower()

        if video.resolution and video.resolution.lower() in release:
            self.found_matches.add("resolution")

        if video.source and video.source.lower() in release:
            self.found_matches.add("source")

        codec = video.video_codec
        if codec:
            # The site advertises encoder names (x264/x265) rather than the
            # canonical codec names guessit reports (H.264/H.265).
            if codec == "H.264" and "x264" in release:
                self.found_matches.add("video_codec")
            elif codec == "H.265" and "x265" in release:
                self.found_matches.add("video_codec")
            elif codec.lower() in release:
                self.found_matches.add("video_codec")

        if video.release_group and video.release_group.lower() in release:
            self.found_matches.add("release_group")

        if video.audio_codec:
            # Release strings use dots as separators ("DD.5.1" vs "DD 5.1").
            if video.audio_codec.lower().replace(" ", ".") in release:
                self.found_matches.add("audio_codec")

        return self.found_matches
class TuSubtituloProvider(Provider):
    """TuSubtitulo.com Provider

    Scrapes tusubtitulo.com HTML pages (there is no API): first the full
    series index, then the season page, pairing episode rows with the
    subtitle rows that follow them.
    """

    BASE = "https://www.tusubtitulo.com/series.php?/"
    languages = {Language.fromietf(lang) for lang in ["en", "es"]}
    logger.debug(languages)
    video_types = (Episode,)

    def initialize(self):
        # The site rejects requests without a browser-like User-Agent and a
        # matching referer header.
        self.session = Session()
        self.session.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
            "referer": "https://www.tusubtitulo.com",
        }

    def terminate(self):
        self.session.close()

    def index_titles(self):
        """Scrape the site's series index: one {"title", "url"} dict per show."""
        r = self.session.get(BASE)
        r.raise_for_status()
        soup = bso(r.content, "html.parser")
        titles = []
        for a in soup.find_all("a"):
            href_url = a.get("href")
            # Series links contain "show" in their href; other anchors are
            # navigation and are skipped.
            if "show" in href_url:
                titles.append({"title": a.text, "url": href_url})
        return titles

    def is_season_available(self, seasons, season):
        # `seasons` are season labels scraped from the page header row.
        # Returns True when `season` is listed, implicitly None otherwise.
        for i in seasons:
            if i == season:
                return True

    def title_available(self, item):
        """Extract episode number/id/url from a table row's cells.

        Returns None when the row is not an episode-title row (missing
        cells raise IndexError).
        """
        try:
            title_content = item[2].find_all("a")[0]
            # Titles look like "3x04 - Episode Name"; group(2) is the episode
            # number without leading zeros.
            episode_number = re.search(
                r".*\d+x(0+)?(\d+) - .*?", title_content.text
            ).group(2)
            episode_id = title_content.get("href").split("/")[4]
            return {
                "episode_number": episode_number,
                "episode_id": episode_id,
                "episode_url": title_content.get("href"),
            }
        except IndexError:
            return

    def source_separator(self, item):
        """Return the release/version string from a "Versión ..." row.

        Returns None for rows without version info (or with missing cells).
        """
        try:
            text = item[3].text.replace("\n", "")
            if "Vers" in text:
                source = text.replace("Versión ", "")
                if not source:
                    source = "Unknown"
                return source
        except IndexError:
            return

    def get_episodes(self, show_id, season):
        """Scrape one season page; return a list of completed-subtitle dicts.

        Returns None when the requested season is not listed for the show.
        """
        logger.debug("https://www.tusubtitulo.com/show/{}/{}".format(show_id, season))
        r2 = self.session.get(
            "https://www.tusubtitulo.com/show/{}/{}".format(show_id, season),
        )
        r2.raise_for_status()
        sopa = bso(r2.content, "lxml")
        tables = sopa.find_all("tr")
        # The second row lists the seasons available for this show.
        seasons = [i.text for i in tables[1].find_all("a")]
        if not self.is_season_available(seasons, season):
            logger.debug("Season not found")
            return
        season_subs = []
        episodes = []
        for tr in range(len(tables)):
            data = tables[tr].find_all("td")
            title = self.title_available(data)
            if title:
                episodes.append(title)
            source_var = self.source_separator(data)
            if source_var:
                # Rows following a "Versión ..." row each describe one
                # subtitle; walk down until the cells run out (IndexError).
                inc = 1
                while True:
                    try:
                        content = tables[tr + inc].find_all("td")
                        language = content[4].text
                        if "eng" in language.lower():
                            language = "en"
                        elif "esp" in language.lower():
                            language = "es"
                        else:
                            language = None
                        # A percentage in the progress cell means the
                        # translation is still incomplete.
                        completed = True if not "%" in content[5].text else False
                        url = content[6].find_all("a")[0].get("href")
                        sub_id = parse.parse_qs(parse.urlparse(url).query)["id"][0]
                        lang_id = parse.parse_qs(parse.urlparse(url).query)["lang"][0]
                        version_ = parse.parse_qs(parse.urlparse(url).query)["version"][
                            0
                        ]
                        download_url = (
                            "https://www.tusubtitulo.com/updated/{}/{}/{}".format(
                                lang_id, sub_id, version_
                            )
                        )
                        if language and completed:
                            season_subs.append(
                                {
                                    "episode_id": sub_id,
                                    "metadata": source_var,
                                    "download_url": download_url,
                                    "language": language,
                                }
                            )
                        inc += 1
                    except IndexError:
                        break
        # Join episode rows with their subtitle rows via the shared episode id.
        final_list = []
        for i in episodes:
            for t in season_subs:
                if i["episode_id"] == t["episode_id"]:
                    final_list.append(
                        {
                            "episode_number": i["episode_number"],
                            "episode_url": i["episode_url"],
                            "metadata": t["metadata"],
                            "download_url": t["download_url"],
                            "language": t["language"],
                        }
                    )
        return final_list

    def search(self, title, season, episode):
        """Find subtitles for one episode; returns a list or None.

        :param title: series name (matched case-insensitively, exact).
        :param season: season number as a string.
        :param episode: episode number as a string.
        """
        titles = self.index_titles()
        found_tv_show = None
        for i in titles:
            if title.lower() == i["title"].lower():
                found_tv_show = i
                break
        if not found_tv_show:
            logger.debug("Show not found")
            return
        tv_show_id = found_tv_show["url"].split("/")[2].replace(" ", "")
        results = self.get_episodes(tv_show_id, season)
        episode_list = []
        if results:
            for i in results:
                if i["episode_number"] == episode:
                    episode_list.append(i)
            if episode_list:
                return episode_list
            logger.debug("Episode not found")

    def query(self, languages, video):
        """Build TuSubtituloSubtitle objects for the given episode video."""
        query = "{} {} {}".format(video.series, video.season, video.episode)
        logger.debug("Searching subtitles: {}".format(query))
        results = self.search(video.series, str(video.season), str(video.episode))
        if results:
            subtitles = []
            for i in results:
                matches = set()
                # self.search only returns results for the specific episode
                matches.add("title")
                matches.add("series")
                matches.add("season")
                matches.add("episode")
                matches.add("year")
                subtitles.append(
                    TuSubtituloSubtitle(
                        Language.fromietf(i["language"]),
                        i["metadata"],
                        i["download_url"],
                        i["episode_url"],
                        matches,
                    )
                )
            return subtitles
        else:
            logger.debug("No subtitles found")
            return []

    def list_subtitles(self, video, languages):
        return self.query(languages, video)

    def _check_response(self, response):
        # Belt-and-braces after raise_for_status(): reject any non-200 reply.
        if response.status_code != 200:
            raise ServiceUnavailable("Bad status code: " + str(response.status_code))

    def download_subtitle(self, subtitle):
        logger.info("Downloading subtitle %r", subtitle)
        response = self.session.get(
            subtitle.download_link, headers={"Referer": subtitle.page_link}, timeout=10
        )
        response.raise_for_status()
        self._check_response(response)
        subtitle.content = fix_line_ending(response.content)

@ -243,20 +243,14 @@ def get_first_film(soup, section, year=None, session=None):
url = None
if not year:
url = SITE_DOMAIN + tag.findNext("ul").find("li").div.a.get("href")
else:
for t in tag.findNext("ul").findAll("li"):
if isinstance(t, NavigableString) or not t.div:
continue
if str(year) in t.div.a.string:
url = SITE_DOMAIN + t.div.a.get("href")
break
if not url:
# fallback to non-year results
logger.info("Falling back to non-year results as year wasn't found (%s)", year)
url = SITE_DOMAIN + tag.findNext("ul").find("li").div.a.get("href")
url = SITE_DOMAIN + tag.findNext("ul").find("li").div.a.get("href")
for t in tag.findNext("ul").findAll("li"):
if isinstance(t, NavigableString) or not t.div:
continue
if str(year) in t.div.a.string:
url = SITE_DOMAIN + t.div.a.get("href")
break
return Film.from_url(url, session=session)

@ -529,8 +529,10 @@
$('#buttons_bars').css('left', '30px');
} else if ($(window).width() <= 1152) {
$('#buttons_bars').width($('body').width() - 60);
$('#buttons_bars').css('left', '90px');
} else {
$('#buttons_bars').width($('body').width() - 240);
$('#buttons_bars').css('left', '270px');
}

@ -62,6 +62,10 @@
{% block bcright %}
<div class="d-flex m-t-5 justify-content-end">
<button class="btn btn-outline" id="mass_upload_button">
<div><i class="fas fa-cloud-upload-alt align-top text-themecolor text-center font-20" aria-hidden="true"></i></div>
<div class="align-bottom text-themecolor small text-center">Upload</div>
</button>
<button class="btn btn-outline" id="edit_button">
<div><i class="fas fa-wrench align-top text-themecolor text-center font-20" aria-hidden="true"></i></div>
<div class="align-bottom text-themecolor small text-center">Edit Series</div>
@ -219,6 +223,56 @@
</div>
</div>
<div id="massUploadModal" class="modal" tabindex="-1" role="dialog">
<div class="modal-dialog modal-xl" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title"><span id="mass_upload_title_span"></span></h5><br>
<button type="button" class="close" id="mass_upload_close_btn" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">&times;</span>
</button>
</div>
<form class="form" name="edit_form" id="mass_upload_form">
<div class="modal-body">
<div class="container-fluid">
<div class="row form-group">
<div class="custom-file">
<input type="file" multiple class="custom-file-input" id="mass_upload_file_list">
<label id="mass-upload-file-label" class="custom-file-label" for="upload">Choose files</label>
</div>
</div>
<!-- Store episodes we previous collect -->
<input type="hidden" id="mass-upload-exist-episodes" value=""/>
<table id="upload_table" class="table table-striped" style="width:100%">
<thead>
<tr>
<th></th>
<th style="text-align: left;">Filename</th>
<th style="text-align: left;">Season</th>
<th style="text-align: left;">Episode</th>
<th style="text-align: center;">Action</th>
</tr>
</thead>
</table>
</div>
</div>
<div class="modal-footer justify-content-between">
<div>
<div>
<select class="selectpicker" id="mass_upload_language_select" name="language"></select>
</div>
</div>
<div>
<button type="submit" id="mass_upload_save_button" class="btn btn-info">Upload</button>
<button type="button" class="btn btn-secondary" id="mass_upload_cancel_btn" data-dismiss="modal">Cancel</button>
</div>
</div>
</form>
</div>
</div>
</div>
<div id="editModal" class="modal" tabindex="-1" role="dialog">
<div class="modal-dialog modal-lg" role="document">
<div class="modal-content">
@ -507,6 +561,7 @@
$('#series_nav').addClass("active");
seriesDetailsRefresh();
episodesDetailsRefresh();
getLanguages();
getEnabledLanguages();
@ -760,7 +815,7 @@
pageLength: {{ settings.general.page_size_manual_search }},
lengthMenu: [ 5, 10, 15, 20, 25 ],
searching: true,
scrollX: true,
//scrollX: true, // causing dropdown to not show in short list
ordering: false,
processing: true,
serverSide: false,
@ -961,6 +1016,326 @@
});
});
// Lifecycle states for a row in the mass-upload table.
const UploadStatus = {
    ERROR: 0,   // no matching episode found, or the request/upload failed
    VALID: 1,   // matched to an existing episode; ready to upload
    UPLOAD: 2,  // request/upload currently in flight (spinner shown)
    DONE: 3     // upload finished successfully
};
// Open the mass-upload modal: (re)build the upload DataTable, clear any
// previous file selection, and populate the language picker from the
// series' enabled languages.
$('#mass_upload_button').on('click', function (e) {
e.preventDefault();
// destroy: true lets us re-initialize the table on every modal open.
$('#upload_table').DataTable({
destroy: true,
processing: true,
language: {
zeroRecords: 'Select Subtitles to Get Started',
processing: "Loading Subtitle..."
},
searching: false,
ordering: false,
lengthChange: false,
serverSide: false,
responsive: true,
columns: [
{
// Column 0: status icon derived from row.status (see UploadStatus).
data: null,
render: function(data, type, row) {
switch (data.status) {
case UploadStatus.VALID:
return '<i class="fas fa-check px-1"></i>'
case UploadStatus.UPLOAD:
return '<i class="spinner-border spinner-border-sm px-1" role="status" />'
case UploadStatus.DONE:
return '<i class="far fa-check-circle px-1"></i>'
case UploadStatus.ERROR:
default:
return '<i class="fas fa-exclamation-triangle px-1"></i>'
}
}
},
{data: 'filename'},
{
// Column 2: editable season number; flagged invalid when <= 0,
// read-only while a request/upload is in flight.
data: "season",
render: function(data, type, row) {
let cls = []
let readonly = false
if (data <= 0) {
if (row.status !== UploadStatus.UPLOAD) {
cls.push('is-invalid');
} else {
readonly = true
}
} else {
if (row.status === UploadStatus.UPLOAD) {
readonly = true;
}
}
return `<input type="text" ${readonly ? 'readonly' : ''} \
class="mass-upload-season-input form-control ${cls.join(' ')}" \
value="${data}" />`
}
},
{
// Column 3: editable episode number; same validity rules as season.
data: "episode",
render: function(data, type, row) {
let cls = []
let readonly = false
if (data <= 0) {
if (row.status !== UploadStatus.UPLOAD) {
cls.push('is-invalid');
} else {
readonly = true
}
} else {
if (row.status === UploadStatus.UPLOAD) {
readonly = true;
}
}
return `<input type="text" ${readonly ? 'readonly' : ''} \
class="mass-upload-episode-input form-control ${cls.join(' ')}" \
value="${data}" />`
}
},
{
// Column 4: per-row delete button (handled by a delegated listener).
data: null,
render: function(data, type, row) {
return `<a href="" class="mass-upload-del-button badge badge-secondary"><i class="far fa-trash-alt"></i></a>`
}
}
]
})
// reset: clear previous table rows and the file input/label.
$('#upload_table').DataTable().table().clear().draw();
$('#mass_upload_file_list').val("")
$('#mass-upload-file-label').text('Choose files')
$("#mass_upload_title_span")
.html(`${seriesDetails['title']} - Upload`);
// Rebuild the language <select> from the globally cached enabledLanguages.
$('#mass_upload_language_select')
.empty();
$.each(enabledLanguages, function (i, item) {
$('#mass_upload_language_select')
.append(`<option value="${item.code2}">${item.name}</option>`);
});
$("#mass_upload_language_select")
.selectpicker("refresh");
$('#mass_upload_forced_checkbox')
.val(seriesDetails['forced'])
.change();
// focus: false keeps DataTables' inline inputs usable inside the modal.
$('#massUploadModal')
.modal({
focus: false
});
});
// Remove a pending file's row from the upload table.
$('#upload_table').on('click', '.mass-upload-del-button', function (e) {
    e.preventDefault();
    const table = $('#upload_table').DataTable();
    const clickedRow = $(this).parents('tr');
    table.row(clickedRow).remove().draw();
});
// Re-validate a row after the user edits its season number: the row becomes
// VALID only if (season, episode) matches a known episode of the series.
$('#upload_table').on('change', '.mass-upload-season-input', function (e) {
    const newSeason = $(this).val();
    const tableRow = $('#upload_table').DataTable().row($(this).parents('tr'));
    const rowData = tableRow.data();
    rowData.season = newSeason;
    rowData.status = UploadStatus.ERROR;
    // Loose equality on purpose: input values are strings while the episode
    // data may be numeric. `exist` is left untouched when nothing matches.
    const match = episodesDetails.data.find(function (ep) {
        return ep.episode == rowData.episode && ep.season == rowData.season;
    });
    if (match !== undefined) {
        rowData.status = UploadStatus.VALID;
        rowData.exist = match;
    }
    tableRow.data(rowData).draw();
});
// Re-validate a row after the user edits its episode number: the row becomes
// VALID only if (season, episode) matches a known episode of the series.
$('#upload_table').on('change', '.mass-upload-episode-input', function (e) {
    const newEpisode = $(this).val();
    const tableRow = $('#upload_table').DataTable().row($(this).parents('tr'));
    const rowData = tableRow.data();
    rowData.episode = newEpisode;
    rowData.status = UploadStatus.ERROR;
    // Loose equality on purpose: input values are strings while the episode
    // data may be numeric. `exist` is left untouched when nothing matches.
    const match = episodesDetails.data.find(function (ep) {
        return ep.episode == rowData.episode && ep.season == rowData.season;
    });
    if (match !== undefined) {
        rowData.status = UploadStatus.VALID;
        rowData.exist = match;
    }
    tableRow.data(rowData).draw();
});
// Populate the upload table from the chosen files, asking the
// subtitle_name_info API to guess season/episode for each filename.
// Controls stay disabled until every guess request has settled.
$('#mass_upload_file_list').change(function() {
    let filelist = $('#mass_upload_file_list').get(0).files
    $('#mass-upload-file-label').text(`${filelist.length} Files`)
    $('#mass_upload_save_button').prop('disabled', true)
    $('#mass_upload_close_btn').prop('disabled', true);
    $('#mass_upload_cancel_btn').prop('disabled', true);
    let table = $('#upload_table').DataTable();
    table.table().clear().draw();
    const episodes = episodesDetails.data
    let promiselist = []
    for (const file of filelist) {
        const name = file.name;
        // Placeholder row (spinner) while the guess request is in flight.
        const object = {
            file: file,
            filename: name,
            season: 0,
            episode: 0,
            status: UploadStatus.UPLOAD,
            exist: null
        }
        const cacheRow = table.row.add(object)
        // BUGFIX: use success/error instead of complete. `complete` also
        // fires after a failed request, where responseJSON is undefined and
        // the old `data.responseJSON.data` access threw a TypeError.
        promiselist.push(Promise.resolve($.ajax({
            url: "{{ url_for('api.subtitlenameinfo') }}",
            type: "GET",
            dataType: "json",
            data: {
                filename: name
            },
            success: function(body) {
                // `body` is the parsed JSON response: {data: {...guessit...}}.
                const response = body.data;
                const season = (response.season ?? 1);
                let existdata = null
                for(const exist of episodes) {
                    if (exist.episode == response.episode && exist.season == season) {
                        existdata = exist;
                        break;
                    }
                }
                let complete = {
                    file: file,
                    filename: name,
                    season: season,
                    episode: response.episode ?? 0,
                    status: existdata != null ? UploadStatus.VALID : UploadStatus.ERROR,
                    exist: existdata,
                    row: cacheRow
                };
                table.row(cacheRow).data(complete)
                    .draw();
            },
            error: function(data) {
                let error = {
                    file: file,
                    filename: name,
                    season: 0,
                    episode: 0,
                    status: UploadStatus.ERROR,
                    exist: null,
                    row: cacheRow
                };
                table.row(cacheRow).data(error)
                    .draw();
            }
        })).catch(function() {
            // BUGFIX: swallow the rejection of a failed request so that
            // Promise.all below still resolves and re-enables the buttons;
            // the row was already marked ERROR by the error handler.
        }))
    }
    table.table().draw();
    Promise.all(promiselist)
        .then(function(){
            $('#mass_upload_save_button').prop('disabled', false)
            $('#mass_upload_close_btn').prop('disabled', false);
            $('#mass_upload_cancel_btn').prop('disabled', false);
        })
})
// Upload every VALID subtitle row to the episodessubtitlesupload API,
// then close the modal when all uploads succeeded.
$('#mass_upload_form').on('submit', function(e) {
    e.preventDefault();
    $('#mass_upload_save_button').html('<div class="spinner-border spinner-border-sm" role="status"></div>');
    const formdata = new FormData(document.getElementById("mass_upload_form"));
    const language = formdata.get("language");
    let table = $('#upload_table').DataTable();
    // Only rows matched to an existing episode are uploaded.
    const uploadlist = table.data().toArray().filter(function(item) {
        return item.status === UploadStatus.VALID
    });
    const promiselist = uploadlist.map(function(item) {
        const data = {
            sonarrSeriesId: item.exist.sonarrSeriesId,
            sonarrEpisodeId: item.exist.sonarrEpisodeId,
            language: language,
            upload: item.file,
            episodePath: item.exist.mapped_path,
            // sceneName is intentionally not sent
            title: item.exist.title,
            audioLanguage: item.exist.audio_language.name,
            forced: false
        }
        const form = new FormData()
        for(const key in data) {
            form.append(key, data[key])
        }
        const cacheRow = item.row ?? null
        item.status = UploadStatus.UPLOAD;
        let row = table.row(cacheRow);
        row.data(item).draw()
        // BUGFIX: use success instead of complete. `complete` also fires
        // after `error`, so a failed upload was re-marked DONE.
        return Promise.resolve($.ajax({
            url: "{{ url_for('api.episodessubtitlesupload') }}",
            data: form,
            processData: false,
            contentType: false,
            type: 'POST',
            success: function(e) {
                item.status = UploadStatus.DONE;
                row.data(item).draw()
            },
            error: function(e) {
                item.status = UploadStatus.ERROR;
                row.data(item).draw()
            }
        }));
    })
    Promise.all(promiselist)
        .then(function(){
            $('#massUploadModal').modal('hide');
        })
        .catch(function() {
            // Keep the modal open so failed rows remain visible.
        })
        .finally(function() {
            $('#mass_upload_save_button').html('Upload');
        })
})
$('#edit_button').on('click', function (e) {
e.preventDefault();
$("#edit_series_title_span").html(seriesDetails['title']);
@ -1413,6 +1788,21 @@
});
// Fetch the episode list for the current series and cache it in the
// global `episodesDetails` (used to match uploaded filenames to episodes).
function episodesDetailsRefresh() {
    $.ajax({
        url: "{{ url_for('api.episodes') }}",
        type: "GET",
        dataType: "json",
        data: {
            seriesid: "{{id}}",
        },
        // BUGFIX: use success instead of complete. `complete` also fires on
        // a failed request, where responseJSON is undefined — that clobbered
        // episodesDetails and broke later `episodesDetails.data` accesses.
        success: function(response) {
            episodesDetails = response
        }
    })
}
function seriesDetailsRefresh() {
$.ajax({
url: "{{ url_for('api.series') }}?seriesid={{id}}"

@ -599,7 +599,7 @@
pageLength: {{ settings.general.page_size_manual_search }},
lengthMenu: [ 5, 10, 15, 20, 25 ],
searching: true,
scrollX: true,
//scrollX: true, // disabled: it prevented the dropdown from showing in short lists
ordering: false,
processing: true,
serverSide: false,

@ -516,7 +516,19 @@
<div class="form-group col-sm-8">
<label class="custom-control custom-checkbox">
<input type="checkbox" class="custom-control-input provider" id="subtitulamostv">
<span class="custom-control-label">Spanish subtitles provider.</span>
<span class="custom-control-label">Spanish Subtitles Provider.</span>
</label>
</div>
</div>
<div class="row">
<div class="col-sm-3 text-right">
<b>Sucha</b>
</div>
<div class="form-group col-sm-8">
<label class="custom-control custom-checkbox">
<input type="checkbox" class="custom-control-input provider" id="sucha">
<span class="custom-control-label">Spanish Subtitles Provider.</span>
</label>
</div>
</div>
@ -589,6 +601,18 @@
</div>
</div>
<div class="row">
<div class="col-sm-3 text-right">
<b>Tusubtitulo.com</b>
</div>
<div class="form-group col-sm-8">
<label class="custom-control custom-checkbox">
<input type="checkbox" class="custom-control-input provider" id="tusubtitulo">
<span class="custom-control-label">Spanish/English Subtitles Provider for TV Shows.</span>
</label>
</div>
</div>
<div class="row">
<div class="col-sm-3 text-right">
<b>TVSubtitles</b>

@ -14,6 +14,10 @@
.table tbody tr {
cursor: pointer;
}
td.dt-nowrap {
white-space: nowrap;
}
</style>
{% endblock page_head %}
@ -160,6 +164,10 @@
"visible": false,
"searchable": true
},
{
"targets": [ 3 ],
"className": "dt-nowrap"
},
{
"targets": [ 4 ],
"visible": false,

@ -2,6 +2,14 @@
{% block title %}Tasks - Bazarr{% endblock %}
{% block page_head %}
<style>
.execute {
cursor: pointer;
}
</style>
{% endblock page_head %}
{% block bcleft %}
<div class="">
<button class="btn btn-outline" id="refresh_button">

Loading…
Cancel
Save