Merge branch 'development' into python3

# Conflicts:
#	bazarr/get_movies.py
#	libs/subliminal_patch/core.py
#	libs/subliminal_patch/providers/subdivx.py
#	libs/subliminal_patch/providers/titlovi.py
pull/684/head
Louis Vézina 5 years ago
commit afd9cd6ddb

@ -67,7 +67,7 @@ defaults = {
'full_update_day': '6',
'full_update_hour': '4',
'only_monitored': 'False',
},
},
'radarr': {
'ip': '127.0.0.1',
'port': '7878',
@ -78,7 +78,7 @@ defaults = {
'full_update_day': '6',
'full_update_hour': '5',
'only_monitored': 'False',
},
},
'proxy': {
'type': 'None',
'url': '',
@ -132,6 +132,10 @@ defaults = {
},
'analytics': {
'enabled': 'True'
},
'titlovi': {
'username': '',
'password': ''
}
}

@ -5,6 +5,7 @@ import atexit
from get_args import args
from peewee import *
from playhouse.sqliteq import SqliteQueueDatabase
from playhouse.migrate import *
from helper import path_replace, path_replace_movie, path_replace_reverse, path_replace_reverse_movie
@ -15,6 +16,8 @@ database = SqliteQueueDatabase(
queue_max_size=256, # Max. # of pending writes that can accumulate.
results_timeout=30.0) # Max. time to wait for query to be executed.
migrator = SqliteMigrator(database)
@database.func('path_substitution')
def path_substitution(path):
@ -79,6 +82,11 @@ class TableEpisodes(BaseModel):
subtitles = TextField(null=True)
title = TextField(null=True)
video_codec = TextField(null=True)
episode_file_id = IntegerField(null=True)
migrate(
migrator.add_column('table_episodes', 'episode_file_id', episode_file_id),
)
class Meta:
table_name = 'table_episodes'
@ -110,6 +118,11 @@ class TableMovies(BaseModel):
tmdb_id = TextField(column_name='tmdbId', primary_key=True, null=False)
video_codec = TextField(null=True)
year = TextField(null=True)
movie_file_id = IntegerField(null=True)
migrate(
migrator.add_column('table_movies', 'movie_file_id', movie_file_id),
)
class Meta:
table_name = 'table_movies'

@ -117,7 +117,8 @@ def sync_episodes():
'format': format,
'resolution': resolution,
'video_codec': videoCodec,
'audio_codec': audioCodec})
'audio_codec': audioCodec,
'episode_file_id': episode['episodeFile']['id']})
else:
episodes_to_add.append({'sonarr_series_id': episode['seriesId'],
'sonarr_episode_id': episode['id'],
@ -130,7 +131,8 @@ def sync_episodes():
'format': format,
'resolution': resolution,
'video_codec': videoCodec,
'audio_codec': audioCodec})
'audio_codec': audioCodec,
'episode_file_id': episode['episodeFile']['id']})
# Update existing episodes in DB
episode_in_db_list = []
@ -146,7 +148,8 @@ def sync_episodes():
TableEpisodes.format,
TableEpisodes.resolution,
TableEpisodes.video_codec,
TableEpisodes.audio_codec
TableEpisodes.audio_codec,
TableEpisodes.episode_file_id
).dicts()
for item in episodes_in_db:

@ -158,7 +158,8 @@ def update_movies():
'video_codec': six.text_type(videoCodec),
'audio_codec': six.text_type(audioCodec),
'overview': six.text_type(overview),
'imdb_id': six.text_type(imdbId)})
'imdb_id': six.text_type(imdbId),
'movie_file_id': movie['movieFile']['id']})
else:
if movie_default_enabled is True:
movies_to_add.append({'radarr_id': movie["id"],
@ -182,7 +183,8 @@ def update_movies():
'video_codec': videoCodec,
'audio_codec': audioCodec,
'imdb_id': imdbId,
'forced': movie_default_forced})
'forced': movie_default_forced,
'movie_file_id': movie['movieFile']['id']})
else:
movies_to_add.append({'radarr_id': movie["id"],
'title': movie["title"],
@ -201,7 +203,8 @@ def update_movies():
'resolution': resolution,
'video_codec': videoCodec,
'audio_codec': audioCodec,
'imdb_id': imdbId})
'imdb_id': imdbId,
'movie_file_id': movie['movieFile']['id']})
else:
logging.error(
'BAZARR Radarr returned a movie without a file path: ' + movie["path"] + separator +
@ -227,7 +230,8 @@ def update_movies():
TableMovies.resolution,
TableMovies.video_codec,
TableMovies.audio_codec,
TableMovies.imdb_id
TableMovies.imdb_id,
TableMovies.movie_file_id
).dicts()
for item in movies_in_db:

@ -8,10 +8,11 @@ import time
from get_args import args
from config import settings
from subliminal_patch.exceptions import TooManyRequests, APIThrottled
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ParseResponseError
from subliminal.exceptions import DownloadLimitExceeded, ServiceUnavailable
VALID_THROTTLE_EXCEPTIONS = (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled)
VALID_THROTTLE_EXCEPTIONS = (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled,
ParseResponseError)
VALID_COUNT_EXCEPTIONS = ('TooManyRequests', 'ServiceUnavailable', 'APIThrottled')
PROVIDER_THROTTLE_MAP = {
@ -20,6 +21,7 @@ PROVIDER_THROTTLE_MAP = {
DownloadLimitExceeded: (datetime.timedelta(hours=3), "3 hours"),
ServiceUnavailable: (datetime.timedelta(minutes=20), "20 minutes"),
APIThrottled: (datetime.timedelta(minutes=10), "10 minutes"),
ParseResponseError: (datetime.timedelta(hours=6), "6 hours"),
},
"opensubtitles": {
TooManyRequests: (datetime.timedelta(hours=3), "3 hours"),
@ -124,7 +126,10 @@ def get_providers_auth():
'betaseries': {'token': settings.betaseries.token},
'titulky': {'username': settings.titulky.username,
'password': settings.titulky.password,
}
},
'titlovi': {'username': settings.titlovi.username,
'password': settings.titlovi.password,
},
}
return providers_auth
@ -141,10 +146,11 @@ def provider_throttle(name, exception):
throttle_data = PROVIDER_THROTTLE_MAP.get(name, PROVIDER_THROTTLE_MAP["default"]).get(cls, None) or \
PROVIDER_THROTTLE_MAP["default"].get(cls, None)
if not throttle_data:
return
if throttle_data:
throttle_delta, throttle_description = throttle_data
else:
throttle_delta, throttle_description = datetime.timedelta(minutes=10), "10 minutes"
throttle_delta, throttle_description = throttle_data
throttle_until = datetime.datetime.now() + throttle_delta
if cls_name not in VALID_COUNT_EXCEPTIONS or throttled_count(name):

@ -397,6 +397,8 @@ def save_wizard():
settings.napisy24.password = request.forms.get('settings_napisy24_password')
settings.subscene.username = request.forms.get('settings_subscene_username')
settings.subscene.password = request.forms.get('settings_subscene_password')
settings.titlovi.username = request.forms.get('settings_titlovi_username')
settings.titlovi.password = request.forms.get('settings_titlovi_password')
settings.betaseries.token = request.forms.get('settings_betaseries_token')
settings_subliminal_languages = request.forms.getall('settings_subliminal_languages')
@ -1825,6 +1827,8 @@ def save_settings():
settings.napisy24.password = request.forms.get('settings_napisy24_password')
settings.subscene.username = request.forms.get('settings_subscene_username')
settings.subscene.password = request.forms.get('settings_subscene_password')
settings.titlovi.username = request.forms.get('settings_titlovi_username')
settings.titlovi.password = request.forms.get('settings_titlovi_password')
settings.betaseries.token = request.forms.get('settings_betaseries_token')
settings_subliminal_languages = request.forms.getall('settings_subliminal_languages')

@ -101,9 +101,10 @@ def get_sonarr_version():
use_sonarr = settings.general.getboolean('use_sonarr')
apikey_sonarr = settings.sonarr.apikey
sv = url_sonarr + "/api/system/status?apikey=" + apikey_sonarr
sonarr_version = ''
if use_sonarr:
try:
sonarr_version = requests.get(sv, timeout=30, verify=False).json()['version']
sonarr_version = requests.get(sv, timeout=60, verify=False).json()['version']
except Exception as e:
logging.DEBUG('BAZARR cannot get Sonarr version')
@ -114,9 +115,10 @@ def get_radarr_version():
use_radarr = settings.general.getboolean('use_radarr')
apikey_radarr = settings.radarr.apikey
rv = url_radarr + "/api/system/status?apikey=" + apikey_radarr
radarr_version = ''
if use_radarr:
try:
radarr_version = requests.get(rv, timeout=30, verify=False).json()['version']
radarr_version = requests.get(rv, timeout=60, verify=False).json()['version']
except Exception as e:
logging.DEBUG('BAZARR cannot get Radarr version')

@ -28,16 +28,6 @@ class TitloviConverter(LanguageReverseConverter):
}
self.codes = set(self.from_titlovi.keys())
# temporary fix, should be removed as soon as API is used
self.lang_from_countrycode = {'ba': ('bos',),
'en': ('eng',),
'hr': ('hrv',),
'mk': ('mkd',),
'rs': ('srp',),
'rsc': ('srp', None, 'Cyrl'),
'si': ('slv',)
}
def convert(self, alpha3, country=None, script=None):
if (alpha3, country, script) in self.to_titlovi:
return self.to_titlovi[(alpha3, country, script)]
@ -50,9 +40,5 @@ class TitloviConverter(LanguageReverseConverter):
if titlovi in self.from_titlovi:
return self.from_titlovi[titlovi]
# temporary fix, should be removed as soon as API is used
if titlovi in self.lang_from_countrycode:
return self.lang_from_countrycode[titlovi]
raise ConfigurationError('Unsupported language number for titlovi: %s' % titlovi)

@ -29,9 +29,9 @@ from subliminal.utils import hash_napiprojekt, hash_opensubtitles, hash_shooter,
from subliminal.video import VIDEO_EXTENSIONS, Video, Episode, Movie
from subliminal.core import guessit, ProviderPool, io, is_windows_special_path, \
ThreadPoolExecutor, check_video
from subliminal_patch.exceptions import TooManyRequests, APIThrottled
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ParseResponseError
from subzero.language import Language
from subzero.language import Language, ENDSWITH_LANGUAGECODE_RE
from scandir import scandir, scandir_generic as _scandir_generic
import six
@ -188,12 +188,9 @@ class SZProviderPool(ProviderPool):
except (requests.Timeout, socket.timeout):
logger.error('Provider %r timed out', provider)
except (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled) as e:
self.throttle_callback(provider, e)
return
except:
except Exception as e:
logger.exception('Unexpected error in provider %r: %s', provider, traceback.format_exc())
self.throttle_callback(provider, e)
def list_subtitles(self, video, languages):
"""List subtitles.
@ -285,7 +282,7 @@ class SZProviderPool(ProviderPool):
logger.debug("RAR Traceback: %s", traceback.format_exc())
return False
except (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled) as e:
except (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled, ParseResponseError) as e:
self.throttle_callback(subtitle.provider_name, e)
self.discarded_providers.add(subtitle.provider_name)
return False
@ -576,12 +573,14 @@ def scan_video(path, dont_use_actual_file=False, hints=None, providers=None, ski
return video
def _search_external_subtitles(path, languages=None, only_one=False, scandir_generic=False):
def _search_external_subtitles(path, languages=None, only_one=False, scandir_generic=False, match_strictness="strict"):
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
fn_no_ext, fileext = os.path.splitext(filename)
fn_no_ext_lower = fn_no_ext.lower()
subtitles = {}
_scandir = _scandir_generic if scandir_generic else scandir
for entry in _scandir(dirpath):
if (not entry.name or entry.name in ('\x0c', '$', ',', '\x7f')) and not scandir_generic:
logger.debug('Could not determine the name of the file, retrying with scandir_generic')
@ -592,9 +591,11 @@ def _search_external_subtitles(path, languages=None, only_one=False, scandir_gen
p = entry.name
# keep only valid subtitle filenames
if not p.lower().startswith(fileroot.lower()) or not p.lower().endswith(SUBTITLE_EXTENSIONS):
if not p.lower().endswith(SUBTITLE_EXTENSIONS):
continue
# not p.lower().startswith(fileroot.lower()) or not
p_root, p_ext = os.path.splitext(p)
if not INCLUDE_EXOTIC_SUBS and p_ext not in (".srt", ".ass", ".ssa", ".vtt"):
continue
@ -613,7 +614,19 @@ def _search_external_subtitles(path, languages=None, only_one=False, scandir_gen
forced = "forced" in adv_tag
# extract the potential language code
language_code = p_root[len(fileroot):].replace('_', '-')[1:]
language_code = p_root.rsplit(".", 1)[1].replace('_', '-')
# remove possible language code for matching
p_root_bare = ENDSWITH_LANGUAGECODE_RE.sub("", p_root)
p_root_lower = p_root_bare.lower()
filename_matches = p_root_lower == fn_no_ext_lower
filename_contains = p_root_lower in fn_no_ext_lower
if not filename_matches:
if match_strictness == "strict" or (match_strictness == "loose" and not filename_contains):
continue
# default language is undefined
language = Language('und')
@ -637,7 +650,7 @@ def _search_external_subtitles(path, languages=None, only_one=False, scandir_gen
return subtitles
def search_external_subtitles(path, languages=None, only_one=False):
def search_external_subtitles(path, languages=None, only_one=False, match_strictness="strict"):
"""
wrap original search_external_subtitles function to search multiple paths for one given video
# todo: cleanup and merge with _search_external_subtitles
@ -658,10 +671,11 @@ def search_external_subtitles(path, languages=None, only_one=False):
if os.path.isdir(os.path.dirname(abspath)):
try:
subtitles.update(_search_external_subtitles(abspath, languages=languages,
only_one=only_one))
only_one=only_one, match_strictness=match_strictness))
except OSError:
subtitles.update(_search_external_subtitles(abspath, languages=languages,
only_one=only_one, scandir_generic=True))
only_one=only_one, match_strictness=match_strictness,
scandir_generic=True))
logger.debug("external subs: found %s", subtitles)
return subtitles
@ -854,6 +868,8 @@ def save_subtitles(file_path, subtitles, single=False, directory=None, chmod=Non
logger.debug(u"Saving %r to %r", subtitle, subtitle_path)
content = subtitle.get_modified_content(format=format, debug=debug_mods)
if content:
if os.path.exists(subtitle_path):
os.remove(subtitle_path)
with open(subtitle_path, 'wb') as f:
f.write(content)
subtitle.storage_path = subtitle_path

@ -10,3 +10,8 @@ class TooManyRequests(ProviderError):
class APIThrottled(ProviderError):
pass
class ParseResponseError(ProviderError):
    """Raised by a provider when it cannot parse the response it received."""

@ -8,13 +8,14 @@ import zipfile
import rarfile
from subzero.language import Language
from guessit import guessit
from requests import Session
from subliminal import __short_version__
from subliminal.exceptions import ServiceUnavailable
from subliminal.providers import ParserBeautifulSoup, Provider
from subliminal.subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending,guess_matches
from subliminal.video import Episode, Movie
from subliminal_patch.exceptions import ParseResponseError
from six.moves import range
logger = logging.getLogger(__name__)
@ -121,35 +122,17 @@ class SubdivxSubtitlesProvider(Provider):
language = self.language_list[0]
search_link = self.server_url + 'index.php'
while True:
r = self.session.get(search_link, params=params, timeout=10)
r.raise_for_status()
response = self.session.get(search_link, params=params, timeout=10)
self._check_response(response)
if not r.content:
logger.debug('No data returned from provider')
return []
try:
page_subtitles = self._parse_subtitles_page(response, language)
except Exception as e:
raise ParseResponseError('Error parsing subtitles list: ' + str(e))
page_soup = ParserBeautifulSoup(r.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
title_soups = page_soup.find_all("div", {'id': 'menu_detalle_buscador'})
body_soups = page_soup.find_all("div", {'id': 'buscador_detalle'})
if len(title_soups) != len(body_soups):
logger.debug('Error in provider data')
return []
for subtitle in range(0, len(title_soups)):
title_soup, body_soup = title_soups[subtitle], body_soups[subtitle]
subtitles += page_subtitles
# title
title = title_soup.find("a").text.replace("Subtitulo de ", "")
page_link = title_soup.find("a")["href"].replace('http://', 'https://')
# body
description = body_soup.find("div", {'id': 'buscador_detalle_sub'}).text
subtitle = self.subtitle_class(language, page_link, description, title)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
if len(title_soups) >= 20:
if len(page_subtitles) >= 20:
params['pg'] += 1 # search next page
time.sleep(self.multi_result_throttle)
else:
@ -177,67 +160,91 @@ class SubdivxSubtitlesProvider(Provider):
return subtitles
def get_download_link(self, subtitle):
r = self.session.get(subtitle.page_link, timeout=10)
r.raise_for_status()
if r.content:
page_soup = ParserBeautifulSoup(r.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
links_soup = page_soup.find_all("a", {'class': 'detalle_link'})
for link_soup in links_soup:
if link_soup['href'].startswith('bajar'):
return self.server_url + link_soup['href']
logger.debug('No data returned from provider')
return None
def download_subtitle(self, subtitle):
if isinstance(subtitle, SubdivxSubtitle):
# download the subtitle
logger.info('Downloading subtitle %r', subtitle)
# get download link
download_link = self.get_download_link(subtitle)
r = self.session.get(download_link, headers={'Referer': subtitle.page_link}, timeout=30)
r.raise_for_status()
download_link = self._get_download_link(subtitle)
if not r.content:
logger.debug('Unable to download subtitle. No data returned from provider')
return
# download zip / rar file with the subtitle
response = self.session.get(download_link, headers={'Referer': subtitle.page_link}, timeout=30)
self._check_response(response)
archive = _get_archive(r.content)
# open the compressed archive
archive = self._get_archive(response.content)
subtitle_content = _get_subtitle_from_archive(archive)
if subtitle_content:
subtitle.content = fix_line_ending(subtitle_content)
else:
logger.debug('Could not extract subtitle from %r', archive)
# extract the subtitle
subtitle_content = self._get_subtitle_from_archive(archive)
subtitle.content = fix_line_ending(subtitle_content)
def _check_response(self, response):
    """Raise ServiceUnavailable unless the HTTP response status is 200 OK."""
    if response.status_code != 200:
        raise ServiceUnavailable('Bad status code: ' + str(response.status_code))
def _parse_subtitles_page(self, response, language):
subtitles = []
page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
title_soups = page_soup.find_all("div", {'id': 'menu_detalle_buscador'})
body_soups = page_soup.find_all("div", {'id': 'buscador_detalle'})
for subtitle in range(0, len(title_soups)):
title_soup, body_soup = title_soups[subtitle], body_soups[subtitle]
# title
title = title_soup.find("a").text.replace("Subtitulo de ", "")
page_link = title_soup.find("a")["href"].replace('http://', 'https://')
def _get_archive(content):
# open the archive
archive_stream = io.BytesIO(content)
archive = None
if rarfile.is_rarfile(archive_stream):
logger.debug('Identified rar archive')
archive = rarfile.RarFile(archive_stream)
elif zipfile.is_zipfile(archive_stream):
logger.debug('Identified zip archive')
archive = zipfile.ZipFile(archive_stream)
# body
description = body_soup.find("div", {'id': 'buscador_detalle_sub'}).text
return archive
subtitle = self.subtitle_class(language, page_link, description, title)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def _get_download_link(self, subtitle):
    """Fetch the subtitle's detail page and return the absolute download URL.

    Raises ParseResponseError when the page cannot be parsed or when no
    download link is present on it.
    """
    response = self.session.get(subtitle.page_link, timeout=10)
    self._check_response(response)
    try:
        # the site serves ISO-8859-1; decode leniently before parsing
        page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
        links_soup = page_soup.find_all("a", {'class': 'detalle_link'})
        for link_soup in links_soup:
            # download anchors are the ones pointing at 'bajar...' paths
            if link_soup['href'].startswith('bajar'):
                return self.server_url + link_soup['href']
    except Exception as e:
        raise ParseResponseError('Error parsing download link: ' + str(e))
    raise ParseResponseError('Download link not found')
def _get_archive(self, content):
    """Wrap downloaded bytes in a RAR or ZIP archive object.

    Raises ParseResponseError when the payload is neither format.
    """
    # open the archive
    archive_stream = io.BytesIO(content)
    if rarfile.is_rarfile(archive_stream):
        logger.debug('Identified rar archive')
        archive = rarfile.RarFile(archive_stream)
    elif zipfile.is_zipfile(archive_stream):
        logger.debug('Identified zip archive')
        archive = zipfile.ZipFile(archive_stream)
    else:
        raise ParseResponseError('Unsupported compressed format')
    return archive
def _get_subtitle_from_archive(archive):
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
def _get_subtitle_from_archive(self, archive):
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
return archive.read(name)
return archive.read(name)
return None
raise ParseResponseError('Can not find the subtitle in the compressed file')

@ -3,43 +3,36 @@
from __future__ import absolute_import
import io
import logging
import math
import re
import time
from datetime import datetime
import dateutil.parser
import rarfile
from bs4 import BeautifulSoup
from zipfile import ZipFile, is_zipfile
from rarfile import RarFile, is_rarfile
from babelfish import language_converters, Script
from requests import RequestException
from requests import RequestException, codes as request_codes
from guessit import guessit
from subliminal_patch.http import RetryingCFSession
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.utils import sanitize, fix_inconsistent_naming as _fix_inconsistent_naming
from subliminal.exceptions import ProviderError
from subliminal.exceptions import ProviderError, AuthenticationError, ConfigurationError
from subliminal.score import get_equivalent_release_groups
from subliminal.utils import sanitize_release_group
from subliminal.subtitle import guess_matches
from subliminal.video import Episode, Movie
from subliminal.subtitle import fix_line_ending
from subliminal_patch.pitcher import pitchers, load_verification, store_verification
from subzero.language import Language
from random import randint
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
from subzero.language import Language
from dogpile.cache.api import NO_VALUE
from subliminal.cache import region
from six.moves import map
# parsing regex definitions
title_re = re.compile(r'(?P<title>(?:.+(?= [Aa][Kk][Aa] ))|.+)(?:(?:.+)(?P<altitle>(?<= [Aa][Kk][Aa] ).+))?')
lang_re = re.compile(r'(?<=flags/)(?P<lang>.{2})(?:.)(?P<script>c?)(?:.+)')
season_re = re.compile(r'Sezona (?P<season>\d+)')
episode_re = re.compile(r'Epizoda (?P<episode>\d+)')
year_re = re.compile(r'(?P<year>\d+)')
fps_re = re.compile(r'fps: (?P<fps>.+)')
def fix_inconsistent_naming(title):
@ -53,6 +46,7 @@ def fix_inconsistent_naming(title):
return _fix_inconsistent_naming(title, {"DC's Legends of Tomorrow": "Legends of Tomorrow",
"Marvel's Jessica Jones": "Jessica Jones"})
logger = logging.getLogger(__name__)
# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile`
@ -64,9 +58,9 @@ language_converters.register('titlovi = subliminal_patch.converters.titlovi:Titl
class TitloviSubtitle(Subtitle):
provider_name = 'titlovi'
def __init__(self, language, page_link, download_link, sid, releases, title, alt_title=None, season=None,
episode=None, year=None, fps=None, asked_for_release_group=None, asked_for_episode=None):
super(TitloviSubtitle, self).__init__(language, page_link=page_link)
def __init__(self, language, download_link, sid, releases, title, alt_title=None, season=None,
episode=None, year=None, rating=None, download_count=None, asked_for_release_group=None, asked_for_episode=None):
super(TitloviSubtitle, self).__init__(language)
self.sid = sid
self.releases = self.release_info = releases
self.title = title
@ -75,11 +69,21 @@ class TitloviSubtitle(Subtitle):
self.episode = episode
self.year = year
self.download_link = download_link
self.fps = fps
self.rating = rating
self.download_count = download_count
self.matches = None
self.asked_for_release_group = asked_for_release_group
self.asked_for_episode = asked_for_episode
def __repr__(self):
if self.season and self.episode:
return '<%s "%s (%r)" s%.2de%.2d [%s:%s] ID:%r R:%.2f D:%r>' % (
self.__class__.__name__, self.title, self.year, self.season, self.episode, self.language, self._guessed_encoding, self.sid,
self.rating, self.download_count)
else:
return '<%s "%s (%r)" [%s:%s] ID:%r R:%.2f D:%r>' % (
self.__class__.__name__, self.title, self.year, self.language, self._guessed_encoding, self.sid, self.rating, self.download_count)
@property
def id(self):
    # Provider-side subtitle identifier; used as the unique key for this subtitle.
    return self.sid
@ -136,20 +140,62 @@ class TitloviSubtitle(Subtitle):
class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
subtitle_class = TitloviSubtitle
languages = {Language.fromtitlovi(l) for l in language_converters['titlovi'].codes} | {Language.fromietf('sr-Latn')}
server_url = 'https://titlovi.com'
search_url = server_url + '/titlovi/?'
download_url = server_url + '/download/?type=1&mediaid='
api_url = 'https://kodi.titlovi.com/api/subtitles'
api_gettoken_url = api_url + '/gettoken'
api_search_url = api_url + '/search'
def __init__(self, username=None, password=None):
    # Credentials are mandatory: the titlovi.com API has no anonymous access.
    if not all((username, password)):
        raise ConfigurationError('Username and password must be specified')
    self.username = username
    self.password = password
    # Populated lazily: session in initialize(), token fields by log_in().
    self.session = None
    self.user_id = None
    self.login_token = None
    self.token_exp = None
def initialize(self):
    """Create the HTTP session and ensure a valid API token (cached or fresh)."""
    self.session = RetryingCFSession()
    #load_verification("titlovi", self.session)
    token = region.get("titlovi_token")
    if token is not NO_VALUE:
        self.user_id, self.login_token, self.token_exp = token
        # cached token carries its expiration date; re-login when stale
        if datetime.now() > self.token_exp:
            logger.debug('Token expired')
            self.log_in()
        else:
            logger.debug('Use cached token')
    else:
        logger.debug('Token not found in cache')
        self.log_in()
def log_in(self):
    """Obtain a fresh API token and cache (user_id, token, expiry).

    Raises AuthenticationError on a 401 response (not swallowed by the
    RequestException handler); transport errors are only logged.
    """
    login_params = dict(username=self.username, password=self.password, json=True)
    try:
        response = self.session.post(self.api_gettoken_url, params=login_params)
        if response.status_code == request_codes.ok:
            resp_json = response.json()
            self.login_token = resp_json.get('Token')
            self.user_id = resp_json.get('UserId')
            self.token_exp = dateutil.parser.parse(resp_json.get('ExpirationDate'))
            # persist so later provider initializations skip the login round-trip
            region.set("titlovi_token", [self.user_id, self.login_token, self.token_exp])
            logger.debug('New token obtained')
        elif response.status_code == request_codes.unauthorized:
            raise AuthenticationError('Login failed')
    except RequestException as e:
        logger.error(e)
def terminate(self):
    """Close the underlying HTTP session."""
    self.session.close()
def query(self, languages, title, season=None, episode=None, year=None, video=None):
items_per_page = 10
current_page = 1
def query(self, languages, title, season=None, episode=None, year=None, imdb_id=None, video=None):
search_params = dict()
used_languages = languages
lang_strings = [str(lang) for lang in used_languages]
@ -164,135 +210,73 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
langs = '|'.join(map(str, [l.titlovi for l in used_languages]))
# set query params
params = {'prijevod': title, 'jezik': langs}
search_params['query'] = title
search_params['lang'] = langs
is_episode = False
if season and episode:
is_episode = True
params['s'] = season
params['e'] = episode
if year:
params['g'] = year
search_params['season'] = season
search_params['episode'] = episode
#if year:
# search_params['year'] = year
if imdb_id:
search_params['imdbID'] = imdb_id
# loop through paginated results
logger.info('Searching subtitles %r', params)
logger.info('Searching subtitles %r', search_params)
subtitles = []
query_results = []
while True:
# query the server
try:
r = self.session.get(self.search_url, params=params, timeout=10)
r.raise_for_status()
except RequestException as e:
logger.exception('RequestException %s', e)
break
try:
search_params['token'] = self.login_token
search_params['userid'] = self.user_id
search_params['json'] = True
response = self.session.get(self.api_search_url, params=search_params)
resp_json = response.json()
if resp_json['SubtitleResults']:
query_results.extend(resp_json['SubtitleResults'])
except Exception as e:
logger.error(e)
for sub in query_results:
# title and alternate title
match = title_re.search(sub.get('Title'))
if match:
_title = match.group('title')
alt_title = match.group('altitle')
else:
try:
soup = BeautifulSoup(r.content, 'lxml')
# number of results
result_count = int(soup.select_one('.results_count b').string)
except:
result_count = None
# exit if no results
if not result_count:
if not subtitles:
logger.debug('No subtitles found')
else:
logger.debug("No more subtitles found")
break
# number of pages with results
pages = int(math.ceil(result_count / float(items_per_page)))
# get current page
if 'pg' in params:
current_page = int(params['pg'])
try:
sublist = soup.select('section.titlovi > ul.titlovi > li.subtitleContainer.canEdit')
for sub in sublist:
# subtitle id
sid = sub.find(attrs={'data-id': True}).attrs['data-id']
# get download link
download_link = self.download_url + sid
# title and alternate title
match = title_re.search(sub.a.string)
if match:
_title = match.group('title')
alt_title = match.group('altitle')
else:
continue
# page link
page_link = self.server_url + sub.a.attrs['href']
# subtitle language
_lang = sub.select_one('.lang')
match = lang_re.search(_lang.attrs.get('src', _lang.attrs.get('data-cfsrc', '')))
if match:
try:
# decode language
lang = Language.fromtitlovi(match.group('lang')+match.group('script'))
except ValueError:
continue
# relase year or series start year
match = year_re.search(sub.find(attrs={'data-id': True}).parent.i.string)
if match:
r_year = int(match.group('year'))
# fps
match = fps_re.search(sub.select_one('.fps').string)
if match:
fps = match.group('fps')
# releases
releases = str(sub.select_one('.fps').parent.contents[0].string)
# handle movies and series separately
if is_episode:
# season and episode info
sxe = sub.select_one('.s0xe0y').string
r_season = None
r_episode = None
if sxe:
match = season_re.search(sxe)
if match:
r_season = int(match.group('season'))
match = episode_re.search(sxe)
if match:
r_episode = int(match.group('episode'))
subtitle = self.subtitle_class(lang, page_link, download_link, sid, releases, _title,
alt_title=alt_title, season=r_season, episode=r_episode,
year=r_year, fps=fps,
asked_for_release_group=video.release_group,
asked_for_episode=episode)
else:
subtitle = self.subtitle_class(lang, page_link, download_link, sid, releases, _title,
alt_title=alt_title, year=r_year, fps=fps,
asked_for_release_group=video.release_group)
logger.debug('Found subtitle %r', subtitle)
# prime our matches so we can use the values later
subtitle.get_matches(video)
# add found subtitles
subtitles.append(subtitle)
finally:
soup.decompose()
# stop on last page
if current_page >= pages:
break
# increment current page
params['pg'] = current_page + 1
logger.debug('Getting page %d', params['pg'])
continue
# handle movies and series separately
if is_episode:
subtitle = self.subtitle_class(Language.fromtitlovi(sub.get('Lang')), sub.get('Link'), sub.get('Id'), sub.get('Release'), _title,
alt_title=alt_title, season=sub.get('Season'), episode=sub.get('Episode'),
year=sub.get('Year'), rating=sub.get('Rating'),
download_count=sub.get('DownloadCount'),
asked_for_release_group=video.release_group,
asked_for_episode=episode)
else:
subtitle = self.subtitle_class(Language.fromtitlovi(sub.get('Lang')), sub.get('Link'), sub.get('Id'), sub.get('Release'), _title,
alt_title=alt_title, year=sub.get('Year'), rating=sub.get('Rating'),
download_count=sub.get('DownloadCount'),
asked_for_release_group=video.release_group)
logger.debug('Found subtitle %r', subtitle)
# prime our matches so we can use the values later
subtitle.get_matches(video)
# add found subtitles
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = episode = None
if isinstance(video, Episode):
title = video.series
season = video.season
@ -302,6 +286,7 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
return [s for s in
self.query(languages, fix_inconsistent_naming(title), season=season, episode=episode, year=video.year,
imdb_id=video.imdb_id,
video=video)]
def download_subtitle(self, subtitle):
@ -339,10 +324,12 @@ class TitloviProvider(Provider, ProviderSubtitleArchiveMixin):
sub_to_extract = None
for sub_name in subs_in_archive:
if not ('.cyr' in sub_name or '.cir' in sub_name):
_sub_name = sub_name.lower()
if not ('.cyr' in _sub_name or '.cir' in _sub_name or 'cyr)' in _sub_name):
sr_lat_subs.append(sub_name)
if ('.cyr' in sub_name or '.cir' in sub_name) and not '.lat' in sub_name:
if ('.cyr' in sub_name or '.cir' in _sub_name) and not '.lat' in _sub_name.lower():
sr_cyr_subs.append(sub_name)
if subtitle.language == 'sr':

@ -1,6 +1,7 @@
# coding=utf-8
from __future__ import absolute_import
import types
import re
from babelfish.exceptions import LanguageError
from babelfish import Language as Language_, basestr
@ -139,3 +140,16 @@ class Language(Language_):
return Language(*Language_.fromietf(s).__getstate__())
return Language(*Language_.fromalpha3b(s).__getstate__())
# Trailing ".<lang>" or ".<lang>-<Region>" suffix (IETF style); group 1 is the
# bare language code. Raw string: '\.' is an invalid escape in a plain literal.
IETF_MATCH = r".+\.([^-.]+)(?:-[A-Za-z]+)?$"
# A 2-3 letter language code (optionally with an IETF region/script suffix) at
# the very end of a filename root, e.g. "movie.en" or "movie.sr-Latn".
ENDSWITH_LANGUAGECODE_RE = re.compile(r"\.([^-.]{2,3})(?:-[A-Za-z]{2,})?$")
def match_ietf_language(s, ietf=False):
    """Extract the trailing language code from a subtitle filename.

    :param str s: filename (or filename root) to inspect, e.g. "movie.en".
    :param bool ietf: when True, match IETF-style codes and return only the
        base language part ("en-US" -> "en").
    :return: the matched language code, or *s* unchanged when none is found.
    """
    # The conditional keeps IETF_MATCH lazily referenced, as in the original.
    # Raw string fixes the invalid '\.' escape; [^.] is equivalent to [^\.].
    language_match = re.match(r".+\.([^.]+)$" if not ietf else IETF_MATCH, s)
    # Both patterns contain exactly one capturing group, so a successful match
    # always yields the code directly.
    if language_match:
        return language_match.group(1)
    return s

@ -107,6 +107,12 @@ class Dicked(object):
for key, value in six.iteritems(entries):
self.__dict__[key] = (Dicked(**value) if isinstance(value, dict) else value)
def has(self, key):
    # True when the wrapped entries mapping exists and contains *key*.
    return self._entries is not None and key in self._entries
def get(self, key, default=None):
    # dict.get-style lookup; returns *default* when entries are empty or None.
    return self._entries.get(key, default) if self._entries else default
def __repr__(self):
    # Delegate to __str__ so repr() and str() render identically.
    return str(self)

@ -19,7 +19,8 @@ def has_external_subtitle(part_id, stored_subs, language):
def set_existing_languages(video, video_info, external_subtitles=False, embedded_subtitles=False, known_embedded=None,
stored_subs=None, languages=None, only_one=False, known_metadata_subs=None):
stored_subs=None, languages=None, only_one=False, known_metadata_subs=None,
match_strictness="strict"):
logger.debug(u"Determining existing subtitles for %s", video.name)
external_langs_found = set()
@ -29,7 +30,8 @@ def set_existing_languages(video, video_info, external_subtitles=False, embedded
external_langs_found = known_metadata_subs
external_langs_found.update(set(search_external_subtitles(video.name, languages=languages,
only_one=only_one).values()))
only_one=only_one,
match_strictness=match_strictness).values()))
# found external subtitles should be considered?
if external_subtitles:

@ -645,7 +645,7 @@
<div class="middle aligned row">
<div class="right aligned four wide column">
<label>Titlovi (require anti-captcha)</label>
<label>Titlovi</label>
</div>
<div class="one wide column">
<div id="titlovi" class="ui toggle checkbox provider">
@ -655,7 +655,26 @@
</div>
</div>
<div id="titlovi_option" class="ui grid container">
<div class="middle aligned row">
<div class="right aligned six wide column">
<label>Username</label>
</div>
<div class="six wide column">
<div class="ui fluid input">
<input name="settings_titlovi_username" type="text" value="{{settings.titlovi.username if settings.titlovi.username != None else ''}}">
</div>
</div>
</div>
<div class="middle aligned row">
<div class="right aligned six wide column">
<label>Password</label>
</div>
<div class="six wide column">
<div class="ui fluid input">
<input name="settings_titlovi_password" type="password" value="{{settings.titlovi.password if settings.titlovi.password != None else ''}}">
</div>
</div>
</div>
</div>
<div class="middle aligned row">

Loading…
Cancel
Save