Merge branch 'python3' into development

pull/684/head
Louis Vézina 5 years ago
commit 4a2ed8830d

@ -1,5 +1,10 @@
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import bazarr.libs
from six import PY3
import subprocess as sp
import time
import os
@ -13,14 +18,19 @@ from bazarr.get_args import args
def check_python_version():
python_version = platform.python_version_tuple()
minimum_python_version_tuple = (2, 7, 13)
minimum_python3_version_tuple = (3, 6, 0)
minimum_python_version = ".".join(str(i) for i in minimum_python_version_tuple)
minimum_python3_version = ".".join(str(i) for i in minimum_python3_version_tuple)
if int(python_version[0]) > minimum_python_version_tuple[0]:
print "Python 3 isn't supported. Please use Python " + minimum_python_version + " or greater."
os._exit(0)
if int(python_version[0]) == minimum_python3_version_tuple[0]:
if int(python_version[1]) >= minimum_python3_version_tuple[1]:
pass
else:
print("Python " + minimum_python3_version + " or greater required. Current version is " + platform.python_version() + ". Please upgrade Python.")
os._exit(0)
elif int(python_version[1]) < minimum_python_version_tuple[1] or int(re.search(r'\d+', python_version[2]).group()) < minimum_python_version_tuple[2]:
print "Python " + minimum_python_version + " or greater required. Current version is " + platform.python_version() + ". Please upgrade Python."
print("Python " + minimum_python_version + " or greater required. Current version is " + platform.python_version() + ". Please upgrade Python.")
os._exit(0)
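Editor's note: the version gate above compares components one at a time, which is easy to get wrong; the removed elif, for example, rejects 2.8.5 because the patch-level test is OR-ed in regardless of the minor version. A minimal sketch of the same gate using tuple comparison; the minimum versions mirror the ones defined above, everything else is illustrative:

    import platform
    import re
    import sys

    MINIMUM_PY2 = (2, 7, 13)
    MINIMUM_PY3 = (3, 6, 0)

    def check_python_version():
        # platform.python_version_tuple() returns strings, and the patch
        # component may carry a suffix (e.g. '7+'), so strip to digits first.
        raw = platform.python_version_tuple()
        version = tuple(int(re.search(r'\d+', part).group()) for part in raw)
        minimum = MINIMUM_PY2 if version[0] == 2 else MINIMUM_PY3
        if version < minimum:
            print("Python " + ".".join(map(str, minimum)) +
                  " or greater required. Current version is " +
                  platform.python_version() + ". Please upgrade Python.")
            sys.exit(1)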
@ -33,10 +43,13 @@ def start_bazarr():
script = [sys.executable, "-u", os.path.normcase(os.path.join(dir_name, 'bazarr', 'main.py'))] + sys.argv[1:]
ep = sp.Popen(script, stdout=sp.PIPE, stderr=sp.STDOUT, stdin=sp.PIPE)
print "Bazarr starting..."
print("Bazarr starting...")
try:
for line in iter(ep.stdout.readline, ''):
sys.stdout.write(line)
if PY3:
sys.stdout.buffer.write(line)
else:
sys.stdout.write(line)
except KeyboardInterrupt:
pass
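Editor's note: on Python 3 the child's pipe yields bytes, hence the sys.stdout.buffer.write() branch above; note that the iter() sentinel would also need to be b'' on Python 3, since readline() returns b'' at EOF there and the loop otherwise never sees its stop value. A self-contained sketch of the dual-compatible forwarding loop (the child command is only an example):

    import subprocess as sp
    import sys
    from six import PY3

    proc = sp.Popen([sys.executable, "-c", "print('hello')"],
                    stdout=sp.PIPE, stderr=sp.STDOUT)
    sentinel = b'' if PY3 else ''   # readline() EOF value differs by version
    for line in iter(proc.stdout.readline, sentinel):
        if PY3:
            sys.stdout.buffer.write(line)   # bytes must go to the raw buffer
        else:
            sys.stdout.write(line)
    proc.wait()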
@ -61,16 +74,16 @@ if __name__ == '__main__':
try:
os.remove(stopfile)
except:
print 'Unable to delete stop file.'
print('Unable to delete stop file.')
else:
print 'Bazarr exited.'
print('Bazarr exited.')
os._exit(0)
if os.path.exists(restartfile):
try:
os.remove(restartfile)
except:
print 'Unable to delete restart file.'
print('Unable to delete restart file.')
else:
start_bazarr()

@ -1,6 +1,7 @@
# coding=utf-8
import cPickle as pickle
from __future__ import absolute_import
import six.moves.cPickle as pickle
import base64
import random
import platform
@ -28,14 +29,14 @@ def track_event(category=None, action=None, label=None):
try:
if settings.analytics.visitor:
visitor = pickle.loads(base64.b64decode(settings.analytics.visitor))
visitor = pickle.loads(base64.b64decode(settings.analytics.visitor), encoding='utf-8')
if visitor.user_agent is None:
visitor.user_agent = os.environ.get("SZ_USER_AGENT")
if visitor.unique_id > int(0x7fffffff):
visitor.unique_id = random.randint(0, 0x7fffffff)
except:
visitor = Visitor()
visitor.unique_id = long(random.randint(0, 0x7fffffff))
visitor.unique_id = random.randint(0, 0x7fffffff)
session = Session()
event = Event(category=category, action=action, label=label, value=1)
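Editor's note: pickles produced by Python 2 store str fields as bytes, which is why the load above gains encoding='utf-8' under Python 3. cPickle on Python 2 does not accept that keyword, though, so a version-guarded call is the safer shape; a sketch, with a hypothetical payload:

    import base64
    import six
    import six.moves.cPickle as pickle

    def load_visitor(b64_blob):
        data = base64.b64decode(b64_blob)
        if six.PY3:
            # Decode Python 2 str fields as utf-8 instead of raising.
            return pickle.loads(data, encoding='utf-8')
        return pickle.loads(data)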

@ -1,4 +1,5 @@
# coding=utf-8
from __future__ import absolute_import
import os
import logging
import json

@ -1,4 +1,5 @@
# coding=utf-8
from __future__ import absolute_import
import os
from simpleconfigparser import simpleconfigparser

@ -1,3 +1,4 @@
from __future__ import absolute_import
import enzyme
from enzyme.exceptions import MalformedMKVError
import logging

@ -1,4 +1,5 @@
# coding=utf-8
from __future__ import absolute_import
import os
import argparse

@ -1,4 +1,5 @@
# coding=utf-8
from __future__ import absolute_import
import os
import requests
import logging
@ -164,7 +165,7 @@ def sync_episodes():
for i, altered_episode in enumerate(altered_episodes, 1):
notifications.write(msg='Indexing episodes embedded subtitles...', queue='get_episodes', item=i,
length=len(altered_episodes))
store_subtitles(path_replace(altered_episode[1]))
store_subtitles(altered_episode[1], path_replace(altered_episode[1]))
logging.debug('BAZARR All episodes synced from Sonarr into database.')

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import os
import pycountry
@ -10,25 +11,23 @@ from database import database
def load_language_in_db():
# Get languages list in langs tuple
langs = [{'code3': lang.alpha_3, 'code2': lang.alpha_2, 'name': lang.name}
langs = [[lang.alpha_3, lang.alpha_2, lang.name]
for lang in pycountry.languages
if hasattr(lang, 'alpha_2')]
# Insert languages in database table
for lang in langs:
database.execute("INSERT OR IGNORE INTO table_settings_languages (code3, code2, name) VALUES (?, ?, ?)",
(lang['code3'], lang['code2'], lang['name']))
database.execute("INSERT OR IGNORE INTO table_settings_languages (code3, code2, name) VALUES (?, ?, ?)",
langs, execute_many=True)
database.execute("INSERT OR IGNORE INTO table_settings_languages (code3, code2, name) "
"VALUES ('pob', 'pb', 'Brazilian Portuguese')")
langs = [{'code3b': lang.bibliographic, 'code3': lang.alpha_3}
langs = [[lang.bibliographic, lang.alpha_3]
for lang in pycountry.languages
if hasattr(lang, 'alpha_2') and hasattr(lang, 'bibliographic')]
# Update languages in database table
for lang in langs:
database.execute("UPDATE table_settings_languages SET code3b=? WHERE code3=?", (lang['code3b'], lang['code3']))
database.execute("UPDATE table_settings_languages SET code3b=? WHERE code3=?", langs, execute_many=True)
def language_from_alpha2(lang):

@ -1,6 +1,8 @@
# coding=utf-8
from __future__ import absolute_import
import os
import six
import requests
import logging
from queueconfig import notifications
@ -75,7 +77,7 @@ def update_movies():
if movie["path"] != None and movie['movieFile']['relativePath'] != None:
try:
overview = unicode(movie['overview'])
overview = six.text_type(movie['overview'])
except:
overview = ""
try:
@ -129,27 +131,27 @@ def update_movies():
audioCodec = None
# Add movies in radarr to current movies list
current_movies_radarr.append(unicode(movie['tmdbId']))
current_movies_radarr.append(six.text_type(movie['tmdbId']))
if unicode(movie['tmdbId']) in current_movies_db_list:
if six.text_type(movie['tmdbId']) in current_movies_db_list:
movies_to_update.append({'radarrId': movie["id"],
'title': unicode(movie["title"]),
'path': unicode(movie["path"] + separator + movie['movieFile']['relativePath']),
'tmdbId': unicode(movie["tmdbId"]),
'poster': unicode(poster),
'fanart': unicode(fanart),
'audio_language': unicode(profile_id_to_language(movie['qualityProfileId'], audio_profiles)),
'title': six.text_type(movie["title"]),
'path': six.text_type(movie["path"] + separator + movie['movieFile']['relativePath']),
'tmdbId': six.text_type(movie["tmdbId"]),
'poster': six.text_type(poster),
'fanart': six.text_type(fanart),
'audio_language': six.text_type(profile_id_to_language(movie['qualityProfileId'], audio_profiles)),
'sceneName': sceneName,
'monitored': unicode(bool(movie['monitored'])),
'year': unicode(movie['year']),
'sortTitle': unicode(movie['sortTitle']),
'alternativeTitles': unicode(alternativeTitles),
'format': unicode(format),
'resolution': unicode(resolution),
'video_codec': unicode(videoCodec),
'audio_codec': unicode(audioCodec),
'overview': unicode(overview),
'imdbId': unicode(imdbId),
'monitored': six.text_type(bool(movie['monitored'])),
'year': six.text_type(movie['year']),
'sortTitle': six.text_type(movie['sortTitle']),
'alternativeTitles': six.text_type(alternativeTitles),
'format': six.text_type(format),
'resolution': six.text_type(resolution),
'video_codec': six.text_type(videoCodec),
'audio_codec': six.text_type(audioCodec),
'overview': six.text_type(overview),
'imdbId': six.text_type(imdbId),
'movie_file_id': movie['movieFile']['id']})
else:
if movie_default_enabled is True:
@ -165,7 +167,7 @@ def update_movies():
'fanart': fanart,
'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
'sceneName': sceneName,
'monitored': unicode(bool(movie['monitored'])),
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
'year': movie['year'],
'alternativeTitles': alternativeTitles,
@ -189,7 +191,7 @@ def update_movies():
'fanart': fanart,
'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
'sceneName': sceneName,
'monitored': unicode(bool(movie['monitored'])),
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
'year': movie['year'],
'alternativeTitles': alternativeTitles,
@ -251,7 +253,7 @@ def update_movies():
for i, altered_movie in enumerate(altered_movies, 1):
notifications.write(msg='Indexing movies embedded subtitles...', queue='get_movies', item=i,
length=len(altered_movies))
store_subtitles_movie(path_replace_movie(altered_movie[1]))
store_subtitles_movie(altered_movie[1], path_replace_movie(altered_movie[1]))
logging.debug('BAZARR All movies synced from Radarr into database.')
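Editor's note: every unicode() call in this file becomes six.text_type(), which is unicode on Python 2 and str on Python 3, so the same conversion yields text under both interpreters. A two-line sketch:

    import six

    title = six.text_type("Movie Title")        # unicode on 2, str on 3
    assert isinstance(title, six.text_type)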

@ -10,6 +10,7 @@ from get_args import args
from config import settings
from subliminal_patch.exceptions import TooManyRequests, APIThrottled, ParseResponseError
from subliminal.exceptions import DownloadLimitExceeded, ServiceUnavailable
from subliminal import region as subliminal_cache_region
VALID_THROTTLE_EXCEPTIONS = (TooManyRequests, DownloadLimitExceeded, ServiceUnavailable, APIThrottled,
ParseResponseError)
@ -154,19 +155,26 @@ def provider_throttle(name, exception):
throttle_until = datetime.datetime.now() + throttle_delta
if cls_name not in VALID_COUNT_EXCEPTIONS or throttled_count(name):
tp[name] = (cls_name, throttle_until, throttle_description)
settings.general.throtteled_providers = str(tp)
with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
settings.write(handle)
logging.info("Throttling %s for %s, until %s, because of: %s. Exception info: %r", name, throttle_description,
throttle_until.strftime("%y/%m/%d %H:%M"), cls_name, exception.message)
if cls_name == 'ValueError' and exception.args[0].startswith('unsupported pickle protocol'):
for fn in subliminal_cache_region.backend.all_filenames:
try:
os.remove(fn)
except (IOError, OSError):
logging.debug("Couldn't remove cache file: %s", os.path.basename(fn))
else:
tp[name] = (cls_name, throttle_until, throttle_description)
settings.general.throtteled_providers = str(tp)
with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
settings.write(handle)
logging.info("Throttling %s for %s, until %s, because of: %s. Exception info: %r", name, throttle_description,
throttle_until.strftime("%y/%m/%d %H:%M"), cls_name, exception.args[0])
def throttled_count(name):
global throttle_count
if name in throttle_count.keys():
if 'count' in throttle_count[name].keys():
if name in list(throttle_count.keys()):
if 'count' in list(throttle_count[name].keys()):
for key, value in throttle_count[name].items():
if key == 'count':
value += 1
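Editor's note: wrapping .keys() in list() keeps the membership tests valid on Python 3, where keys() returns a view rather than a list. Separately, rebinding value inside the loop above cannot change the stored count; a sketch of incrementing the entry in place, with the shape of throttle_count assumed from this excerpt:

    throttle_count = {'opensubtitles': {'count': 1}}

    name = 'opensubtitles'
    if name in throttle_count and 'count' in throttle_count[name]:
        # Assign through the dict; `value += 1` on the unpacked loop
        # variable only rebinds a local name and is lost immediately.
        throttle_count[name]['count'] += 1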

@ -1,5 +1,7 @@
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import os
import requests
import logging
@ -12,6 +14,7 @@ from config import settings, url_sonarr
from list_subtitles import list_missing_subtitles
from database import database, dict_converter
from utils import get_sonarr_version
import six
from helper import path_replace
@ -60,7 +63,7 @@ def update_series():
for i, show in enumerate(r.json(), 1):
notifications.write(msg="Getting series data from Sonarr...", queue='get_series', item=i, length=seriesListLength)
try:
overview = unicode(show['overview'])
overview = six.text_type(show['overview'])
except:
overview = ""
try:
@ -82,17 +85,17 @@ def update_series():
current_shows_sonarr.append(show['tvdbId'])
if show['tvdbId'] in current_shows_db_list:
series_to_update.append({'title': unicode(show["title"]),
'path': unicode(show["path"]),
series_to_update.append({'title': six.text_type(show["title"]),
'path': six.text_type(show["path"]),
'tvdbId': int(show["tvdbId"]),
'sonarrSeriesId': int(show["id"]),
'overview': unicode(overview),
'poster': unicode(poster),
'fanart': unicode(fanart),
'audio_language': unicode(profile_id_to_language((show['qualityProfileId'] if sonarr_version.startswith('2') else show['languageProfileId']), audio_profiles)),
'sortTitle': unicode(show['sortTitle']),
'year': unicode(show['year']),
'alternateTitles': unicode(alternateTitles)})
'overview': six.text_type(overview),
'poster': six.text_type(poster),
'fanart': six.text_type(fanart),
'audio_language': six.text_type(profile_id_to_language((show['qualityProfileId'] if get_sonarr_version().startswith('2') else show['languageProfileId']), audio_profiles)),
'sortTitle': six.text_type(show['sortTitle']),
'year': six.text_type(show['year']),
'alternateTitles': six.text_type(alternateTitles)})
else:
if serie_default_enabled is True:
series_to_add.append({'title': show["title"],
@ -127,7 +130,7 @@ def update_series():
for series in removed_series:
database.execute("DELETE FROM table_shows WHERE tvdbId=?",(series,))
# Update existing series in DB
series_in_db_list = []
series_in_db = database.execute("SELECT title, path, tvdbId, sonarrSeriesId, overview, poster, fanart, "

@ -1,12 +1,13 @@
# coding=utf-8
from __future__ import absolute_import
import os
import sys
import ast
import logging
import subprocess
import time
import cPickle as pickle
import six.moves.cPickle as pickle
import codecs
import types
import re
@ -36,6 +37,9 @@ from pyprobe.pyprobe import VideoFileParser
from database import database, dict_mapper
from analytics import track_event
import six
from six.moves import range
from functools import reduce
def get_video(path, title, sceneName, use_scenename, providers=None, media_type="movie"):
@ -90,11 +94,11 @@ def get_scores(video, media_type, min_score_movie_perc=60 * 100 / 120.0, min_sco
"""
max_score = 120.0
min_score = max_score * min_score_movie_perc / 100.0
scores = subliminal_scores.movie_scores.keys()
scores = list(subliminal_scores.movie_scores.keys())
if media_type == "series":
max_score = 360.0
min_score = max_score * min_score_series_perc / 100.0
scores = subliminal_scores.episode_scores.keys()
scores = list(subliminal_scores.episode_scores.keys())
if video.is_special:
min_score = max_score * min_score_special_ep / 100.0
@ -118,7 +122,7 @@ def download_subtitle(path, language, hi, forced, providers, providers_auth, sce
hi = "force non-HI"
language_set = set()
if not isinstance(language, types.ListType):
if not isinstance(language, list):
language = [language]
if forced == "True":
@ -184,7 +188,7 @@ def download_subtitle(path, language, hi, forced, providers, providers_auth, sce
saved_any = False
if downloaded_subtitles:
for video, subtitles in downloaded_subtitles.iteritems():
for video, subtitles in six.iteritems(downloaded_subtitles):
if not subtitles:
continue
@ -220,10 +224,10 @@ def download_subtitle(path, language, hi, forced, providers, providers_auth, sce
else:
action = "downloaded"
if video.used_scene_name:
message = downloaded_language + is_forced_string + " subtitles " + action + " from " + downloaded_provider + " with a score of " + unicode(
message = downloaded_language + is_forced_string + " subtitles " + action + " from " + downloaded_provider + " with a score of " + six.text_type(
round(subtitle.score * 100 / max_score, 2)) + "% using this scene name: " + sceneName
else:
message = downloaded_language + is_forced_string + " subtitles " + action + " from " + downloaded_provider + " with a score of " + unicode(
message = downloaded_language + is_forced_string + " subtitles " + action + " from " + downloaded_provider + " with a score of " + six.text_type(
round(subtitle.score * 100 / max_score, 2)) + "% using filename guessing."
if use_postprocessing is True:
@ -420,7 +424,6 @@ def manual_download_subtitle(path, language, hi, forced, subtitle, provider, pro
if not subtitle.is_valid():
logging.exception('BAZARR No valid Subtitles file found for this file: ' + path)
return
logging.debug('BAZARR Subtitles file downloaded for this file:' + path)
try:
score = round(subtitle.score / max_score * 100, 2)
fld = get_target_folder(path)
@ -449,7 +452,7 @@ def manual_download_subtitle(path, language, hi, forced, subtitle, provider, pro
downloaded_path = saved_subtitle.storage_path
logging.debug('BAZARR Subtitles file saved to disk: ' + downloaded_path)
is_forced_string = " forced" if subtitle.language.forced else ""
message = downloaded_language + is_forced_string + " Subtitles downloaded from " + downloaded_provider + " with a score of " + unicode(
message = downloaded_language + is_forced_string + " Subtitles downloaded from " + downloaded_provider + " with a score of " + six.text_type(
score) + "% using manual search."
if use_postprocessing is True:
@ -595,7 +598,7 @@ def series_download_subtitles(no):
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles(path_replace(episode['path']))
store_subtitles(episode['path'], path_replace(episode['path']))
history_log(1, no, episode['sonarrEpisodeId'], message, path, language_code, provider, score)
send_notifications(no, episode['sonarrEpisodeId'], message)
else:
@ -651,7 +654,7 @@ def episode_download_subtitles(no):
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles(path_replace(episode['path']))
store_subtitles(episode['path'], path_replace(episode['path']))
history_log(1, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message, path, language_code, provider, score)
send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message)
else:
@ -702,7 +705,7 @@ def movies_download_subtitles(no):
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles_movie(path_replace_movie(movie['path']))
store_subtitles_movie(movie['path'], path_replace_movie(movie['path']))
history_log_movie(1, no, message, path, language_code, provider, score)
send_notifications_movie(no, message)
else:
@ -730,19 +733,19 @@ def wanted_download_subtitles(path, l, count_episodes):
for episode in episodes_details:
attempt = episode['failedAttempts']
if type(attempt) == unicode:
if type(attempt) == six.text_type:
attempt = ast.literal_eval(attempt)
for language in ast.literal_eval(episode['missing_subtitles']):
if attempt is None:
attempt = []
attempt.append([language, time.time()])
else:
att = zip(*attempt)[0]
att = list(zip(*attempt))[0]
if language not in att:
attempt.append([language, time.time()])
database.execute("UPDATE table_episodes SET failedAttempts=? WHERE sonarrEpisodeId=?",
(unicode(attempt), episode['sonarrEpisodeId']))
(six.text_type(attempt), episode['sonarrEpisodeId']))
for i in range(len(attempt)):
if attempt[i][0] == language:
@ -765,7 +768,7 @@ def wanted_download_subtitles(path, l, count_episodes):
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles(path_replace(episode['path']))
store_subtitles(episode['path'], path_replace(episode['path']))
history_log(1, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message, path, language_code, provider, score)
send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message)
else:
@ -783,19 +786,19 @@ def wanted_download_subtitles_movie(path, l, count_movies):
for movie in movies_details:
attempt = movie['failedAttempts']
if type(attempt) == unicode:
if type(attempt) == six.text_type:
attempt = ast.literal_eval(attempt)
for language in ast.literal_eval(movie['missing_subtitles']):
if attempt is None:
attempt = []
attempt.append([language, time.time()])
else:
att = zip(*attempt)[0]
att = list(zip(*attempt))[0]
if language not in att:
attempt.append([language, time.time()])
database.execute("UPDATE table_movies SET failedAttempts=? WHERE radarrId=?",
(unicode(attempt), movie['radarrId']))
(six.text_type(attempt), movie['radarrId']))
for i in range(len(attempt)):
if attempt[i][0] == language:
@ -818,7 +821,7 @@ def wanted_download_subtitles_movie(path, l, count_movies):
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles_movie(path_replace_movie(movie['path']))
store_subtitles_movie(movie['path'], path_replace_movie(movie['path']))
history_log_movie(1, movie['radarrId'], message, path, language_code, provider, score)
send_notifications_movie(movie['radarrId'], message)
else:
@ -902,7 +905,7 @@ def refine_from_db(path, video):
"table_episodes.video_codec, table_episodes.audio_codec, table_episodes.path "
"FROM table_episodes INNER JOIN table_shows on "
"table_shows.sonarrSeriesId = table_episodes.sonarrSeriesId "
"WHERE table_episodes.path = ?", (unicode(path_replace_reverse(path)),), only_one=True)
"WHERE table_episodes.path = ?", (path_replace_reverse(path),), only_one=True)
if data:
video.series, year, country = series_re.match(data['seriesTitle']).groups()
@ -924,7 +927,7 @@ def refine_from_db(path, video):
elif isinstance(video, Movie):
data = database.execute("SELECT title, year, alternativeTitles, format, resolution, video_codec, audio_codec, "
"imdbId FROM table_movies WHERE path = ?",
(unicode(path_replace_reverse_movie(path)),), only_one=True)
(path_replace_reverse_movie(path),), only_one=True)
if data:
video.title = re.sub(r'(\(\d\d\d\d\))', '', data['title'])
@ -1118,7 +1121,7 @@ def upgrade_subtitles():
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles(path_replace(episode['video_path']))
store_subtitles(episode['video_path'], path_replace(episode['video_path']))
history_log(3, episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message, path, language_code, provider, score)
send_notifications(episode['sonarrSeriesId'], episode['sonarrEpisodeId'], message)
@ -1167,6 +1170,6 @@ def upgrade_subtitles():
language_code = result[2] + ":forced" if forced else result[2]
provider = result[3]
score = result[4]
store_subtitles_movie(path_replace_movie(movie['video_path']))
store_subtitles_movie(movie['video_path'], path_replace_movie(movie['video_path']))
history_log_movie(3, movie['radarrId'], message, path, language_code, provider, score)
send_notifications_movie(movie['radarrId'], message)
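Editor's note: zip() returns a lazy iterator on Python 3, so the old zip(*attempt)[0] subscript raises TypeError; the list(zip(*attempt))[0] form used throughout this file materializes it first. A sketch, assuming attempt holds [language, timestamp] pairs as the surrounding code suggests:

    import time

    attempt = [['en', time.time()], ['fr', time.time()]]
    # zip(*attempt) transposes the pairs; list() is required before
    # indexing on Python 3 because zip objects are not subscriptable.
    att = list(zip(*attempt))[0]
    print('en' in att)   # True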

@ -1,73 +1,140 @@
# coding=utf-8
from __future__ import absolute_import
import ast
import os
import re
import types
import logging
import operator
import libs
import chardet
from bs4 import UnicodeDammit
from collections import OrderedDict
from config import settings
from utils import get_sonarr_platform, get_radarr_platform
def sonarr_path_mapping_regex():
global path_mapping
global sonarr_regex
global sonarr_use_path_mapping
path_mapping = ast.literal_eval(settings.general.path_mappings)
path_mapping = sorted(path_mapping, key=operator.itemgetter(0), reverse=True)
path_mapping = OrderedDict((mapping[0], mapping[1]) for mapping in path_mapping if mapping[0] != '')
if any(item for sublist in path_mapping for item in sublist):
sonarr_use_path_mapping = True
sonarr_regex = re.compile("|".join(path_mapping.keys()))
else:
sonarr_use_path_mapping = False
def sonarr_path_mapping_reverse_regex():
global sonarr_platform
global path_mapping_reverse
global sonarr_reverse_regex
global sonarr_use_path_mapping
sonarr_platform = get_sonarr_platform()
path_mapping_reverse = ast.literal_eval(settings.general.path_mappings)
path_mapping_reverse = sorted(path_mapping_reverse, key=operator.itemgetter(0), reverse=True)
path_mapping_reverse = OrderedDict((mapping[1], mapping[0]) for mapping in path_mapping_reverse if mapping[0] != '')
if any(item for sublist in path_mapping_reverse for item in sublist):
sonarr_use_path_mapping = True
sonarr_reverse_regex = re.compile("|".join(map(re.escape, path_mapping_reverse.keys())))
else:
sonarr_use_path_mapping = False
def radarr_path_mapping_regex():
global path_mapping_movie
global radarr_regex
global radarr_use_path_mapping
path_mapping_movie = ast.literal_eval(settings.general.path_mappings_movie)
path_mapping_movie = sorted(path_mapping_movie, key=operator.itemgetter(0), reverse=True)
path_mapping_movie = OrderedDict((mapping[0], mapping[1]) for mapping in path_mapping_movie if mapping[0] != '')
if any(item for sublist in path_mapping_movie for item in sublist):
radarr_use_path_mapping = True
radarr_regex = re.compile("|".join(path_mapping_movie.keys()))
else:
radarr_use_path_mapping = False
def radarr_path_mapping_reverse_regex():
global radarr_platform
global path_mapping_reverse_movie
global radarr_reverse_regex
global radarr_use_path_mapping
radarr_platform = get_radarr_platform()
path_mapping_reverse_movie = ast.literal_eval(settings.general.path_mappings_movie)
path_mapping_reverse_movie = sorted(path_mapping_reverse_movie, key=operator.itemgetter(0), reverse=True)
path_mapping_reverse_movie = OrderedDict((mapping[1], mapping[0]) for mapping in path_mapping_reverse_movie if mapping[0] != '')
if any(item for sublist in path_mapping_reverse_movie for item in sublist):
radarr_use_path_mapping = True
radarr_reverse_regex = re.compile("|".join(map(re.escape, path_mapping_reverse_movie.keys())))
else:
radarr_use_path_mapping = False
def path_replace(path):
if path is None:
return None
if path is None or sonarr_use_path_mapping is False:
return path
reverted_path = sonarr_regex.sub(lambda match: path_mapping[match.group(0)], path, count=1)
for path_mapping in ast.literal_eval(settings.general.path_mappings):
if path_mapping[0] in path:
path = path.replace(path_mapping[0], path_mapping[1])
if path.startswith('\\\\') or re.match(r'^[a-zA-Z]:\\', path):
path = path.replace('/', '\\')
elif path.startswith('/'):
path = path.replace('\\', '/')
break
return path
from os.path import normpath
return normpath(reverted_path)
def path_replace_reverse(path):
if path is None:
return None
if path is None or sonarr_use_path_mapping is False:
return path
reverted_path_temp = sonarr_reverse_regex.sub(lambda match: path_mapping_reverse[match.group(0)], path, count=1)
for path_mapping in ast.literal_eval(settings.general.path_mappings):
if path_mapping[1] in path:
path = path.replace(path_mapping[1], path_mapping[0])
if path.startswith('\\\\') or re.match(r'^[a-zA-Z]:\\', path):
path = path.replace('/', '\\')
elif path.startswith('/'):
path = path.replace('\\', '/')
break
return path
if sonarr_platform == 'posix':
from posixpath import normpath
reverted_path = reverted_path_temp.replace('\\', '/')
elif sonarr_platform == 'nt':
from ntpath import normpath
reverted_path = reverted_path_temp.replace('/', '\\')
return normpath(reverted_path)
def path_replace_movie(path):
if path is None:
return None
if path is None or radarr_use_path_mapping is False:
return path
reverted_path = radarr_regex.sub(lambda match: path_mapping_movie[match.group(0)], path, count=1)
for path_mapping in ast.literal_eval(settings.general.path_mappings_movie):
if path_mapping[0] in path:
path = path.replace(path_mapping[0], path_mapping[1])
if path.startswith('\\\\') or re.match(r'^[a-zA-Z]:\\', path):
path = path.replace('/', '\\')
elif path.startswith('/'):
path = path.replace('\\', '/')
break
return path
from os.path import normpath
return normpath(reverted_path)
def path_replace_reverse_movie(path):
if path is None:
return None
if path is None or radarr_use_path_mapping is False:
return path
reverted_path_temp = radarr_reverse_regex.sub(lambda match: path_mapping_reverse_movie[match.group(0)], path, count=1)
for path_mapping in ast.literal_eval(settings.general.path_mappings_movie):
if path_mapping[1] in path:
path = path.replace(path_mapping[1], path_mapping[0])
if path.startswith('\\\\') or re.match(r'^[a-zA-Z]:\\', path):
path = path.replace('/', '\\')
elif path.startswith('/'):
path = path.replace('\\', '/')
break
return path
if radarr_platform == 'posix':
from posixpath import normpath
reverted_path = reverted_path_temp.replace('\\', '/')
elif radarr_platform == 'nt':
from ntpath import normpath
reverted_path = reverted_path_temp.replace('/', '\\')
return normpath(reverted_path)
def pp_replace(pp_command, episode, subtitles, language, language_code2, language_code3, forced):
@ -126,7 +193,7 @@ def force_unicode(s):
:param s: string
:return: unicode string
"""
if not isinstance(s, types.UnicodeType):
if not isinstance(s, str):
try:
s = s.decode("utf-8")
except UnicodeDecodeError:
@ -136,3 +203,9 @@ def force_unicode(s):
except UnicodeDecodeError:
s = UnicodeDammit(s).unicode_markup
return s
sonarr_path_mapping_regex()
sonarr_path_mapping_reverse_regex()
radarr_path_mapping_regex()
radarr_path_mapping_reverse_regex()
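Editor's note: the rewritten helpers trade a per-call loop over the settings for machinery built once at import time: mappings are sorted longest-source-first, folded into an OrderedDict, and joined into a single alternation so one sub() performs the replacement. A standalone sketch of that technique (the sample mappings are made up):

    import re
    from collections import OrderedDict

    raw_mappings = [['/tv/', '/mnt/tv/'], ['/tv/kids/', '/mnt/kids/']]
    # Longest source prefix first, so '/tv/kids/' wins over '/tv/'.
    ordered = sorted(raw_mappings, key=lambda m: m[0], reverse=True)
    mapping = OrderedDict((src, dst) for src, dst in ordered if src != '')
    pattern = re.compile("|".join(map(re.escape, mapping.keys())))

    def path_replace(path):
        # Substitute only the first (leftmost) matching prefix.
        return pattern.sub(lambda m: mapping[m.group(0)], path, count=1)

    print(path_replace('/tv/kids/show/ep.mkv'))   # /mnt/kids/show/ep.mkv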

@ -1,11 +1,12 @@
# coding=utf-8
from __future__ import absolute_import, print_function
import os
import time
import rarfile
from cork import Cork
from ConfigParser2 import ConfigParser
from backports import configparser2
from config import settings
from get_args import args
from logger import configure_logging
@ -34,7 +35,7 @@ if not os.path.exists(args.config_dir):
try:
os.mkdir(os.path.join(args.config_dir))
except OSError:
print "BAZARR The configuration directory doesn't exist and Bazarr cannot create it (permission issue?)."
print("BAZARR The configuration directory doesn't exist and Bazarr cannot create it (permission issue?).")
exit(2)
if not os.path.exists(os.path.join(args.config_dir, 'config')):
@ -83,7 +84,7 @@ if not os.path.exists(os.path.join(args.config_dir, 'config', 'releases.txt')):
config_file = os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini'))
cfg = ConfigParser()
cfg = configparser2.ConfigParser()
if not os.path.exists(os.path.normpath(os.path.join(args.config_dir, 'config', 'users.json'))):
cork = Cork(os.path.normpath(os.path.join(args.config_dir, 'config')), initialize=True)

@ -2,10 +2,28 @@
import os
import sys
from shutil import rmtree
def clean_libs():
libs_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'libs')
# Delete the old module almost empty directory compatible only with Python 2.7.x that cause bad magic number error
# if they are present in Python 3.x.
module_list = ['enum', 'concurrent']
for module in module_list:
module_path = os.path.join(libs_dir, module)
rmtree(module_path, ignore_errors=True)
def set_libs():
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../libs/'))
from six import PY3
if PY3:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../libs3/'))
else:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../libs2/'))
clean_libs()
set_libs()
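Editor's note: set_libs() inserts the shared libs/ directory at index 0 and then the interpreter-specific one, so libs3/ (or libs2/) lands in front and shadows any module duplicated in libs/. A sketch of how the ordering falls out, using sys.version_info so it runs before six is importable:

    import os
    import sys

    base = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, os.path.join(base, '../libs/'))
    # Inserted second at index 0, so it is searched *before* ../libs/.
    if sys.version_info[0] >= 3:
        sys.path.insert(0, os.path.join(base, '../libs3/'))
    else:
        sys.path.insert(0, os.path.join(base, '../libs2/'))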

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import gc
import os
import babelfish
@ -13,6 +14,7 @@ from subliminal import core
from subliminal_patch import search_external_subtitles
from subzero.language import Language
from bs4 import UnicodeDammit
import six
from get_args import args
from database import database
@ -23,18 +25,19 @@ from helper import path_replace, path_replace_movie, path_replace_reverse, \
from queueconfig import notifications
from embedded_subs_reader import embedded_subs_reader
import six
gc.enable()
def store_subtitles(file):
logging.debug('BAZARR started subtitles indexing for this file: ' + file)
def store_subtitles(original_path, reversed_path):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(file):
if os.path.exists(reversed_path):
if settings.general.getboolean('use_embedded_subs'):
logging.debug("BAZARR is trying to index embedded subtitles.")
try:
subtitle_languages = embedded_subs_reader.list_languages(file)
subtitle_languages = embedded_subs_reader.list_languages(reversed_path)
for subtitle_language, subtitle_forced, subtitle_codec in subtitle_languages:
try:
if settings.general.getboolean("ignore_pgs_subs") and subtitle_codec == "hdmv_pgs_subtitle":
@ -52,23 +55,23 @@ def store_subtitles(file):
pass
except Exception as e:
logging.exception(
"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(file)[1], file))
"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path))
pass
brazilian_portuguese = [".pt-br", ".pob", "pb"]
brazilian_portuguese_forced = [".pt-br.forced", ".pob.forced", "pb.forced"]
try:
dest_folder = get_subtitle_destination_folder()
subliminal_patch.core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
subtitles = search_external_subtitles(file, languages=get_language_set(),
subtitles = search_external_subtitles(reversed_path, languages=get_language_set(),
only_one=settings.general.getboolean('single_language'))
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(file), subtitles)
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(reversed_path), subtitles)
except Exception as e:
logging.exception("BAZARR unable to index external subtitles.")
pass
else:
for subtitle, language in subtitles.iteritems():
subtitle_path = get_external_subtitles_path(file, subtitle)
for subtitle, language in six.iteritems(subtitles):
subtitle_path = get_external_subtitles_path(reversed_path, subtitle)
if str(os.path.splitext(subtitle)[0]).lower().endswith(tuple(brazilian_portuguese)):
logging.debug("BAZARR external subtitles detected: " + "pb")
actual_subtitles.append(
@ -86,29 +89,29 @@ def store_subtitles(file):
else:
if os.path.splitext(subtitle)[1] != ".sub":
logging.debug("BAZARR falling back to file content analysis to detect language.")
with open(os.path.join(os.path.dirname(file), subtitle), 'r') as f:
with open(os.path.join(os.path.dirname(reversed_path), subtitle), 'r') as f:
text = f.read()
try:
encoding = UnicodeDammit(text)
text = text.decode(encoding.original_encoding)
if six.PY2:
text = text.decode(encoding.original_encoding)
detected_language = langdetect.detect(text)
except Exception as e:
logging.exception(
'BAZARR Error trying to detect language for this subtitles file: ' +
os.path.join(os.path.dirname(file), subtitle) +
os.path.join(os.path.dirname(reversed_path), subtitle) +
' You should try to delete this subtitles file manually and ask Bazarr to download it again.')
else:
if len(detected_language) > 0:
logging.debug(
"BAZARR external subtitles detected and analysis guessed this language: " + str(
detected_language))
actual_subtitles.append([str(detected_language), path_replace_reverse(
os.path.join(os.path.dirname(file), subtitle))])
actual_subtitles.append([str(detected_language), path_replace_reverse(subtitle_path)])
database.execute("UPDATE table_episodes SET subtitles=? WHERE path=?",
(str(actual_subtitles), path_replace_reverse(file)))
(str(actual_subtitles), original_path))
episode = database.execute("SELECT sonarrEpisodeId FROM table_episodes WHERE path=?",
(path_replace_reverse(file),), only_one=True)
(original_path,), only_one=True)
if episode:
logging.debug("BAZARR storing those languages to DB: " + str(actual_subtitles))
@ -118,19 +121,19 @@ def store_subtitles(file):
else:
logging.debug("BAZARR this file doesn't seems to exist or isn't accessible.")
logging.debug('BAZARR ended subtitles indexing for this file: ' + file)
logging.debug('BAZARR ended subtitles indexing for this file: ' + reversed_path)
return actual_subtitles
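Editor's note: the six.PY2 guards added around the decode calls in this file exist because open(..., 'r').read() already yields text on Python 3; only Python 2 hands back bytes that need UnicodeDammit's guessed codec. A sketch of the guarded detection, with a hypothetical helper name:

    import langdetect
    import six
    from bs4 import UnicodeDammit

    def detect_subtitle_language(path):
        with open(path, 'r') as f:
            text = f.read()
        if six.PY2:
            # Python 2 read() returned bytes; guess the codec, then decode.
            text = text.decode(UnicodeDammit(text).original_encoding)
        return langdetect.detect(text)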
def store_subtitles_movie(file):
logging.debug('BAZARR started subtitles indexing for this file: ' + file)
def store_subtitles_movie(original_path, reversed_path):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(file):
if os.path.exists(reversed_path):
if settings.general.getboolean('use_embedded_subs'):
logging.debug("BAZARR is trying to index embedded subtitles.")
try:
subtitle_languages = embedded_subs_reader.list_languages(file)
subtitle_languages = embedded_subs_reader.list_languages(reversed_path)
for subtitle_language, subtitle_forced, subtitle_codec in subtitle_languages:
try:
if settings.general.getboolean("ignore_pgs_subs") and subtitle_codec == "hdmv_pgs_subtitle":
@ -148,64 +151,58 @@ def store_subtitles_movie(file):
pass
except Exception as e:
logging.exception(
"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(file)[1], file))
"BAZARR error when trying to analyze this %s file: %s" % (os.path.splitext(reversed_path)[1], reversed_path))
pass
brazilian_portuguese = [".pt-br", ".pob", "pb"]
brazilian_portuguese_forced = [".pt-br.forced", ".pob.forced", "pb.forced"]
try:
dest_folder = get_subtitle_destination_folder() or ''
subliminal_patch.core.CUSTOM_PATHS = [dest_folder] if dest_folder else []
subtitles = search_external_subtitles(file, languages=get_language_set())
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(file), subtitles)
subtitles = search_external_subtitles(reversed_path, languages=get_language_set())
subtitles = guess_external_subtitles(get_subtitle_destination_folder() or os.path.dirname(reversed_path), subtitles)
except Exception as e:
logging.exception("BAZARR unable to index external subtitles.")
pass
else:
for subtitle, language in subtitles.iteritems():
if str(os.path.splitext(subtitle)[0]).lower().endswith(tuple(brazilian_portuguese)) is True:
for subtitle, language in six.iteritems(subtitles):
subtitle_path = get_external_subtitles_path(reversed_path, subtitle)
if str(os.path.splitext(subtitle)[0]).lower().endswith(tuple(brazilian_portuguese)):
logging.debug("BAZARR external subtitles detected: " + "pb")
actual_subtitles.append(
[str("pb"),
path_replace_reverse_movie(os.path.join(os.path.dirname(file), dest_folder, subtitle))])
elif str(os.path.splitext(subtitle)[0]).lower().endswith(tuple(brazilian_portuguese_forced)) is True:
actual_subtitles.append([str("pb"), path_replace_reverse_movie(subtitle_path)])
elif str(os.path.splitext(subtitle)[0]).lower().endswith(tuple(brazilian_portuguese_forced)):
logging.debug("BAZARR external subtitles detected: " + "pb:forced")
actual_subtitles.append(
[str("pb:forced"),
path_replace_reverse_movie(os.path.join(os.path.dirname(file), dest_folder, subtitle))])
actual_subtitles.append([str("pb:forced"), path_replace_reverse_movie(subtitle_path)])
elif not language:
continue
elif str(language) != 'und':
logging.debug("BAZARR external subtitles detected: " + str(language))
actual_subtitles.append(
[str(language),
path_replace_reverse_movie(os.path.join(os.path.dirname(file), dest_folder, subtitle))])
actual_subtitles.append([str(language), path_replace_reverse_movie(subtitle_path)])
else:
if os.path.splitext(subtitle)[1] != ".sub":
logging.debug("BAZARR falling back to file content analysis to detect language.")
with open(os.path.join(os.path.dirname(file), dest_folder, subtitle), 'r') as f:
with open(os.path.join(os.path.dirname(reversed_path), dest_folder, subtitle), 'r') as f:
text = f.read()
try:
encoding = UnicodeDammit(text)
text = text.decode(encoding.original_encoding)
if six.PY2:
text = text.decode(encoding.original_encoding)
detected_language = langdetect.detect(text)
except Exception as e:
logging.exception(
'BAZARR Error trying to detect language for this subtitles file: ' +
os.path.join(os.path.dirname(file), subtitle) +
os.path.join(os.path.dirname(reversed_path), subtitle) +
' You should try to delete this subtitles file manually and ask Bazarr to download it again.')
else:
if len(detected_language) > 0:
logging.debug(
"BAZARR external subtitles detected and analysis guessed this language: " + str(
detected_language))
actual_subtitles.append([str(detected_language), path_replace_reverse_movie(
os.path.join(os.path.dirname(file), dest_folder, subtitle))])
"BAZARR external subtitles detected and analysis guessed this language: " +
str(detected_language))
actual_subtitles.append([str(detected_language), path_replace_reverse_movie(subtitle_path)])
database.execute("UPDATE table_movies SET subtitles=? WHERE path=?",
(str(actual_subtitles), path_replace_reverse_movie(file)))
movie = database.execute("SELECT radarrId FROM table_movies WHERE path=?",
(path_replace_reverse_movie(file),), only_one=True)
(str(actual_subtitles), original_path))
movie = database.execute("SELECT radarrId FROM table_movies WHERE path=?", (original_path,), only_one=True)
if movie:
logging.debug("BAZARR storing those languages to DB: " + str(actual_subtitles))
@ -215,7 +212,7 @@ def store_subtitles_movie(file):
else:
logging.debug("BAZARR this file doesn't seems to exist or isn't accessible.")
logging.debug('BAZARR ended subtitles indexing for this file: ' + file)
logging.debug('BAZARR ended subtitles indexing for this file: ' + reversed_path)
return actual_subtitles
@ -339,7 +336,7 @@ def series_full_scan_subtitles():
for i, episode in enumerate(episodes, 1):
notifications.write(msg='Updating all episodes subtitles from disk...',
queue='list_subtitles_series', item=i, length=count_episodes)
store_subtitles(path_replace(episode['path']))
store_subtitles(episode['path'], path_replace(episode['path']))
gc.collect()
@ -351,7 +348,7 @@ def movies_full_scan_subtitles():
for i, movie in enumerate(movies, 1):
notifications.write(msg='Updating all movies subtitles from disk...',
queue='list_subtitles_movies', item=i, length=count_movies)
store_subtitles_movie(path_replace_movie(movie['path']))
store_subtitles_movie(movie['path'], path_replace_movie(movie['path']))
gc.collect()
@ -360,14 +357,14 @@ def series_scan_subtitles(no):
episodes = database.execute("SELECT path FROM table_episodes WHERE sonarrSeriesId=?", (no,))
for episode in episodes:
store_subtitles(path_replace(episode['path']))
store_subtitles(episode['path'], path_replace(episode['path']))
def movies_scan_subtitles(no):
movies = database.execute("SELECT path FROM table_movies WHERE radarrId=?", (no,))
for movie in movies:
store_subtitles_movie(path_replace_movie(movie['path']))
store_subtitles_movie(movie['path'], path_replace_movie(movie['path']))
def get_external_subtitles_path(file, subtitle):
@ -398,7 +395,7 @@ def get_external_subtitles_path(file, subtitle):
def guess_external_subtitles(dest_folder, subtitles):
for subtitle, language in subtitles.iteritems():
for subtitle, language in six.iteritems(subtitles):
if not language:
subtitle_path = os.path.join(dest_folder, subtitle)
if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS:
@ -408,7 +405,8 @@ def guess_external_subtitles(dest_folder, subtitles):
text = f.read()
try:
encoding = UnicodeDammit(text)
text = text.decode(encoding.original_encoding)
if six.PY2:
text = text.decode(encoding.original_encoding)
detected_language = langdetect.detect(text)
except Exception as e:
logging.exception('BAZARR Error trying to detect language for this subtitles file: ' +

@ -1,10 +1,13 @@
# coding=utf-8
from __future__ import absolute_import
import os
import logging
import re
import types
import platform
import warnings
import six
from logging.handlers import TimedRotatingFileHandler
from get_args import args
@ -39,6 +42,9 @@ class NoExceptionFormatter(logging.Formatter):
def configure_logging(debug=False):
if six.PY3:
warnings.simplefilter('ignore', category=ResourceWarning)
if not debug:
log_level = "INFO"
else:
@ -61,7 +67,7 @@ def configure_logging(debug=False):
# File Logging
global fh
fh = TimedRotatingFileHandler(os.path.join(args.config_dir, 'log/bazarr.log'), when="midnight", interval=1,
backupCount=7)
backupCount=7, delay=True)
f = OneLineExceptionFormatter('%(asctime)s|%(levelname)-8s|%(name)-32s|%(message)s|',
'%d/%m/%Y %H:%M:%S')
fh.setFormatter(f)
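Editor's note: delay=True defers opening bazarr.log until the first record is actually emitted, so short-lived invocations that never log do not touch or rotate the file. A minimal sketch with an illustrative path:

    import logging
    from logging.handlers import TimedRotatingFileHandler

    # The log file is only created on the first emit() because of delay=True.
    fh = TimedRotatingFileHandler('/tmp/example.log', when='midnight',
                                  interval=1, backupCount=7, delay=True)
    fh.setFormatter(logging.Formatter('%(asctime)s|%(levelname)-8s|%(message)s|'))
    logging.getLogger('example').addHandler(fh)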
@ -108,10 +114,10 @@ class MyFilter(logging.Filter):
class ArgsFilteringFilter(logging.Filter):
def filter_args(self, record, func):
if isinstance(record.args, (types.ListType, types.TupleType)):
if isinstance(record.args, (list, tuple)):
final_args = []
for arg in record.args:
if not isinstance(arg, basestring):
if not isinstance(arg, six.string_types):
final_args.append(arg)
continue
@ -119,7 +125,7 @@ class ArgsFilteringFilter(logging.Filter):
record.args = type(record.args)(final_args)
elif isinstance(record.args, dict):
for key, arg in record.args.items():
if not isinstance(arg, basestring):
if not isinstance(arg, six.string_types):
continue
record.args[key] = func(arg)

@ -1,6 +1,6 @@
# coding=utf-8
bazarr_version = '0.8.3.4'
bazarr_version = '0.8.4'
import os
os.environ["SZ_USER_AGENT"] = "Bazarr/1"
@ -9,6 +9,11 @@ os.environ["BAZARR_VERSION"] = bazarr_version
import gc
import sys
import libs
import six
from six.moves import zip
from functools import reduce
import bottle
import itertools
import operator
@ -16,7 +21,7 @@ import pretty
import math
import ast
import hashlib
import urllib
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
import warnings
import queueconfig
import platform
@ -54,13 +59,15 @@ from list_subtitles import store_subtitles, store_subtitles_movie, series_scan_s
from get_subtitle import download_subtitle, series_download_subtitles, movies_download_subtitles, \
manual_search, manual_download_subtitle, manual_upload_subtitle
from utils import history_log, history_log_movie, get_sonarr_version, get_radarr_version
from helper import path_replace_reverse, path_replace_reverse_movie
from scheduler import *
from notifier import send_notifications, send_notifications_movie
from subliminal_patch.extensions import provider_registry as provider_manager
from subliminal_patch.core import SUBTITLE_EXTENSIONS
reload(sys)
sys.setdefaultencoding('utf8')
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
gc.enable()
# Check and install update on startup when running on Windows from installer
@ -1289,12 +1296,12 @@ def save_settings():
settings_death_by_captcha_username = request.forms.get('settings_death_by_captcha_username')
settings_death_by_captcha_password = request.forms.get('settings_death_by_captcha_password')
before = (unicode(settings.general.ip), int(settings.general.port), unicode(settings.general.base_url),
unicode(settings.general.path_mappings), unicode(settings.general.getboolean('use_sonarr')),
unicode(settings.general.getboolean('use_radarr')), unicode(settings.general.path_mappings_movie))
after = (unicode(settings_general_ip), int(settings_general_port), unicode(settings_general_baseurl),
unicode(settings_general_pathmapping), unicode(settings_general_use_sonarr),
unicode(settings_general_use_radarr), unicode(settings_general_pathmapping_movie))
before = (six.text_type(settings.general.ip), int(settings.general.port), six.text_type(settings.general.base_url),
six.text_type(settings.general.path_mappings), six.text_type(settings.general.getboolean('use_sonarr')),
six.text_type(settings.general.getboolean('use_radarr')), six.text_type(settings.general.path_mappings_movie))
after = (six.text_type(settings_general_ip), int(settings_general_port), six.text_type(settings_general_baseurl),
six.text_type(settings_general_pathmapping), six.text_type(settings_general_use_sonarr),
six.text_type(settings_general_use_radarr), six.text_type(settings_general_pathmapping_movie))
settings.general.ip = text_type(settings_general_ip)
settings.general.port = text_type(settings_general_port)
@ -1359,7 +1366,7 @@ def save_settings():
settings_proxy_password = request.forms.get('settings_proxy_password')
settings_proxy_exclude = request.forms.get('settings_proxy_exclude')
before_proxy_password = (unicode(settings.proxy.type), unicode(settings.proxy.exclude))
before_proxy_password = (six.text_type(settings.proxy.type), six.text_type(settings.proxy.exclude))
if before_proxy_password[0] != settings_proxy_type:
configured()
if before_proxy_password[1] == settings_proxy_password:
@ -1698,10 +1705,12 @@ def system():
def get_logs():
authorize()
logs = []
for line in reversed(open(os.path.join(args.config_dir, 'log', 'bazarr.log')).readlines()):
lin = []
lin = line.split('|')
logs.append(lin)
with open(os.path.join(args.config_dir, 'log', 'bazarr.log')) as file:
for line in file.readlines():
lin = []
lin = line.split('|')
logs.append(lin)
logs.reverse()
return dict(data=logs)
@ -1733,7 +1742,7 @@ def remove_subtitles():
history_log(0, sonarrSeriesId, sonarrEpisodeId, result, language=alpha2_from_alpha3(language))
except OSError as e:
logging.exception('BAZARR cannot delete subtitles file: ' + subtitlesPath)
store_subtitles(unicode(episodePath))
store_subtitles(path_replace_reverse(episodePath), episodePath)
@route(base_url + 'remove_subtitles_movie', method='POST')
@ -1751,7 +1760,7 @@ def remove_subtitles_movie():
history_log_movie(0, radarrId, result, language=alpha2_from_alpha3(language))
except OSError as e:
logging.exception('BAZARR cannot delete subtitles file: ' + subtitlesPath)
store_subtitles_movie(unicode(moviePath))
store_subtitles_movie(path_replace_reverse_movie(moviePath), moviePath)
@route(base_url + 'get_subtitle', method='POST')
@ -1784,7 +1793,7 @@ def get_subtitle():
score = result[4]
history_log(1, sonarrSeriesId, sonarrEpisodeId, message, path, language_code, provider, score)
send_notifications(sonarrSeriesId, sonarrEpisodeId, message)
store_subtitles(unicode(episodePath))
store_subtitles(path, episodePath)
redirect(ref)
except OSError:
pass
@ -1841,7 +1850,7 @@ def manual_get_subtitle():
score = result[4]
history_log(2, sonarrSeriesId, sonarrEpisodeId, message, path, language_code, provider, score)
send_notifications(sonarrSeriesId, sonarrEpisodeId, message)
store_subtitles(unicode(episodePath))
store_subtitles(path, episodePath)
redirect(ref)
except OSError:
pass
@ -1884,7 +1893,7 @@ def perform_manual_upload_subtitle():
score = 360
history_log(4, sonarrSeriesId, sonarrEpisodeId, message, path, language_code, provider, score)
send_notifications(sonarrSeriesId, sonarrEpisodeId, message)
store_subtitles(unicode(episodePath))
store_subtitles(path, episodePath)
redirect(ref)
except OSError:
@ -1920,7 +1929,7 @@ def get_subtitle_movie():
score = result[4]
history_log_movie(1, radarrId, message, path, language_code, provider, score)
send_notifications_movie(radarrId, message)
store_subtitles_movie(unicode(moviePath))
store_subtitles_movie(path, moviePath)
redirect(ref)
except OSError:
pass
@ -1975,7 +1984,7 @@ def manual_get_subtitle_movie():
score = result[4]
history_log_movie(2, radarrId, message, path, language_code, provider, score)
send_notifications_movie(radarrId, message)
store_subtitles_movie(unicode(moviePath))
store_subtitles_movie(path, moviePath)
redirect(ref)
except OSError:
pass
@ -2017,7 +2026,7 @@ def perform_manual_upload_subtitle_movie():
score = 120
history_log_movie(4, radarrId, message, path, language_code, provider, score)
send_notifications_movie(radarrId, message)
store_subtitles_movie(unicode(moviePath))
store_subtitles_movie(path, moviePath)
redirect(ref)
except OSError:
@ -2091,7 +2100,7 @@ def api_history():
@route(base_url + 'test_url/<protocol>/<url:path>', method='GET')
@custom_auth_basic(check_credentials)
def test_url(protocol, url):
url = urllib.unquote(url)
url = six.moves.urllib.parse.unquote(url)
try:
result = requests.get(protocol + "://" + url, allow_redirects=False, verify=False).json()['version']
except:
@ -2103,7 +2112,7 @@ def test_url(protocol, url):
@route(base_url + 'test_notification/<protocol>/<provider:path>', method='GET')
@custom_auth_basic(check_credentials)
def test_notification(protocol, provider):
provider = urllib.unquote(provider)
provider = six.moves.urllib.parse.unquote(provider)
apobj = apprise.Apprise()
apobj.add(protocol + "://" + provider)
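Editor's note: urllib was split into submodules on Python 3; six.moves.urllib papers over the two layouts, which is why unquote() is reached as six.moves.urllib.parse.unquote above. A one-call sketch:

    import six.moves.urllib.parse

    # urllib.unquote on Python 2, urllib.parse.unquote on Python 3.
    print(six.moves.urllib.parse.unquote('sonarr%3A8989/api'))   # sonarr:8989/api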

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import apprise
import os
import logging
@ -22,23 +23,20 @@ def update_notifier():
notifiers_current = []
for notifier in notifiers_current_db:
notifiers_current.append(notifier['name'])
notifiers_current.append([notifier['name']])
for x in results['schemas']:
if x['service_name'] not in notifiers_current:
notifiers_new.append(x['service_name'])
if [x['service_name']] not in notifiers_current:
notifiers_new.append([x['service_name'], 0])
logging.debug('Adding new notifier agent: ' + x['service_name'])
else:
notifiers_old.append(x['service_name'])
notifier_current = [i for i in notifiers_current]
notifiers_old.append([x['service_name']])
notifiers_to_delete = list(set(notifier_current) - set(notifiers_old))
for notifier_new in notifiers_new:
database.execute("INSERT INTO table_settings_notifier (name, enabled) VALUES (?, ?)", (notifier_new, 0))
notifiers_to_delete = [item for item in notifiers_current if item not in notifiers_old]
database.execute("INSERT INTO table_settings_notifier (name, enabled) VALUES (?, ?)", notifiers_new, execute_many=True)
for notifier_to_delete in notifiers_to_delete:
database.execute("DELETE FROM table_settings_notifier WHERE name=?", (notifier_to_delete,))
database.execute("DELETE FROM table_settings_notifier WHERE name=?", notifiers_to_delete, execute_many=True)
def get_notifier_providers():

@ -1,3 +1,4 @@
from __future__ import absolute_import
from collections import deque
import json
@ -36,7 +37,7 @@ class Notify:
:rtype: str
"""
if self.queue:
if self.queue and (len(self.queue) > 0):
return self.queue.popleft()

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
from get_episodes import sync_episodes, update_all_episodes
from get_movies import update_movies, update_all_movies
from get_series import update_series

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import os
import time
import platform
@ -10,7 +11,6 @@ import requests
from whichcraft import which
from get_args import args
from config import settings, url_sonarr, url_radarr
from database import database
from subliminal import region as subliminal_cache_region
import datetime
@ -19,6 +19,7 @@ import glob
def history_log(action, sonarrSeriesId, sonarrEpisodeId, description, video_path=None, language=None, provider=None,
score=None, forced=False):
from database import database
database.execute("INSERT INTO table_history (action, sonarrSeriesId, sonarrEpisodeId, timestamp, description,"
"video_path, language, provider, score) VALUES (?,?,?,?,?,?,?,?,?)", (action, sonarrSeriesId,
sonarrEpisodeId, time.time(),
@ -28,6 +29,7 @@ def history_log(action, sonarrSeriesId, sonarrEpisodeId, description, video_path
def history_log_movie(action, radarrId, description, video_path=None, language=None, provider=None, score=None,
forced=False):
from database import database
database.execute("INSERT INTO table_history_movie (action, radarrId, timestamp, description, video_path, language, "
"provider, score) VALUES (?,?,?,?,?,?,?,?)", (action, radarrId, time.time(), description,
video_path, language, provider, score))
@ -93,6 +95,23 @@ def get_sonarr_version():
return sonarr_version
def get_sonarr_platform():
use_sonarr = settings.general.getboolean('use_sonarr')
apikey_sonarr = settings.sonarr.apikey
sv = url_sonarr() + "/api/system/status?apikey=" + apikey_sonarr
sonarr_platform = ''
if use_sonarr:
try:
if requests.get(sv, timeout=60, verify=False).json()['isLinux'] or requests.get(sv, timeout=60, verify=False).json()['isOsx']:
sonarr_platform = 'posix'
elif requests.get(sv, timeout=60, verify=False).json()['isWindows']:
sonarr_platform = 'nt'
except Exception as e:
logging.debug('BAZARR cannot get Sonarr platform')
return sonarr_platform
def get_radarr_version():
use_radarr = settings.general.getboolean('use_radarr')
apikey_radarr = settings.radarr.apikey
@ -105,3 +124,20 @@ def get_radarr_version():
logging.debug('BAZARR cannot get Radarr version')
return radarr_version
def get_radarr_platform():
use_radarr = settings.general.getboolean('use_radarr')
apikey_radarr = settings.radarr.apikey
rv = url_radarr() + "/api/system/status?apikey=" + apikey_radarr
radarr_platform = ''
if use_radarr:
try:
if requests.get(rv, timeout=60, verify=False).json()['isLinux'] or requests.get(rv, timeout=60, verify=False).json()['isOsx']:
radarr_platform = 'posix'
elif requests.get(rv, timeout=60, verify=False).json()['isWindows']:
radarr_platform = 'nt'
except Exception as e:
logging.debug('BAZARR cannot get Radarr platform')
return radarr_platform
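Editor's note: both platform getters above hit /api/system/status up to three times per call because every flag test re-issues the request. A sketch that fetches the JSON once and maps the flags; base_url and apikey are placeholders:

    import requests

    def get_platform(base_url, apikey):
        url = base_url + "/api/system/status?apikey=" + apikey
        try:
            status = requests.get(url, timeout=60, verify=False).json()
        except Exception:
            return ''
        if status.get('isLinux') or status.get('isOsx'):
            return 'posix'
        if status.get('isWindows'):
            return 'nt'
        return ''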

@ -1,797 +0,0 @@
"""Configuration file parser.
A setup file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self.comment_store = None ## used for storing comments in ini
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT or any of its
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in a setup file each contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
comment_store = {}
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' :
continue
### store comments for doc purposes
### Deal with cases of sections and options being there or not
if line[0] in '#;' and cursect is not None:
if optname is None:
comment_store.setdefault(cursect['__name__'] +
"::" + "global",[]).append(line)
else:
comment_store.setdefault(cursect['__name__'] +
"::" + optname,[]).append(line)
continue
elif line[0] in '#;' and cursect is None:
comment_store.setdefault("global" +
"::" + optname,[]).append(line)
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
self.comment_store = comment_store
def ini_as_rst(self):
"""trivial helper function to putput comment_stroe as rest
.. todo:: write actual doctests with string input
>> p = ConfigParser2.SafeConfigParser()
>> p.read(f)
['/usr/home/pbrian/src/public/configparser2/example.ini']
>> open("/tmp/foo.rst", "w").write(p.ini_as_rst())
"""
outstr = ".. rst version of ini file\n\n"
_cursectname = None
for item in sorted(self.comment_store.keys()):
_sect, _opt = item.split("::")
if _sect != _cursectname:
outstr += "\n%s\n%s\n" % (_sect, "-"* len(_sect))
_cursectname = _sect
txt = " ".join(self.comment_store[item])
txt = txt.replace("#", "").replace(";","")
outstr += ":%s: %s" % (_opt, txt)
return outstr
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)

@ -13,7 +13,7 @@ See <http://github.com/ActiveState/appdirs> for details and usage.
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
@ -117,7 +117,7 @@ def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
@ -184,13 +184,13 @@ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>".
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
@ -222,7 +222,7 @@ def site_config_dir(appname=None, appauthor=None, version=None, multipath=False)
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
@ -311,6 +311,48 @@ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
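A short usage sketch of the state-dir API this hunk introduces; paths shown are for Linux and differ on other platforms:

from appdirs import AppDirs

dirs = AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_state_dir)
# Linux: ~/.local/state/MyApp/1.0 (or under $XDG_STATE_HOME if set)
# macOS and Windows: same base location as user_data_dir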
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
@ -329,7 +371,7 @@ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
@ -364,8 +406,8 @@ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
@ -397,6 +439,11 @@ class AppDirs(object):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
@ -410,7 +457,10 @@ def _get_win_folder_from_registry(csidl_name):
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
@ -500,7 +550,7 @@ def _get_win_folder_with_jna(csidl_name):
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernal.GetShortPathName(dir, buf, buf_size):
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
@ -527,9 +577,15 @@ if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")

File diff suppressed because it is too large.

@ -0,0 +1,171 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from collections.abc import MutableMapping
except ImportError:
# Python 2: the abc classes still live in collections itself
from collections import MutableMapping
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from io import open
import sys
try:
from thread import get_ident
except ImportError:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
str = type('str')
def from_none(exc):
"""raise from_none(ValueError('a')) == raise ValueError('a') from None"""
exc.__cause__ = None
exc.__suppress_context__ = True
return exc
# from reprlib 3.2.1
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
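Python 3 ships this same decorator as reprlib.recursive_repr; a minimal demonstration of the fillvalue behaviour:

from reprlib import recursive_repr  # stdlib twin of the backport above

class Node:
    def __init__(self):
        self.next = self  # deliberately self-referential

    @recursive_repr()
    def __repr__(self):
        return 'Node(next=%r)' % (self.next,)

print(Node())  # Node(next=...) instead of a RecursionError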
# from collections 3.2.1
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
try:
from collections import ChainMap
except ImportError:
ChainMap = _ChainMap
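As the class docstring says, lookups search every map but writes and deletions only touch maps[0]; a quick check of that contract (ChainMap here is whichever implementation the import above resolved):

defaults = {'colour': 'red', 'size': 'M'}
overrides = {}
cm = ChainMap(overrides, defaults)

cm['colour'] = 'blue'                 # writes land in the first mapping
assert overrides == {'colour': 'blue'}
assert defaults['colour'] == 'red'    # later maps stay untouched
assert cm['size'] == 'M'              # lookups fall through to defaults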

@ -1,3 +1,3 @@
from .core import where
__version__ = "2019.03.09"
__version__ = "2019.09.11"

@ -771,36 +771,6 @@ vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
+OkuE6N36B9K
-----END CERTIFICATE-----
# Issuer: CN=Class 2 Primary CA O=Certplus
# Subject: CN=Class 2 Primary CA O=Certplus
# Label: "Certplus Class 2 Primary CA"
# Serial: 177770208045934040241468760488327595043
# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
-----BEGIN CERTIFICATE-----
MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
l7+ijrRU
-----END CERTIFICATE-----
# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Label: "DST Root CA X3"
@ -1219,36 +1189,6 @@ t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
-----END CERTIFICATE-----
# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Label: "Deutsche Telekom Root CA 2"
# Serial: 38
# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
-----BEGIN CERTIFICATE-----
MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
Cm26OWMohpLzGITY+9HPBVZkVw==
-----END CERTIFICATE-----
# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
# Label: "Cybertrust Global Root"
@ -3453,46 +3393,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
-----END CERTIFICATE-----
# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Label: "Certinomis - Root CA"
# Serial: 1
# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f
# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8
# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58
-----BEGIN CERTIFICATE-----
MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
-----END CERTIFICATE-----
# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GB CA"

@ -21,10 +21,8 @@ class User_Agent():
def loadUserAgent(self, *args, **kwargs):
browser = kwargs.pop('browser', 'chrome')
user_agents = json.load(
open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r'),
object_pairs_hook=OrderedDict
)
with open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r') as file:
user_agents = json.load(file, object_pairs_hook=OrderedDict)
if not user_agents.get(browser):
logging.error('Sorry "{}" browser User-Agent was not found.'.format(browser))

@ -80,7 +80,6 @@ import sys
import threading
import time
import urllib
import urllib2
try:
from json import read as json_decode, write as json_encode
except ImportError:
@ -146,7 +145,7 @@ class Client(object):
def _log(self, cmd, msg=''):
if self.is_verbose:
print '%d %s %s' % (time.time(), cmd, msg.rstrip())
print('%d %s %s' % (time.time(), cmd, msg.rstrip()))
return self
def close(self):
@ -251,7 +250,7 @@ class HttpClient(Client):
data=payload,
headers=headers
)).read()
except urllib2.HTTPError, err:
except urllib2.HTTPError as err:
if 403 == err.code:
raise AccessDeniedException('Access denied, please check'
' your credentials and/or balance')
@ -357,7 +356,7 @@ class SocketClient(Client):
self.socket.settimeout(0)
try:
self.socket.connect(host)
except socket.error, err:
except socket.error as err:
if (err.args[0] not in
(errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS)):
self.close()
@ -392,7 +391,7 @@ class SocketClient(Client):
raise IOError('recv(): connection lost')
else:
response += s
except socket.error, err:
except socket.error as err:
if (err.args[0] not in
(errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS)):
raise err
@ -416,10 +415,10 @@ class SocketClient(Client):
try:
sock = self.connect()
response = self._sendrecv(sock, request)
except IOError, err:
except IOError as err:
sys.stderr.write(str(err) + "\n")
self.close()
except socket.error, err:
except socket.error as err:
sys.stderr.write(str(err) + "\n")
self.close()
raise IOError('Connection refused')
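Every hunk in this file applies the same mechanical conversion: the comma form of except is Python 2-only syntax, while the as form parses on both major versions. For reference:

# Python 2 only (a SyntaxError on Python 3):
#     except socket.error, err:
# Portable across Python 2.6+ and Python 3:
try:
    raise IOError('recv(): connection lost')
except IOError as err:
    print(err)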
@ -493,20 +492,20 @@ if '__main__' == __name__:
client = SocketClient(sys.argv[1], sys.argv[2])
client.is_verbose = True
print 'Your balance is %s US cents' % client.get_balance()
print('Your balance is %s US cents' % client.get_balance())
for fn in sys.argv[3:]:
try:
# Put your CAPTCHA image file name or file-like object, and optional
# solving timeout (in seconds) here:
captcha = client.decode(fn, DEFAULT_TIMEOUT)
except Exception, e:
except Exception as e:
sys.stderr.write('Failed uploading CAPTCHA: %s\n' % (e, ))
captcha = None
if captcha:
print 'CAPTCHA %d solved: %s' % \
(captcha['captcha'], captcha['text'])
print('CAPTCHA %d solved: %s' % \
(captcha['captcha'], captcha['text']))
# Report as incorrectly solved if needed. Make sure the CAPTCHA was
# in fact incorrectly solved!

@ -40,7 +40,7 @@ import operator
import itertools
import collections
__version__ = '4.3.0'
__version__ = '4.4.0'
if sys.version >= '3':
from inspect import getfullargspec
@ -65,6 +65,12 @@ except AttributeError:
# let's assume there are no coroutine functions in old Python
def iscoroutinefunction(f):
return False
try:
from inspect import isgeneratorfunction
except ImportError:
# assume no generator function in old Python versions
def isgeneratorfunction(caller):
return False
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
@ -173,7 +179,8 @@ class FunctionMaker(object):
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % (next(self._compile_count),)
filename = '<%s:decorator-gen-%d>' % (
__file__, next(self._compile_count))
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
@ -218,6 +225,8 @@ class FunctionMaker(object):
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
@ -225,9 +234,23 @@ def decorate(func, caller, extras=()):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if '3.5' <= sys.version < '3.6':
# with Python 3.5 isgeneratorfunction returns True for all coroutines
# however we know that it is NOT possible to have a generator
# coroutine in python 3.5: PEP525 was not there yet
generatorcaller = isgeneratorfunction(
caller) and not iscoroutinefunction(caller)
else:
generatorcaller = isgeneratorfunction(caller)
if generatorcaller:
fun = FunctionMaker.create(
func, "for res in _call_(_func_, %s%%(shortsignature)s):\n"
" yield res" % es, evaldict, __wrapped__=func)
else:
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
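With this change a caller that is itself a generator function produces decorated generators; a small sketch of what decorator >= 4.4 enables (assumes that release is installed):

from decorator import decorator

@decorator
def logged(func, *args, **kw):
    # the caller is a generator function, so the decorated
    # function remains a generator after wrapping
    for item in func(*args, **kw):
        print('yielding', item)
        yield item

@logged
def countdown(n):
    while n:
        yield n
        n -= 1

print(list(countdown(3)))  # prints "yielding 3/2/1", then [3, 2, 1]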
@ -261,12 +284,12 @@ def decorator(caller, _func=None):
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(%s func)' % (name, defaultargs),
'%s(func, %s)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = defaults + (None,)
dec.__defaults__ = (None,) + defaults
return dec

@ -1,4 +1,4 @@
__version__ = '0.6.5'
__version__ = '0.7.1'
from .lock import Lock # noqa
from .lock import NeedRegenerationException # noqa

@ -10,8 +10,9 @@ from ..util import compat
import time
import datetime
from numbers import Number
from functools import wraps
from functools import wraps, partial
import threading
from decorator import decorate
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
@ -188,7 +189,7 @@ class DefaultInvalidationStrategy(RegionInvalidationStrategy):
class CacheRegion(object):
"""A front end to a particular cache backend.
r"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
@ -484,6 +485,26 @@ class CacheRegion(object):
else:
return self._LockWrapper()
# cached value
_actual_backend = None
@property
def actual_backend(self):
"""Return the ultimate backend underneath any proxies.
The backend might be the result of one or more ``proxy.wrap``
applications. If so, derive the actual underlying backend.
.. versionadded:: 0.6.6
"""
if self._actual_backend is None:
_backend = self.backend
while hasattr(_backend, 'proxied'):
_backend = _backend.proxied
self._actual_backend = _backend
return self._actual_backend
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
@ -723,7 +744,8 @@ class CacheRegion(object):
]
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
self, key, creator, expiration_time=None, should_cache_fn=None,
creator_args=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
@ -759,6 +781,11 @@ class CacheRegion(object):
:param creator: function which creates a new value.
:param creator_args: optional tuple of (args, kwargs) that will be
passed to the creator function if present.
.. versionadded:: 0.7.0
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
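A minimal sketch of the new creator_args parameter against the standard in-memory backend; the (args, kwargs) tuple is forwarded to the creator:

from dogpile.cache import make_region

region = make_region().configure('dogpile.cache.memory')

def load_user(user_id, include_profile=False):
    return {'id': user_id, 'profile': include_profile}

user = region.get_or_create(
    'user:42', load_user,
    creator_args=((42,), {'include_profile': True}))
print(user)  # {'id': 42, 'profile': True}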
@ -799,7 +826,7 @@ class CacheRegion(object):
value = self.backend.get(key)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
value.metadata["ct"])):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
@ -808,7 +835,10 @@ class CacheRegion(object):
return value.payload, ct
def gen_value():
created_value = creator()
if creator_args:
created_value = creator(*creator_args[0], **creator_args[1])
else:
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
@ -831,8 +861,13 @@ class CacheRegion(object):
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
if creator_args:
@wraps(creator)
def go():
return creator(*creator_args[0], **creator_args[1])
else:
go = creator
return self.async_creation_runner(self, orig_key, go, mutex)
else:
async_creator = None
@ -896,7 +931,7 @@ class CacheRegion(object):
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata['v'])):
value.metadata['ct'])):
# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
@ -1228,26 +1263,31 @@ class CacheRegion(object):
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
key = key_generator(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, user_func, timeout,
should_cache_fn, (arg, kw))
def cache_decorator(user_func):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
key_generator = function_key_generator(namespace, user_func)
else:
key_generator = function_key_generator(
namespace, fn,
namespace, user_func,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
def refresh(*arg, **kw):
"""
Like invalidate, but regenerates the value instead
"""
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
value = user_func(*arg, **kw)
self.set(key, value)
return value
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
@ -1261,20 +1301,18 @@ class CacheRegion(object):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
user_func.set = set_
user_func.invalidate = invalidate
user_func.get = get
user_func.refresh = refresh
user_func.original = user_func
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
decorate.original = fn
# Use `decorate` to preserve the signature of :param:`user_func`.
return decorate
return decorator
return decorate(user_func, partial(
get_or_create_for_user_func, key_generator))
return cache_decorator
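After the refactor the decorated function still carries the same helper methods, now attached to user_func directly; a short usage sketch:

from dogpile.cache import make_region

region = make_region().configure('dogpile.cache.memory')

@region.cache_on_arguments()
def lookup(name):
    print('computing', name)
    return name.upper()

lookup('bazarr')             # computes and caches
lookup('bazarr')             # served from the cache, nothing printed
lookup.refresh('bazarr')     # recomputes and stores the fresh value
lookup.invalidate('bazarr')  # drops the cached value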
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
@ -1402,50 +1440,49 @@ class CacheRegion(object):
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(user_func)
def creator(*keys_to_create):
return user_func(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
return result
def cache_decorator(user_func):
key_generator = function_multi_key_generator(
namespace, user_func,
to_str=to_str)
def invalidate(*arg):
keys = key_generator(*arg)
@ -1466,7 +1503,7 @@ class CacheRegion(object):
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
values = user_func(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
@ -1478,13 +1515,18 @@ class CacheRegion(object):
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
user_func.set = set_
user_func.invalidate = invalidate
user_func.refresh = refresh
user_func.get = get
# Use `decorate` to preserve the signature of :param:`user_func`.
return decorate(user_func, partial(get_or_create_for_user_func, key_generator))
return cache_decorator
return decorate
return decorator
def make_region(*arg, **kw):

@ -1,5 +1,4 @@
from hashlib import sha1
import inspect
from ..util import compat
from ..util import langhelpers
@ -28,7 +27,7 @@ def function_key_generator(namespace, fn, to_str=compat.string_type):
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = inspect.getargspec(fn)
args = compat.inspect_getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_key(*args, **kw):
@ -50,7 +49,7 @@ def function_multi_key_generator(namespace, fn, to_str=compat.string_type):
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = inspect.getargspec(fn)
args = compat.inspect_getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_keys(*args, **kw):
@ -88,7 +87,7 @@ def kwarg_function_key_generator(namespace, fn, to_str=compat.string_type):
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
argspec = inspect.getargspec(fn)
argspec = compat.inspect_getargspec(fn)
default_list = list(argspec.defaults or [])
# Reverse the list, as we want to compare the argspec by negative index,
# meaning default_list[0] should be args[-1], which works well with

@ -69,11 +69,10 @@ class Lock(object):
"""Return true if the expiration time is reached, or no
value is available."""
return not self._has_value(createdtime) or \
(
self.expiretime is not None and
time.time() - createdtime > self.expiretime
)
return not self._has_value(createdtime) or (
self.expiretime is not None and
time.time() - createdtime > self.expiretime
)
def _has_value(self, createdtime):
"""Return true if the creation function has proceeded
@ -91,68 +90,100 @@ class Lock(object):
value = NOT_REGENERATED
createdtime = -1
generated = self._enter_create(createdtime)
generated = self._enter_create(value, createdtime)
if generated is not NOT_REGENERATED:
generated, createdtime = generated
return generated
elif value is NOT_REGENERATED:
# we called upon the creator, and it said that it
# didn't regenerate. this typically means another
# thread is running the creation function, and that the
# cache should still have a value. However,
# we don't have a value at all, which is unusual since we just
# checked for it, so check again (TODO: is this a real codepath?)
try:
value, createdtime = value_fn()
return value
except NeedRegenerationException:
raise Exception("Generation function should "
"have just been called by a concurrent "
"thread.")
raise Exception(
"Generation function should "
"have just been called by a concurrent "
"thread.")
else:
return value
def _enter_create(self, createdtime):
def _enter_create(self, value, createdtime):
if not self._is_expired(createdtime):
return NOT_REGENERATED
async = False
_async = False
if self._has_value(createdtime):
has_value = True
if not self.mutex.acquire(False):
log.debug("creation function in progress "
"elsewhere, returning")
log.debug(
"creation function in progress "
"elsewhere, returning")
return NOT_REGENERATED
else:
has_value = False
log.debug("no value, waiting for create lock")
self.mutex.acquire()
try:
log.debug("value creation lock %r acquired" % self.mutex)
# see if someone created the value already
try:
value, createdtime = self.value_and_created_fn()
except NeedRegenerationException:
pass
else:
if not self._is_expired(createdtime):
log.debug("value already present")
return value, createdtime
elif self.async_creator:
log.debug("Passing creation lock to async runner")
self.async_creator(self.mutex)
async = True
return value, createdtime
log.debug("Calling creation function")
created = self.creator()
return created
if not has_value:
# we entered without a value, or at least with "creationtime ==
# 0". Run the "getter" function again, to see if another
# thread has already generated the value while we waited on the
# mutex, or if the caller is otherwise telling us there is a
# value already which allows us to use async regeneration. (the
# latter is used by the multi-key routine).
try:
value, createdtime = self.value_and_created_fn()
except NeedRegenerationException:
# nope, nobody created the value, we're it.
# we must create it right now
pass
else:
has_value = True
# caller is telling us there is a value and that we can
# use async creation if it is expired.
if not self._is_expired(createdtime):
# it's not expired, return it
log.debug("Concurrent thread created the value")
return value, createdtime
# otherwise it's expired, call creator again
if has_value and self.async_creator:
# we have a value we can return, safe to use async_creator
log.debug("Passing creation lock to async runner")
# so...run it!
self.async_creator(self.mutex)
_async = True
# and return the expired value for now
return value, createdtime
# it's expired, and it's our turn to create it synchronously, *or*,
# there's no value at all, and we have to create it synchronously
log.debug(
"Calling creation function for %s value",
"not-yet-present" if not has_value else
"previously expired"
)
return self.creator()
finally:
if not async:
if not _async:
self.mutex.release()
log.debug("Released creation lock")
def __enter__(self):
return self._enter()
def __exit__(self, type, value, traceback):
pass

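The comments above describe the dogpile behaviour: while one thread re-runs the creation function, the other threads keep serving the stale value instead of blocking. A minimal, self-contained sketch of that pattern (a toy class for illustration, not dogpile's actual Lock API):

import threading
import time

NOT_REGENERATED = object()

class ToyDogpile:
    """Toy version of the pattern above: one thread regenerates an
    expired value while the others keep serving the stale one."""

    def __init__(self, creator, expiretime):
        self.mutex = threading.Lock()
        self.creator = creator
        self.expiretime = expiretime
        self.value = NOT_REGENERATED
        self.createdtime = -1

    def _fresh(self):
        return (self.value is not NOT_REGENERATED and
                time.time() - self.createdtime <= self.expiretime)

    def get(self):
        if self._fresh():
            return self.value                 # fast path, no lock
        if not self.mutex.acquire(False):
            if self.value is not NOT_REGENERATED:
                return self.value             # creation in progress elsewhere, serve stale
            self.mutex.acquire()              # no value at all: wait for the creator
        try:
            if self._fresh():
                return self.value             # a concurrent thread created it while we waited
            self.value = self.creator()
            self.createdtime = time.time()
            return self.value
        finally:
            self.mutex.release()

pile = ToyDogpile(creator=lambda: time.time(), expiretime=0.5)
print(pile.get())
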
@ -51,11 +51,33 @@ else:
import thread # noqa
if py3k:
import collections
ArgSpec = collections.namedtuple(
"ArgSpec",
["args", "varargs", "keywords", "defaults"])
from inspect import getfullargspec as inspect_getfullargspec
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
else:
from inspect import getargspec as inspect_getargspec # noqa
if py3k or jython:
import pickle
else:
import cPickle as pickle # noqa
if py3k:
def read_config_file(config, fileobj):
return config.read_file(fileobj)
else:
def read_config_file(config, fileobj):
return config.readfp(fileobj)
def timedelta_total_seconds(td):
if py27:

@ -50,7 +50,7 @@ class NameRegistry(object):
self.creator = creator
def get(self, identifier, *args, **kw):
"""Get and possibly create the value.
r"""Get and possibly create the value.
:param identifier: Hash key for the value.
If the creation function is called, this identifier
@ -75,10 +75,12 @@ class NameRegistry(object):
if identifier in self._values:
return self._values[identifier]
else:
self._values[identifier] = value = self.creator(identifier, *args, **kw)
self._values[identifier] = value = self.creator(
identifier, *args, **kw)
return value
except KeyError:
self._values[identifier] = value = self.creator(identifier, *args, **kw)
self._values[identifier] = value = self.creator(
identifier, *args, **kw)
return value
finally:
self._mutex.release()

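The NameRegistry hunk above is only a line-length reflow, but the surrounding logic is worth a sketch: look up without the lock first, then re-check under the lock before creating, so the creator runs at most once per identifier. A toy equivalent (illustrative names, not dogpile's API):

import threading

class ToyNameRegistry:
    def __init__(self, creator):
        self.creator = creator
        self._values = {}
        self._mutex = threading.RLock()

    def get(self, identifier, *args, **kw):
        try:
            return self._values[identifier]         # fast path, no lock
        except KeyError:
            with self._mutex:
                if identifier not in self._values:  # re-check under the lock
                    self._values[identifier] = self.creator(identifier, *args, **kw)
                return self._values[identifier]

registry = ToyNameRegistry(creator=lambda name: "value-for-" + name)
assert registry.get("a") is registry.get("a")       # created exactly once
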
@ -23,7 +23,7 @@ class ReadWriteMutex(object):
def __init__(self):
# counts how many asynchronous methods are executing
self.async = 0
self.async_ = 0
# pointer to thread that is the current sync operation
self.current_sync_operation = None
@ -31,7 +31,7 @@ class ReadWriteMutex(object):
# condition object to lock on
self.condition = threading.Condition(threading.Lock())
def acquire_read_lock(self, wait = True):
def acquire_read_lock(self, wait=True):
"""Acquire the 'read' lock."""
self.condition.acquire()
try:
@ -45,7 +45,7 @@ class ReadWriteMutex(object):
if self.current_sync_operation is not None:
return False
self.async += 1
self.async_ += 1
log.debug("%s acquired read lock", self)
finally:
self.condition.release()
@ -57,23 +57,23 @@ class ReadWriteMutex(object):
"""Release the 'read' lock."""
self.condition.acquire()
try:
self.async -= 1
self.async_ -= 1
# check if we are the last asynchronous reader thread
# out the door.
if self.async == 0:
if self.async_ == 0:
# yes. so if a sync operation is waiting, notifyAll to wake
# it up
if self.current_sync_operation is not None:
self.condition.notifyAll()
elif self.async < 0:
elif self.async_ < 0:
raise LockError("Synchronizer error - too many "
"release_read_locks called")
log.debug("%s released read lock", self)
finally:
self.condition.release()
def acquire_write_lock(self, wait = True):
def acquire_write_lock(self, wait=True):
"""Acquire the 'write' lock."""
self.condition.acquire()
try:
@ -96,7 +96,7 @@ class ReadWriteMutex(object):
self.current_sync_operation = threading.currentThread()
# now wait again for asyncs to finish
if self.async > 0:
if self.async_ > 0:
if wait:
# wait
self.condition.wait()

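The `self.async` to `self.async_` rename above is not cosmetic: `async` became a hard keyword in Python 3.7, so any attribute or variable with that name stops parsing. A quick demonstration:

# 'async' is a reserved keyword from Python 3.7 on, so the old attribute
# name no longer compiles; the trailing-underscore spelling always does.
try:
    compile("self.async = 0", "<demo>", "exec")
    print("parses (Python < 3.7)")
except SyntaxError as exc:
    print("rejected on Python 3.7+:", exc.msg)

compile("self.async_ = 0", "<demo>", "exec")  # fine on every version
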
@ -4,13 +4,9 @@ import os
import pickle
import shutil
import tempfile
import traceback
import hashlib
import appdirs
from scandir import scandir, scandir_generic as _scandir_generic
try:
from collections.abc import MutableMapping
unicode = str
@ -90,7 +86,7 @@ class FileCache(MutableMapping):
"""
def __init__(self, appname, flag='c', mode=0o666, keyencoding='utf-8',
serialize=True, app_cache_dir=None, key_file_ext=".txt"):
serialize=True, app_cache_dir=None):
"""Initialize a :class:`FileCache` object."""
if not isinstance(flag, str):
raise TypeError("flag must be str not '{}'".format(type(flag)))
@ -131,7 +127,6 @@ class FileCache(MutableMapping):
self._mode = mode
self._keyencoding = keyencoding
self._serialize = serialize
self.key_file_ext = key_file_ext
def _parse_appname(self, appname):
"""Splits an appname into the appname and subcache components."""
@ -185,16 +180,7 @@ class FileCache(MutableMapping):
self._sync = True
for ekey in self._buffer:
filename = self._key_to_filename(ekey)
try:
self._write_to_file(filename, self._buffer[ekey])
except:
logger.error("Couldn't write content from %r to cache file: %r: %s", ekey, filename,
traceback.format_exc())
try:
self.__write_to_file(filename + self.key_file_ext, ekey)
except:
logger.error("Couldn't write content from %r to cache file: %r: %s", ekey, filename,
traceback.format_exc())
self._write_to_file(filename, self._buffer[ekey])
self._buffer.clear()
self._sync = False
@ -203,7 +189,8 @@ class FileCache(MutableMapping):
raise ValueError("invalid operation on closed cache")
def _encode_key(self, key):
"""
"""Encode key using *hex_codec* for constructing a cache filename.
Keys are implicitly converted to :class:`bytes` if passed as
:class:`str`.
@ -212,15 +199,16 @@ class FileCache(MutableMapping):
key = key.encode(self._keyencoding)
elif not isinstance(key, bytes):
raise TypeError("key must be bytes or str")
return key.decode(self._keyencoding)
return codecs.encode(key, 'hex_codec').decode(self._keyencoding)
def _decode_key(self, key):
"""
"""Decode key using hex_codec to retrieve the original key.
Keys are returned as :class:`str` if serialization is enabled.
Keys are returned as :class:`bytes` if serialization is disabled.
"""
bkey = key.encode(self._keyencoding)
bkey = codecs.decode(key.encode(self._keyencoding), 'hex_codec')
return bkey.decode(self._keyencoding) if self._serialize else bkey
def _dumps(self, value):
@ -231,27 +219,19 @@ class FileCache(MutableMapping):
def _key_to_filename(self, key):
"""Convert an encoded key to an absolute cache filename."""
if isinstance(key, unicode):
key = key.encode(self._keyencoding)
return os.path.join(self.cache_dir, hashlib.md5(key).hexdigest())
return os.path.join(self.cache_dir, key)
def _filename_to_key(self, absfilename):
"""Convert an absolute cache filename to a key name."""
hkey_hdr_fn = absfilename + self.key_file_ext
if os.path.isfile(hkey_hdr_fn):
with open(hkey_hdr_fn, 'rb') as f:
key = f.read()
return key.decode(self._keyencoding) if self._serialize else key
return os.path.split(absfilename)[1]
def _all_filenames(self, scandir_generic=True):
def _all_filenames(self):
"""Return a list of absolute cache filenames"""
_scandir = _scandir_generic if scandir_generic else scandir
try:
for entry in _scandir(self.cache_dir):
if entry.is_file(follow_symlinks=False) and not entry.name.endswith(self.key_file_ext):
yield os.path.join(self.cache_dir, entry.name)
return [os.path.join(self.cache_dir, filename) for filename in
os.listdir(self.cache_dir)]
except (FileNotFoundError, OSError):
raise StopIteration
return []
def _all_keys(self):
"""Return a list of all encoded key names."""
@ -261,17 +241,14 @@ class FileCache(MutableMapping):
else:
return set(file_keys + list(self._buffer))
def __write_to_file(self, filename, value):
def _write_to_file(self, filename, bytesvalue):
"""Write bytesvalue to filename."""
fh, tmp = tempfile.mkstemp()
with os.fdopen(fh, self._flag) as f:
f.write(value)
f.write(self._dumps(bytesvalue))
rename(tmp, filename)
os.chmod(filename, self._mode)
def _write_to_file(self, filename, bytesvalue):
self.__write_to_file(filename, self._dumps(bytesvalue))
def _read_from_file(self, filename):
"""Read data from filename."""
try:
@ -288,7 +265,6 @@ class FileCache(MutableMapping):
else:
filename = self._key_to_filename(ekey)
self._write_to_file(filename, value)
self.__write_to_file(filename + self.key_file_ext, ekey)
def __getitem__(self, key):
ekey = self._encode_key(key)
@ -298,9 +274,8 @@ class FileCache(MutableMapping):
except KeyError:
pass
filename = self._key_to_filename(ekey)
if not os.path.isfile(filename):
if filename not in self._all_filenames():
raise KeyError(key)
return self._read_from_file(filename)
def __delitem__(self, key):
@ -317,11 +292,6 @@ class FileCache(MutableMapping):
except (IOError, OSError):
pass
try:
os.remove(filename + self.key_file_ext)
except (IOError, OSError):
pass
def __iter__(self):
for key in self._all_keys():
yield self._decode_key(key)
@ -331,10 +301,4 @@ class FileCache(MutableMapping):
def __contains__(self, key):
ekey = self._encode_key(key)
if not self._sync:
try:
return ekey in self._buffer
except KeyError:
pass
filename = self._key_to_filename(ekey)
return os.path.isfile(filename)
return ekey in self._all_keys()

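The FileCache changes above revert the Bazarr-specific md5-filename-plus-sidecar scheme back to upstream filecache behaviour: the key itself is hex-encoded into the filename, so `_encode_key` and `_decode_key` form a lossless round trip and no separate key file is needed. A standalone sketch of that encoding (mirroring the two methods, not importing the module):

import codecs

def encode_key(key, keyencoding="utf-8"):
    # mirrors _encode_key: text key -> filesystem-safe hex name
    if isinstance(key, str):
        key = key.encode(keyencoding)
    return codecs.encode(key, "hex_codec").decode(keyencoding)

def decode_key(name, keyencoding="utf-8"):
    # mirrors _decode_key: hex name -> original text key
    return codecs.decode(name.encode(keyencoding), "hex_codec").decode(keyencoding)

for key in ("movie-title", "épisode"):
    assert decode_key(encode_key(key)) == key
print(encode_key("épisode"))  # 'c3a97069736f6465' - safe as a filename
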
@ -267,10 +267,7 @@ def alabel(label):
try:
label = label.encode('ascii')
try:
ulabel(label)
except IDNAError:
raise IDNAError('The label {0} is not a valid A-label'.format(label))
ulabel(label)
if not valid_label_length(label):
raise IDNAError('Label too long')
return label

@ -1,6 +1,6 @@
# This file is automatically generated by tools/idna-data
__version__ = "10.0.0"
__version__ = "11.0.0"
scripts = {
'Greek': (
0x37000000374,
@ -49,7 +49,7 @@ scripts = {
0x30210000302a,
0x30380000303c,
0x340000004db6,
0x4e0000009feb,
0x4e0000009ff0,
0xf9000000fa6e,
0xfa700000fada,
0x200000002a6d7,
@ -62,7 +62,7 @@ scripts = {
'Hebrew': (
0x591000005c8,
0x5d0000005eb,
0x5f0000005f5,
0x5ef000005f5,
0xfb1d0000fb37,
0xfb380000fb3d,
0xfb3e0000fb3f,
@ -248,6 +248,7 @@ joining_types = {
0x6fb: 68,
0x6fc: 68,
0x6ff: 68,
0x70f: 84,
0x710: 82,
0x712: 68,
0x713: 68,
@ -522,6 +523,7 @@ joining_types = {
0x1875: 68,
0x1876: 68,
0x1877: 68,
0x1878: 68,
0x1880: 85,
0x1881: 85,
0x1882: 85,
@ -690,6 +692,70 @@ joining_types = {
0x10bad: 68,
0x10bae: 68,
0x10baf: 85,
0x10d00: 76,
0x10d01: 68,
0x10d02: 68,
0x10d03: 68,
0x10d04: 68,
0x10d05: 68,
0x10d06: 68,
0x10d07: 68,
0x10d08: 68,
0x10d09: 68,
0x10d0a: 68,
0x10d0b: 68,
0x10d0c: 68,
0x10d0d: 68,
0x10d0e: 68,
0x10d0f: 68,
0x10d10: 68,
0x10d11: 68,
0x10d12: 68,
0x10d13: 68,
0x10d14: 68,
0x10d15: 68,
0x10d16: 68,
0x10d17: 68,
0x10d18: 68,
0x10d19: 68,
0x10d1a: 68,
0x10d1b: 68,
0x10d1c: 68,
0x10d1d: 68,
0x10d1e: 68,
0x10d1f: 68,
0x10d20: 68,
0x10d21: 68,
0x10d22: 82,
0x10d23: 68,
0x10f30: 68,
0x10f31: 68,
0x10f32: 68,
0x10f33: 82,
0x10f34: 68,
0x10f35: 68,
0x10f36: 68,
0x10f37: 68,
0x10f38: 68,
0x10f39: 68,
0x10f3a: 68,
0x10f3b: 68,
0x10f3c: 68,
0x10f3d: 68,
0x10f3e: 68,
0x10f3f: 68,
0x10f40: 68,
0x10f41: 68,
0x10f42: 68,
0x10f43: 68,
0x10f44: 68,
0x10f45: 85,
0x10f51: 68,
0x10f52: 68,
0x10f53: 68,
0x10f54: 82,
0x110bd: 85,
0x110cd: 85,
0x1e900: 68,
0x1e901: 68,
0x1e902: 68,
@ -1034,14 +1100,15 @@ codepoint_classes = {
0x52d0000052e,
0x52f00000530,
0x5590000055a,
0x56100000587,
0x56000000587,
0x58800000589,
0x591000005be,
0x5bf000005c0,
0x5c1000005c3,
0x5c4000005c6,
0x5c7000005c8,
0x5d0000005eb,
0x5f0000005f3,
0x5ef000005f3,
0x6100000061b,
0x62000000640,
0x64100000660,
@ -1054,12 +1121,13 @@ codepoint_classes = {
0x7100000074b,
0x74d000007b2,
0x7c0000007f6,
0x7fd000007fe,
0x8000000082e,
0x8400000085c,
0x8600000086b,
0x8a0000008b5,
0x8b6000008be,
0x8d4000008e2,
0x8d3000008e2,
0x8e300000958,
0x96000000964,
0x96600000970,
@ -1077,6 +1145,7 @@ codepoint_classes = {
0x9e0000009e4,
0x9e6000009f2,
0x9fc000009fd,
0x9fe000009ff,
0xa0100000a04,
0xa0500000a0b,
0xa0f00000a11,
@ -1136,8 +1205,7 @@ codepoint_classes = {
0xbd000000bd1,
0xbd700000bd8,
0xbe600000bf0,
0xc0000000c04,
0xc0500000c0d,
0xc0000000c0d,
0xc0e00000c11,
0xc1200000c29,
0xc2a00000c3a,
@ -1276,7 +1344,7 @@ codepoint_classes = {
0x17dc000017de,
0x17e0000017ea,
0x18100000181a,
0x182000001878,
0x182000001879,
0x1880000018ab,
0x18b0000018f6,
0x19000000191f,
@ -1544,11 +1612,11 @@ codepoint_classes = {
0x309d0000309f,
0x30a1000030fb,
0x30fc000030ff,
0x31050000312f,
0x310500003130,
0x31a0000031bb,
0x31f000003200,
0x340000004db6,
0x4e0000009feb,
0x4e0000009ff0,
0xa0000000a48d,
0xa4d00000a4fe,
0xa5000000a60d,
@ -1655,8 +1723,10 @@ codepoint_classes = {
0xa7a50000a7a6,
0xa7a70000a7a8,
0xa7a90000a7aa,
0xa7af0000a7b0,
0xa7b50000a7b6,
0xa7b70000a7b8,
0xa7b90000a7ba,
0xa7f70000a7f8,
0xa7fa0000a828,
0xa8400000a874,
@ -1664,8 +1734,7 @@ codepoint_classes = {
0xa8d00000a8da,
0xa8e00000a8f8,
0xa8fb0000a8fc,
0xa8fd0000a8fe,
0xa9000000a92e,
0xa8fd0000a92e,
0xa9300000a954,
0xa9800000a9c1,
0xa9cf0000a9da,
@ -1743,7 +1812,7 @@ codepoint_classes = {
0x10a0500010a07,
0x10a0c00010a14,
0x10a1500010a18,
0x10a1900010a34,
0x10a1900010a36,
0x10a3800010a3b,
0x10a3f00010a40,
0x10a6000010a7d,
@ -1756,6 +1825,11 @@ codepoint_classes = {
0x10b8000010b92,
0x10c0000010c49,
0x10cc000010cf3,
0x10d0000010d28,
0x10d3000010d3a,
0x10f0000010f1d,
0x10f2700010f28,
0x10f3000010f51,
0x1100000011047,
0x1106600011070,
0x1107f000110bb,
@ -1763,10 +1837,11 @@ codepoint_classes = {
0x110f0000110fa,
0x1110000011135,
0x1113600011140,
0x1114400011147,
0x1115000011174,
0x1117600011177,
0x11180000111c5,
0x111ca000111cd,
0x111c9000111cd,
0x111d0000111db,
0x111dc000111dd,
0x1120000011212,
@ -1786,7 +1861,7 @@ codepoint_classes = {
0x1132a00011331,
0x1133200011334,
0x113350001133a,
0x1133c00011345,
0x1133b00011345,
0x1134700011349,
0x1134b0001134e,
0x1135000011351,
@ -1796,6 +1871,7 @@ codepoint_classes = {
0x1137000011375,
0x114000001144b,
0x114500001145a,
0x1145e0001145f,
0x11480000114c6,
0x114c7000114c8,
0x114d0000114da,
@ -1807,15 +1883,17 @@ codepoint_classes = {
0x116500001165a,
0x11680000116b8,
0x116c0000116ca,
0x117000001171a,
0x117000001171b,
0x1171d0001172c,
0x117300001173a,
0x118000001183b,
0x118c0000118ea,
0x118ff00011900,
0x11a0000011a3f,
0x11a4700011a48,
0x11a5000011a84,
0x11a8600011a9a,
0x11a9d00011a9e,
0x11ac000011af9,
0x11c0000011c09,
0x11c0a00011c37,
@ -1831,6 +1909,13 @@ codepoint_classes = {
0x11d3c00011d3e,
0x11d3f00011d48,
0x11d5000011d5a,
0x11d6000011d66,
0x11d6700011d69,
0x11d6a00011d8f,
0x11d9000011d92,
0x11d9300011d99,
0x11da000011daa,
0x11ee000011ef7,
0x120000001239a,
0x1248000012544,
0x130000001342f,
@ -1845,11 +1930,12 @@ codepoint_classes = {
0x16b5000016b5a,
0x16b6300016b78,
0x16b7d00016b90,
0x16e6000016e80,
0x16f0000016f45,
0x16f5000016f7f,
0x16f8f00016fa0,
0x16fe000016fe2,
0x17000000187ed,
0x17000000187f2,
0x1880000018af3,
0x1b0000001b11f,
0x1b1700001b2fc,

@ -1,2 +1,2 @@
__version__ = '2.7'
__version__ = '2.8'

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,5 +1,6 @@
# coding=utf-8
from main import get_filebot_attrs
from __future__ import absolute_import
from .main import get_filebot_attrs
__all__ = ["get_filebot_attrs"]

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import os
import sys

@ -1,5 +1,7 @@
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import subprocess
import sys
import traceback
@ -10,7 +12,7 @@ import types
import os
from pipes import quote
from lib import find_executable
from .lib import find_executable
mswindows = False
if sys.platform == "win32":
@ -87,7 +89,7 @@ def get_filebot_attrs(fn):
args_func, match_func = XATTR_MAP.get(sys.platform, XATTR_MAP["default"])
args = args_func(fn)
if isinstance(args, types.ListType):
if isinstance(args, list):
try:
env = dict(os.environ)
if not mswindows:
@ -132,4 +134,4 @@ def get_filebot_attrs(fn):
if __name__ == "__main__":
print get_filebot_attrs(sys.argv[1])
print(get_filebot_attrs(sys.argv[1]))

@ -8,10 +8,16 @@ __copyright__ = "Copyright 2010, S Anand"
__license__ = "WTFPL"
from datetime import datetime
from six import PY3
def _df(seconds, denominator=1, text='', past=True):
if past: return str((seconds + denominator/2)/ denominator) + text + ' ago'
else: return 'in ' + str((seconds + denominator/2)/ denominator) + text
if PY3:
result = str(round(seconds / denominator))
else:
result = str((seconds + denominator/2)/ denominator)
if past: return result + text + ' ago'
else: return 'in ' + result + text
def date(time=False, asdays=False, short=False):
'''Returns a pretty formatted date.

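The PY3 branch in `_df` above is needed because `/` is true division on Python 3: the old `(seconds + denominator/2) / denominator` relied on Python 2 integer division to produce whole numbers like "2 days ago". `round(seconds / denominator)` restores whole-number output; the two spellings can differ at exact halves (round() is banker's rounding) but agree for typical inputs:

seconds, denominator = 129600, 86400                     # 1.5 days, in seconds
py2_style = (seconds + denominator // 2) // denominator  # floor-division spelling of the old code
py3_style = round(seconds / denominator)                 # the new PY3 branch
print(py2_style, py3_style)                              # 2 2 -> "2 days ago"
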
@ -1,2 +1,2 @@
from pyprobe import VideoFileParser
from .pyprobe import VideoFileParser

@ -1,6 +1,7 @@
from __future__ import absolute_import
from os import path
from baseparser import BaseParser
from .baseparser import BaseParser
class StreamParser(BaseParser):

@ -1,9 +1,11 @@
from __future__ import absolute_import
from six import PY3
import json
import subprocess
from os import path
from sys import getfilesystemencoding
import ffprobeparsers
from . import ffprobeparsers
class VideoFileParser:
@ -174,16 +176,27 @@ class VideoFileParser:
IOError: ffprobe execution failed
"""
command = [parser] + commandArgs + [inputFile.encode(getfilesystemencoding())]
try:
completedProcess = subprocess.check_output(
command, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
raise IOError(
"Error occurred during execution - " + e.output
if PY3:
command = [parser] + commandArgs + [inputFile]
completedProcess = subprocess.run(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
)
return completedProcess
if completedProcess.returncode != 0:
raise IOError(
"Error occurred during execution - " + completedProcess.stderr
)
return completedProcess.stdout
else:
command = [parser] + commandArgs + [inputFile.encode(getfilesystemencoding())]
try:
completedProcess = subprocess.check_output(
command, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
raise IOError(
"Error occurred during execution - " + e.output
)
return completedProcess
@staticmethod
def _checkExecutable(executable):

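On Python 3 the branch above lets `subprocess.run` decode the parser's output directly via `encoding="utf-8"`, replacing the `check_output`/`CalledProcessError` dance and keeping the input filename as `str`. The same shape with a stand-in command, so the sketch runs without ffprobe installed:

import subprocess
import sys

completed = subprocess.run(
    [sys.executable, "-c", "print('{\"streams\": []}')"],  # stand-in for ffprobe
    stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8",
)
if completed.returncode != 0:
    raise IOError("Error occurred during execution - " + completed.stderr)
print(completed.stdout)  # already str, not bytes, thanks to encoding=
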
@ -17,7 +17,7 @@ class Color(_Color):
return _Color.__new__(cls, r, g, b, a)
#: Version of the pysubs2 library.
VERSION = "0.2.3"
VERSION = "0.2.4"
PY3 = sys.version_info.major == 3

@ -4,6 +4,7 @@ from .subrip import SubripFormat
from .jsonformat import JSONFormat
from .substation import SubstationFormat
from .mpl2 import MPL2Format
from .tmp import TmpFormat
from .exceptions import *
#: Dict mapping file extensions to format identifiers.
@ -13,6 +14,7 @@ FILE_EXTENSION_TO_FORMAT_IDENTIFIER = {
".ssa": "ssa",
".sub": "microdvd",
".json": "json",
".txt": "tmp",
}
#: Dict mapping format identifiers to implementations (FormatBase subclasses).
@ -23,6 +25,7 @@ FORMAT_IDENTIFIER_TO_FORMAT_CLASS = {
"microdvd": MicroDVDFormat,
"json": JSONFormat,
"mpl2": MPL2Format,
"tmp": TmpFormat,
}
def get_format_class(format_):

@ -66,7 +66,14 @@ class SSAFile(MutableSequence):
be detected from the file, in which case you don't need
to specify it here (when given, this argument overrides
autodetection).
kwargs: Extra options for the parser.
keep_unknown_html_tags (bool): This affects SubRip only (SRT),
for other formats this argument is ignored.
By default, HTML tags are converted to equivalent SubStation tags
(eg. ``<i>`` to ``{\\i1}`` and any remaining tags are removed
to keep the text clean. Set this parameter to ``True``
if you want to pass through these tags (eg. ``<sub>``).
This is useful if your output format is SRT and your player
supports these tags.
Returns:
SSAFile
@ -86,6 +93,7 @@ class SSAFile(MutableSequence):
Example:
>>> subs1 = pysubs2.load("subrip-subtitles.srt")
>>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)
>>> subs3 = pysubs2.load("subrip-subtitles-with-fancy-tags.srt", keep_unknown_html_tags=True)
"""
with open(path, encoding=encoding) as fp:

@ -56,7 +56,7 @@ class SSAStyle(object):
self.encoding = 1 #: Charset
for k, v in fields.items():
if k in self.FIELDS and v is not None:
if k in self.FIELDS:
setattr(self, k, v)
else:
raise ValueError("SSAStyle has no field named %r" % k)

@ -31,7 +31,7 @@ class SubripFormat(FormatBase):
return "srt"
@classmethod
def from_file(cls, subs, fp, format_, **kwargs):
def from_file(cls, subs, fp, format_, keep_unknown_html_tags=False, **kwargs):
timestamps = [] # (start, end)
following_lines = [] # contains lists of lines following each timestamp
@ -56,15 +56,15 @@ class SubripFormat(FormatBase):
# Handle the general case.
s = "".join(lines).strip()
s = re.sub(r"\n+ *\d+ *$", "", s) # strip number of next subtitle
s = re.sub(r"< *i *>", r"{\i1}", s)
s = re.sub(r"< */ *i *>", r"{\i0}", s)
s = re.sub(r"< *s *>", r"{\s1}", s)
s = re.sub(r"< */ *s *>", r"{\s0}", s)
s = re.sub(r"< *u *>", "{\\u1}", s) # not r" for Python 2.7 compat, triggers unicodeescape
s = re.sub(r"< */ *u *>", "{\\u0}", s)
s = re.sub(r"< */? *[a-zA-Z][^>]*>", "", s) # strip other HTML tags
s = re.sub(r"\r", "", s) # convert newlines
s = re.sub(r"\n", r"\N", s) # convert newlines
s = re.sub(r"< *i *>", r"{\\i1}", s)
s = re.sub(r"< */ *i *>", r"{\\i0}", s)
s = re.sub(r"< *s *>", r"{\\s1}", s)
s = re.sub(r"< */ *s *>", r"{\\s0}", s)
s = re.sub(r"< *u *>", "{\\\\u1}", s) # not r" for Python 2.7 compat, triggers unicodeescape
s = re.sub(r"< */ *u *>", "{\\\\u0}", s)
if not keep_unknown_html_tags:
s = re.sub(r"< */? *[a-zA-Z][^>]*>", "", s) # strip other HTML tags
s = re.sub(r"\n", r"\\N", s) # convert newlines
return s
subs.events = [SSAEvent(start=start, end=end, text=prepare_text(lines))

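The doubled backslashes above are a Python 3.7 requirement: unknown escapes such as `\i` in a `re.sub` replacement template became errors, so a literal backslash must now be spelled explicitly. The new `keep_unknown_html_tags` flag then decides whether the catch-all strip runs:

import re

s = "<i>Hello</i> <sub>world</sub>"
s = re.sub(r"< *i *>", r"{\\i1}", s)    # a bare r"{\i1}" is a "bad escape" on 3.7+
s = re.sub(r"< */ *i *>", r"{\\i0}", s)
keep_unknown_html_tags = False
if not keep_unknown_html_tags:
    s = re.sub(r"< */? *[a-zA-Z][^>]*>", "", s)  # strip remaining HTML tags
print(s)  # {\i1}Hello{\i0} world
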
@ -145,7 +145,12 @@ class SubstationFormat(FormatBase):
def string_to_field(f, v):
if f in {"start", "end"}:
return timestamp_to_ms(TIMESTAMP.match(v).groups())
if v.startswith("-"):
# handle negative timestamps
v = v[1:]
return -timestamp_to_ms(TIMESTAMP.match(v).groups())
else:
return timestamp_to_ms(TIMESTAMP.match(v).groups())
elif "color" in f:
if format_ == "ass":
return ass_rgba_to_color(v)
@ -184,22 +189,22 @@ class SubstationFormat(FormatBase):
elif inside_info_section or inside_aegisub_section:
if line.startswith(";"): continue # skip comments
try:
k, v = line.split(": ", 1)
k, v = line.split(":", 1)
if inside_info_section:
subs.info[k] = v
subs.info[k] = v.strip()
elif inside_aegisub_section:
subs.aegisub_project[k] = v
subs.aegisub_project[k] = v.strip()
except ValueError:
pass
elif line.startswith("Style:"):
_, rest = line.split(": ", 1)
_, rest = line.split(":", 1)
buf = rest.strip().split(",")
name, raw_fields = buf[0], buf[1:] # splat workaround for Python 2.7
field_dict = {f: string_to_field(f, v) for f, v in zip(STYLE_FIELDS[format_], raw_fields)}
sty = SSAStyle(**field_dict)
subs.styles[name] = sty
elif line.startswith("Dialogue:") or line.startswith("Comment:"):
ev_type, rest = line.split(": ", 1)
ev_type, rest = line.split(":", 1)
raw_fields = rest.strip().split(",", len(EVENT_FIELDS[format_])-1)
field_dict = {f: string_to_field(f, v) for f, v in zip(EVENT_FIELDS[format_], raw_fields)}
field_dict["type"] = ev_type

@ -49,6 +49,20 @@ def timestamp_to_ms(groups):
ms += h * 3600000
return ms
def tmptimestamp_to_ms(groups):
"""
Convert groups from :data:`pysubs2.time.TMPTIMESTAMP` match to milliseconds.
Example:
>>> tmptimestamp_to_ms(TMPTIMESTAMP.match("0:00:01").groups())
1000
"""
h, m, s = map(int, groups)
ms = s * 1000
ms += m * 60000
ms += h * 3600000
return ms
def times_to_ms(h=0, m=0, s=0, ms=0):
"""
Convert hours, minutes, seconds to milliseconds.

@ -0,0 +1,88 @@
from __future__ import print_function, unicode_literals
import re
from .formatbase import FormatBase
from .ssaevent import SSAEvent
from .ssastyle import SSAStyle
from .substation import parse_tags
from .time import ms_to_times, make_time, tmptimestamp_to_ms
#: Pattern that matches TMP timestamp
TMPTIMESTAMP = re.compile(r"(\d{1,2}):(\d{2}):(\d{2})")
#: Pattern that matches TMP line
TMP_LINE = re.compile(r"(\d{1,2}:\d{2}:\d{2}):(.+)")
#: Largest timestamp allowed in TMP format, i.e. 99:59:59.
MAX_REPRESENTABLE_TIME = make_time(h=100) - 1
def ms_to_timestamp(ms):
"""Convert ms to 'HH:MM:SS'"""
# XXX throw on overflow/underflow?
if ms < 0: ms = 0
if ms > MAX_REPRESENTABLE_TIME: ms = MAX_REPRESENTABLE_TIME
h, m, s, ms = ms_to_times(ms)
return "%02d:%02d:%02d" % (h, m, s)
class TmpFormat(FormatBase):
@classmethod
def guess_format(cls, text):
if "[Script Info]" in text or "[V4+ Styles]" in text:
# disambiguation vs. SSA/ASS
return None
for line in text.splitlines():
if TMP_LINE.match(line) and len(TMP_LINE.findall(line)) == 1:
return "tmp"
@classmethod
def from_file(cls, subs, fp, format_, **kwargs):
timestamps = [] # (start)
lines = [] # contains lists of lines following each timestamp
for line in fp:
match = TMP_LINE.match(line)
if not match:
continue
start, text = match.groups()
start = tmptimestamp_to_ms(TMPTIMESTAMP.match(start).groups())
# calculate the end time as start time + 500 milliseconds + 67 milliseconds per character (about 15 chars per second)
end = start + 500 + (len(line) * 67)
timestamps.append((start, end))
lines.append(text)
def prepare_text(lines):
lines = lines.replace("|", r"\N") # convert newlines
lines = re.sub(r"< *u *>", "{\\\\u1}", lines) # not r" for Python 2.7 compat, triggers unicodeescape
lines = re.sub(r"< */? *[a-zA-Z][^>]*>", "", lines) # strip other HTML tags
return lines
subs.events = [SSAEvent(start=start, end=end, text=prepare_text(lines))
for (start, end), lines in zip(timestamps, lines)]
@classmethod
def to_file(cls, subs, fp, format_, **kwargs):
def prepare_text(text, style):
body = []
for fragment, sty in parse_tags(text, style, subs.styles):
fragment = fragment.replace(r"\h", " ")
fragment = fragment.replace(r"\n", "\n")
fragment = fragment.replace(r"\N", "\n")
if sty.italic: fragment = "<i>%s</i>" % fragment
if sty.underline: fragment = "<u>%s</u>" % fragment
if sty.strikeout: fragment = "<s>%s</s>" % fragment
body.append(fragment)
return re.sub("\n+", "\n", "".join(body).strip())
visible_lines = (line for line in subs if not line.is_comment)
for i, line in enumerate(visible_lines, 1):
start = ms_to_timestamp(line.start)
#end = ms_to_timestamp(line.end)
text = prepare_text(line.text, subs.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
#print("%d" % i, file=fp) # Python 2.7 compat
print(start + ":" + text, end="\n", file=fp)
#print(text, end="\n\n", file=fp)

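With `TmpFormat` registered (and `.txt` mapped to it in the format tables above), TMP subtitles in `H:MM:SS:text` form load like any other format. A usage sketch, assuming pysubs2's `SSAFile.from_string`/`to_string` helpers:

import pysubs2  # the patched copy with TmpFormat registered

tmp_text = "0:00:01:Hello|world\n0:00:05:Second line\n"
subs = pysubs2.SSAFile.from_string(tmp_text, format_="tmp")
print(subs[0].start)          # 1000 (ms); end is estimated from line length
print(subs[0].text)           # Hello\Nworld - '|' becomes a line break
print(subs.to_string("tmp"))  # serialized back to H:MM:SS:text lines
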
@ -1,45 +0,0 @@
# coding=utf-8
from __future__ import print_function, division, unicode_literals
import re
from numbers import Number
from pysubs2.time import times_to_ms
from .formatbase import FormatBase
from .ssaevent import SSAEvent
from .ssastyle import SSAStyle
# thanks to http://otsaloma.io/gaupol/doc/api/aeidon.files.mpl2_source.html
MPL2_FORMAT = re.compile(r"^(?um)\[(-?\d+)\]\[(-?\d+)\](.*?)$")
class TXTGenericFormat(FormatBase):
@classmethod
def guess_format(cls, text):
if MPL2_FORMAT.match(text):
return "mpl2"
class MPL2Format(FormatBase):
@classmethod
def guess_format(cls, text):
return TXTGenericFormat.guess_format(text)
@classmethod
def from_file(cls, subs, fp, format_, **kwargs):
def prepare_text(lines):
out = []
for s in lines.split("|"):
if s.startswith("/"):
out.append(r"{\i1}%s{\i0}" % s[1:])
continue
out.append(s)
return "\n".join(out)
subs.events = [SSAEvent(start=times_to_ms(s=float(start) / 10), end=times_to_ms(s=float(end) / 10),
text=prepare_text(text)) for start, end, text in MPL2_FORMAT.findall(fp.getvalue())]
@classmethod
def to_file(cls, subs, fp, format_, **kwargs):
raise NotImplemented

@ -57,10 +57,10 @@ def check_compatibility(urllib3_version, chardet_version):
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
major, minor, patch = int(major), int(minor), int(patch)
# urllib3 >= 1.21.1, <= 1.24
# urllib3 >= 1.21.1, <= 1.25
assert major == 1
assert minor >= 21
assert minor <= 24
assert minor <= 25
# Check chardet for compatibility.
major, minor, patch = chardet_version.split('.')[:3]

@ -5,10 +5,10 @@
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'http://python-requests.org'
__version__ = '2.21.0'
__build__ = 0x022100
__version__ = '2.22.0'
__build__ = 0x022200
__author__ = 'Kenneth Reitz'
__author_email__ = 'me@kennethreitz.org'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2018 Kenneth Reitz'
__copyright__ = 'Copyright 2019 Kenneth Reitz'
__cake__ = u'\u2728 \U0001f370 \u2728'

@ -19,7 +19,7 @@ def request(method, url, **kwargs):
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the body of the :class:`Request`.
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
@ -65,7 +65,7 @@ def get(url, params=None, **kwargs):
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the body of the :class:`Request`.
in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response

@ -23,12 +23,10 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Bazarr patch to use custom ConfigParser2:
from ConfigParser2 import ConfigParser as configparser, NoOptionError, NoSectionError
#try:
# from configparser2 import ConfigParser as configparser, NoOptionError, NoSectionError
#except ImportError:
# from ConfigParser import SafeConfigParser as configparser, NoOptionError, NoSectionError
try:
from backports.configparser2 import ConfigParser as configparser, NoOptionError, NoSectionError
except ImportError:
from ConfigParser import SafeConfigParser as configparser, NoOptionError, NoSectionError
class simpleconfigparser(configparser):

@ -1,4 +1,4 @@
# Copyright (c) 2010-2017 Benjamin Peterson
# Copyright (c) 2010-2018 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@ -29,7 +29,7 @@ import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.11.0"
__version__ = "1.12.0"
# Useful for very coarse version differentiation.
@ -844,10 +844,71 @@ def add_metaclass(metaclass):
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.

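The new `ensure_binary`/`ensure_str`/`ensure_text` helpers are the main reason for this bump to six 1.12: they coerce values to a known type on either interpreter, which is exactly what a codebase straddling Python 2 and 3 needs at str/bytes boundaries:

from six import ensure_binary, ensure_str, ensure_text

payload = ensure_binary("café")       # bytes on both Python 2 and 3
header = ensure_str(b"utf-8")         # native str: bytes on 2, text on 3
title = ensure_text(b"caf\xc3\xa9")   # text type on both interpreters
assert ensure_text(payload) == title == u"café"
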
@ -0,0 +1,127 @@
"""
Soup Sieve.
A CSS selector filter for BeautifulSoup4.
MIT License
Copyright (c) 2018 Isaac Muse
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
from .__meta__ import __version__, __version_info__ # noqa: F401
from . import css_parser as cp
from . import css_match as cm
from . import css_types as ct
from .util import DEBUG, deprecated, SelectorSyntaxError # noqa: F401
__all__ = (
'DEBUG', 'SelectorSyntaxError', 'SoupSieve',
'closest', 'comments', 'compile', 'filter', 'icomments',
'iselect', 'match', 'select', 'select_one'
)
SoupSieve = cm.SoupSieve
def compile(pattern, namespaces=None, flags=0, **kwargs): # noqa: A001
"""Compile CSS pattern."""
if namespaces is not None:
namespaces = ct.Namespaces(**namespaces)
custom = kwargs.get('custom')
if custom is not None:
custom = ct.CustomSelectors(**custom)
if isinstance(pattern, SoupSieve):
if flags:
raise ValueError("Cannot process 'flags' argument on a compiled selector list")
elif namespaces is not None:
raise ValueError("Cannot process 'namespaces' argument on a compiled selector list")
elif custom is not None:
raise ValueError("Cannot process 'custom' argument on a compiled selector list")
return pattern
return cp._cached_css_compile(pattern, namespaces, custom, flags)
def purge():
"""Purge cached patterns."""
cp._purge_cache()
def closest(select, tag, namespaces=None, flags=0, **kwargs):
"""Match closest ancestor."""
return compile(select, namespaces, flags, **kwargs).closest(tag)
def match(select, tag, namespaces=None, flags=0, **kwargs):
"""Match node."""
return compile(select, namespaces, flags, **kwargs).match(tag)
def filter(select, iterable, namespaces=None, flags=0, **kwargs): # noqa: A001
"""Filter list of nodes."""
return compile(select, namespaces, flags, **kwargs).filter(iterable)
@deprecated("'comments' is not related to CSS selectors and will be removed in the future.")
def comments(tag, limit=0, flags=0, **kwargs):
"""Get comments only."""
return [comment for comment in cm.CommentsMatch(tag).get_comments(limit)]
@deprecated("'icomments' is not related to CSS selectors and will be removed in the future.")
def icomments(tag, limit=0, flags=0, **kwargs):
"""Iterate comments only."""
for comment in cm.CommentsMatch(tag).get_comments(limit):
yield comment
def select_one(select, tag, namespaces=None, flags=0, **kwargs):
"""Select a single tag."""
return compile(select, namespaces, flags, **kwargs).select_one(tag)
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs):
"""Select the specified tags."""
return compile(select, namespaces, flags, **kwargs).select(tag, limit)
def iselect(select, tag, namespaces=None, limit=0, flags=0, **kwargs):
"""Iterate the specified tags."""
for el in compile(select, namespaces, flags, **kwargs).iselect(tag, limit):
yield el
def escape(ident):
"""Escape identifier."""
return cp.escape(ident)

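The module-level `select`/`match`/`filter` functions above are thin wrappers that call `compile()` on every invocation, so compiling a selector once and reusing it avoids re-parsing. A usage sketch (soupsieve filters BeautifulSoup4 trees, so bs4 must be available):

import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup("<div><p class='a'>one</p><p>two</p></div>", "html.parser")
pattern = sv.compile("p.a")               # parse the selector once...
print(pattern.select_one(soup).text)      # ...then reuse it: prints 'one'
print([el.text for el in sv.iselect("p", soup)])  # ['one', 'two']
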
@ -0,0 +1,190 @@
"""Meta related things."""
from __future__ import unicode_literals
from collections import namedtuple
import re
RE_VER = re.compile(
r'''(?x)
(?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.(?P<micro>\d+))?
(?:(?P<type>a|b|rc)(?P<pre>\d+))?
(?:\.post(?P<post>\d+))?
(?:\.dev(?P<dev>\d+))?
'''
)
REL_MAP = {
".dev": "",
".dev-alpha": "a",
".dev-beta": "b",
".dev-candidate": "rc",
"alpha": "a",
"beta": "b",
"candidate": "rc",
"final": ""
}
DEV_STATUS = {
".dev": "2 - Pre-Alpha",
".dev-alpha": "2 - Pre-Alpha",
".dev-beta": "2 - Pre-Alpha",
".dev-candidate": "2 - Pre-Alpha",
"alpha": "3 - Alpha",
"beta": "4 - Beta",
"candidate": "4 - Beta",
"final": "5 - Production/Stable"
}
PRE_REL_MAP = {"a": 'alpha', "b": 'beta', "rc": 'candidate'}
class Version(namedtuple("Version", ["major", "minor", "micro", "release", "pre", "post", "dev"])):
"""
Get the version (PEP 440).
A biased approach to the PEP 440 semantic version.
Provides a tuple structure which is sorted for comparisons `v1 > v2` etc.
(major, minor, micro, release type, pre-release build, post-release build, development release build)
Release types are named in such a way that they are easily comparable.
Provides accessors to check whether a build is a development, pre-release, or post-release build,
and an accessor to get the development status for setup files.
How it works (currently):
- You must specify a release type as either `final`, `alpha`, `beta`, or `candidate`.
- To define a development release, you can use either `.dev`, `.dev-alpha`, `.dev-beta`, or `.dev-candidate`.
The dot is used to ensure all development specifiers are sorted before `alpha`.
You can specify a `dev` number for development builds, but do not have to as implicit development releases
are allowed.
- You must specify a `pre` value greater than zero if using a prerelease, because this project (unlike PEP 440) does not
allow implicit prereleases.
- You can optionally set `post` to a value greater than zero to make the build a post release. While post releases
are technically allowed in prereleases, they are strongly discouraged, so we reject them. It should be
noted that we do not allow `post0` even though PEP 440 does not restrict this; this project specifically
does not allow implicit post releases.
- It should be noted that we do not support epochs `1!` or local versions `+some-custom.version-1`.
Acceptable version releases:
```
Version(1, 0, 0, "final") 1.0
Version(1, 2, 0, "final") 1.2
Version(1, 2, 3, "final") 1.2.3
Version(1, 2, 0, ".dev-alpha", pre=4) 1.2a4
Version(1, 2, 0, ".dev-beta", pre=4) 1.2b4
Version(1, 2, 0, ".dev-candidate", pre=4) 1.2rc4
Version(1, 2, 0, "final", post=1) 1.2.post1
Version(1, 2, 3, ".dev") 1.2.3.dev0
Version(1, 2, 3, ".dev", dev=1) 1.2.3.dev1
```
"""
def __new__(cls, major, minor, micro, release="final", pre=0, post=0, dev=0):
"""Validate version info."""
# Ensure all parts are positive integers.
for value in (major, minor, micro, pre, post):
if not (isinstance(value, int) and value >= 0):
raise ValueError("All version parts except 'release' should be integers.")
if release not in REL_MAP:
raise ValueError("'{}' is not a valid release type.".format(release))
# Ensure valid pre-release (we do not allow implicit pre-releases).
if ".dev-candidate" < release < "final":
if pre == 0:
raise ValueError("Implicit pre-releases not allowed.")
elif dev:
raise ValueError("Version is not a development release.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure valid development or development/pre release
elif release < "alpha":
if release > ".dev" and pre == 0:
raise ValueError("Implicit pre-release not allowed.")
elif post:
raise ValueError("Post-releases are not allowed with pre-releases.")
# Ensure a valid normal release
else:
if pre:
raise ValueError("Version is not a pre-release.")
elif dev:
raise ValueError("Version is not a development release.")
return super(Version, cls).__new__(cls, major, minor, micro, release, pre, post, dev)
def _is_pre(self):
"""Is prerelease."""
return self.pre > 0
def _is_dev(self):
"""Is development."""
return bool(self.release < "alpha")
def _is_post(self):
"""Is post."""
return self.post > 0
def _get_dev_status(self): # pragma: no cover
"""Get development status string."""
return DEV_STATUS[self.release]
def _get_canonical(self):
"""Get the canonical output string."""
# Assemble major, minor, micro version and append `pre`, `post`, or `dev` if needed.
if self.micro == 0:
ver = "{}.{}".format(self.major, self.minor)
else:
ver = "{}.{}.{}".format(self.major, self.minor, self.micro)
if self._is_pre():
ver += '{}{}'.format(REL_MAP[self.release], self.pre)
if self._is_post():
ver += ".post{}".format(self.post)
if self._is_dev():
ver += ".dev{}".format(self.dev)
return ver
def parse_version(ver, pre=False):
"""Parse version into a comparable Version tuple."""
m = RE_VER.match(ver)
# Handle major, minor, micro
major = int(m.group('major'))
minor = int(m.group('minor')) if m.group('minor') else 0
micro = int(m.group('micro')) if m.group('micro') else 0
# Handle pre releases
if m.group('type'):
release = PRE_REL_MAP[m.group('type')]
pre = int(m.group('pre'))
else:
release = "final"
pre = 0
# Handle development releases
dev = m.group('dev') if m.group('dev') else 0
if m.group('dev'):
dev = int(m.group('dev'))
release = '.dev-' + release if pre else '.dev'
else:
dev = 0
# Handle post
post = int(m.group('post')) if m.group('post') else 0
return Version(major, minor, micro, release, pre, post, dev)
__version_info__ = Version(1, 9, 3, "final")
__version__ = __version_info__._get_canonical()

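Because `Version` is a namedtuple ordered as (major, minor, micro, release, pre, post, dev), comparisons are plain tuple comparisons, and `parse_version` maps a version string onto that tuple. A small check, assuming the module above is importable as `soupsieve.__meta__`:

from soupsieve.__meta__ import Version, parse_version

assert parse_version("1.9.3") == Version(1, 9, 3, "final")
assert parse_version("1.10.0b2") == Version(1, 10, 0, "beta", pre=2)
assert parse_version("1.9.3") < parse_version("1.10.0b2")  # tuple ordering
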
File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,344 @@
"""CSS selector structure items."""
from __future__ import unicode_literals
from . import util
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
class Immutable(object):
"""Immutable."""
__slots__ = ('_hash',)
def __init__(self, **kwargs):
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls):
"""Get base class."""
return cls
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self):
"""Hash."""
return self._hash
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self): # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__base__(), ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
class ImmutableDict(util.Mapping):
"""Hashable, immutable dictionary."""
def __init__(self, *args, **kwargs):
"""Initialize."""
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if (
is_dict and not all([isinstance(v, util.Hashable) for v in arg.values()]) or
not is_dict and not all([isinstance(k, util.Hashable) and isinstance(v, util.Hashable) for k, v in arg])
):
raise TypeError('All values must be hashable')
self._d = dict(*args, **kwargs)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def __iter__(self):
"""Iterator."""
return iter(self._d)
def __len__(self):
"""Length."""
return len(self._d)
def __getitem__(self, key):
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self):
"""Hash."""
return self._hash
def __repr__(self): # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, util.string) and isinstance(v, util.string) for k, v in arg.items()]):
raise TypeError('Namespace keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, util.string) and isinstance(v, util.string) for k, v in arg]):
raise TypeError('Namespace keys and values must be Unicode strings')
super(Namespaces, self).__init__(*args, **kwargs)
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, util.string) and isinstance(v, util.string) for k, v in arg.items()]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, util.string) and isinstance(v, util.string) for k, v in arg]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags
):
"""Initialize."""
super(Selector, self).__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self):
"""Initialize."""
super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
def __init__(self, name, prefix):
"""Initialize."""
super(SelectorTag, self).__init__(
name=name,
prefix=prefix
)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
"""Initialize."""
super(SelectorAttribute, self).__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "_hash")
def __init__(self, text):
"""Initialize."""
super(SelectorContains, self).__init__(
text=text
)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
def __init__(self, a, n, b, of_type, last, selectors):
"""Initialize."""
super(SelectorNth, self).__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
def __init__(self, languages):
"""Initialize."""
super(SelectorLang, self).__init__(
languages=tuple(languages)
)
def __iter__(self):
"""Iterator."""
return iter(self.languages)
def __len__(self): # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index): # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
"""Initialize."""
super(SelectorList, self).__init__(
selectors=tuple(selectors),
is_not=is_not,
is_html=is_html
)
def __iter__(self):
"""Iterator."""
return iter(self.selectors)
def __len__(self):
"""Length."""
return len(self.selectors)
def __getitem__(self, index):
"""Get item."""
return self.selectors[index]
def _pickle(p):
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj):
"""Allow object to be pickled."""
util.copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)

@ -0,0 +1,170 @@
"""Utility."""
from __future__ import unicode_literals
from functools import wraps
import warnings
import sys
import struct
import os
import re
MODULE = os.path.dirname(__file__)
PY3 = sys.version_info >= (3, 0)
PY35 = sys.version_info >= (3, 5)
PY37 = sys.version_info >= (3, 7)
if PY3:
from functools import lru_cache # noqa F401
import copyreg # noqa F401
from collections.abc import Hashable, Mapping # noqa F401
ustr = str
bstr = bytes
unichar = chr
string = str
else:
from backports.functools_lru_cache import lru_cache # noqa F401
import copy_reg as copyreg # noqa F401
from collections import Hashable, Mapping # noqa F401
ustr = unicode # noqa: F821
bstr = str
unichar = unichr # noqa: F821
string = basestring # noqa: F821
DEBUG = 0x00001
RE_PATTERN_LINE_SPLIT = re.compile(r'(?:\r\n|(?!\r\n)[\n\r])|$')
LC_A = ord('a')
LC_Z = ord('z')
UC_A = ord('A')
UC_Z = ord('Z')
def lower(string):
"""Lower."""
new_string = []
for c in string:
o = ord(c)
new_string.append(chr(o + 32) if UC_A <= o <= UC_Z else c)
return ''.join(new_string)
def upper(string): # pragma: no cover
"""Lower."""
new_string = []
for c in string:
o = ord(c)
new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c)
return ''.join(new_string)
def uchr(i):
"""Allow getting Unicode character on narrow python builds."""
try:
return unichar(i)
except ValueError: # pragma: no cover
return struct.pack('i', i).decode('utf-32')
def uord(c):
"""Get Unicode ordinal."""
if len(c) == 2: # pragma: no cover
high, low = [ord(p) for p in c]
ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
else:
ordinal = ord(c)
return ordinal
class SelectorSyntaxError(SyntaxError):
"""Syntax error in a CSS selector."""
def __init__(self, msg, pattern=None, index=None):
"""Initialize."""
self.line = None
self.col = None
self.context = None
if pattern is not None and index is not None:
# Format pattern to show line and column position
self.context, self.line, self.col = get_pattern_context(pattern, index)
msg = '{}\n line {}:\n{}'.format(msg, self.line, self.context)
super(SelectorSyntaxError, self).__init__(msg)
def deprecated(message, stacklevel=2): # pragma: no cover
"""
Emit a `DeprecationWarning` when the wrapped function/method is called.
Borrowed from https://stackoverflow.com/a/48632082/866026
"""
def _decorator(func):
@wraps(func)
def _func(*args, **kwargs):
warnings.warn(
"'{}' is deprecated. {}".format(func.__name__, message),
category=DeprecationWarning,
stacklevel=stacklevel
)
return func(*args, **kwargs)
return _func
return _decorator
def warn_deprecated(message, stacklevel=2): # pragma: no cover
"""Warn deprecated."""
warnings.warn(
message,
category=DeprecationWarning,
stacklevel=stacklevel
)
def get_pattern_context(pattern, index):
"""Get the pattern context."""
last = 0
current_line = 1
col = 1
text = []
line = 1
# Split pattern by newline and handle the text before the newline
for m in RE_PATTERN_LINE_SPLIT.finditer(pattern):
linetext = pattern[last:m.start(0)]
if not len(m.group(0)) and not len(text):
indent = ''
offset = -1
col = index - last + 1
elif last <= index < m.end(0):
indent = '--> '
offset = (-1 if index > m.start(0) else 0) + 3
col = index - last + 1
else:
indent = ' '
offset = None
if len(text):
# Regardless of whether we are presented with `\r\n`, `\r`, or `\n`,
# we will render the output with just `\n`. We will still log the column
# correctly though.
text.append('\n')
text.append('{}{}'.format(indent, linetext))
if offset is not None:
text.append('\n')
text.append(' ' * (col + offset) + '^')
line = current_line
current_line += 1
last = m.end(0)
return ''.join(text), line, col

@ -90,11 +90,11 @@ class Sqlite3Worker(threading.Thread):
"""
LOGGER.debug("run: Thread started")
execute_count = 0
for token, query, values, only_one in iter(self.sql_queue.get, None):
for token, query, values, only_one, execute_many in iter(self.sql_queue.get, None):
LOGGER.debug("sql_queue: %s", self.sql_queue.qsize())
if token != self.exit_token:
LOGGER.debug("run: %s, %s", query, values)
self.run_query(token, query, values, only_one)
self.run_query(token, query, values, only_one, execute_many)
execute_count += 1
# Let the executes build up a little before committing to disk
# to speed things up.
@ -112,7 +112,7 @@ class Sqlite3Worker(threading.Thread):
self.thread_running = False
return
def run_query(self, token, query, values, only_one):
def run_query(self, token, query, values, only_one=False, execute_many=False):
"""Run a query.
Args:
@ -136,9 +136,14 @@ class Sqlite3Worker(threading.Thread):
"Query returned error: %s: %s: %s", query, values, err)
else:
try:
self.sqlite3_cursor.execute(query, values)
if query.lower().strip().startswith(("insert", "update")):
self.results[token] = self.sqlite3_cursor.rowcount
if execute_many:
self.sqlite3_cursor.executemany(query, values)
if query.lower().strip().startswith(("insert", "update", "delete")):
self.results[token] = self.sqlite3_cursor.rowcount
else:
self.sqlite3_cursor.execute(query, values)
if query.lower().strip().startswith(("insert", "update", "delete")):
self.results[token] = self.sqlite3_cursor.rowcount
except sqlite3.Error as err:
self.results[token] = (
"Query returned error: %s: %s: %s" % (query, values, err))
@ -148,7 +153,7 @@ class Sqlite3Worker(threading.Thread):
def close(self):
"""Close down the thread and close the sqlite3 database file."""
self.exit_set = True
self.sql_queue.put((self.exit_token, "", "", ""), timeout=5)
self.sql_queue.put((self.exit_token, "", "", "", ""), timeout=5)
# Sleep and check that the thread is done before returning.
while self.thread_running:
time.sleep(.01) # Don't kill the CPU waiting.
@ -181,7 +186,7 @@ class Sqlite3Worker(threading.Thread):
if delay < 8:
delay += delay
def execute(self, query, values=None, only_one=False):
def execute(self, query, values=None, only_one=False, execute_many=False):
"""Execute a query.
Args:
@ -200,11 +205,11 @@ class Sqlite3Worker(threading.Thread):
token = str(uuid.uuid4())
# If it's a select we queue it up with a token to mark the results
# into the output queue so we know what results are ours.
if query.lower().strip().startswith(("select", "insert", "update")):
self.sql_queue.put((token, query, values, only_one), timeout=5)
if query.lower().strip().startswith(("select", "insert", "update", "delete")):
self.sql_queue.put((token, query, values, only_one, execute_many), timeout=5)
return self.query_results(token)
else:
self.sql_queue.put((token, query, values, only_one), timeout=5)
self.sql_queue.put((token, query, values, only_one, execute_many), timeout=5)
def dict_factory(cursor, row):

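The `execute_many` flag threaded through the queue above lets bulk inserts go through a single `executemany()` call instead of one queue round-trip per row, and `delete` statements now report their rowcount too. A usage sketch, assuming the vendored module is importable as `sqlite3worker`:

from sqlite3worker import Sqlite3Worker

worker = Sqlite3Worker(":memory:")
worker.execute("CREATE TABLE subs (lang TEXT, path TEXT)")
rows = [("en", "/tv/a.srt"), ("fr", "/tv/b.srt")]
count = worker.execute("INSERT INTO subs VALUES (?, ?)", rows, execute_many=True)
print(count)                                 # 2 - rowcount, per the patch above
print(worker.execute("SELECT * FROM subs"))  # [('en', '/tv/a.srt'), ('fr', '/tv/b.srt')]
worker.close()
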
@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__title__ = 'subliminal'
__version__ = '2.1.0.dev'
__short_version__ = '.'.join(__version__.split('.')[:2])

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from dogpile.cache import make_region

@ -4,6 +4,7 @@ Subliminal uses `click <http://click.pocoo.org>`_ to provide a powerful :abbr:`C
"""
from __future__ import division
from __future__ import absolute_import
from collections import defaultdict
from datetime import timedelta
import glob

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter, language_converters

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter, language_converters

@ -1,12 +1,14 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import defaultdict
import platform
from six.moves import range
is_windows_special_path = False
if platform.system() == "Windows":
try:
__file__.decode("ascii")
__file__
except UnicodeDecodeError:
is_windows_special_path = True

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from pkg_resources import EntryPoint
from stevedore import ExtensionManager

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from bs4 import BeautifulSoup, FeatureNotFound

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import io
import json
import logging

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from babelfish import Language
@ -7,6 +8,7 @@ from requests import Session
from . import Provider
from .. import __short_version__
from ..subtitle import Subtitle
from six.moves import range
logger = logging.getLogger(__name__)

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import logging
import os

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import io
import logging
import re
@ -138,7 +139,7 @@ class PodnapisiProvider(Provider):
if subtitle_xml.find('release').text:
for release in subtitle_xml.find('release').text.split():
release = re.sub(r'\.+$', '', release) # remove trailing dots
release = ''.join(filter(lambda x: ord(x) < 128, release)) # remove non-ascii characters
release = ''.join([x for x in release if ord(x) < 128]) # remove non-ascii characters
releases.append(release)
title = subtitle_xml.find('title').text
season = int(subtitle_xml.find('tvSeason').text)
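The filter-to-comprehension change is a 2to3-style rewrite: on Python 3, filter() returns a lazy iterator rather than a list. str.join would consume either form, so the rewrite is about explicitness, and both spellings strip non-ASCII code points the same way:

release = u'Show.Name.S01E01.720p.\xe9dition'
ascii_only = ''.join([x for x in release if ord(x) < 128])
# ascii_only == 'Show.Name.S01E01.720p.dition' (the \xe9 is dropped)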

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import logging
import os

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import bisect
from collections import defaultdict
import io
@ -121,7 +122,7 @@ class SubsCenterProvider(Provider):
self.session.close()
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def _search_url_titles(self, title):
"""Search the URL titles by kind for the given `title`.
@ -209,7 +210,7 @@ class SubsCenterProvider(Provider):
logger.debug('Found subtitle %r', subtitle)
subtitles[subtitle_id] = subtitle
return subtitles.values()
return list(subtitles.values())
def list_subtitles(self, video, languages):
season = episode = None
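Wrapping the return in list() matters because dict.values() on Python 3 is a live view, not a list: it reflects later mutations of the dict and supports neither indexing nor slicing. For example:

subtitles = {'id1': 'first'}
vals = subtitles.values()
subtitles['id2'] = 'second'
len(vals)                            # 2 on Python 3 (live view), 1 on Python 2 (snapshot)
snapshot = list(subtitles.values())  # a real list, same behaviour on both versions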

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from babelfish import Language, language_converters

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import io
import logging
import re

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import operator

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from functools import wraps
import logging

@ -28,6 +28,7 @@ Available matches:
"""
from __future__ import division, print_function
from __future__ import absolute_import
import logging
from .video import Episode, Movie

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import codecs
import logging
import os

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime
import hashlib
import os
import re
import struct
from six.moves import range
def hash_opensubtitles(video_path):
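six.moves.range resolves to the lazy variant on both interpreters (xrange on Python 2, range on Python 3), which is what you want in a loop that runs 8192 times per 64 KiB block. For orientation, a sketch of the widely documented OpenSubtitles hash this function computes: the file size plus every little-endian 64-bit word of the first and last 64 KiB, truncated to 64 bits.

from __future__ import absolute_import
import os
import struct
from six.moves import range

def opensubtitles_hash_sketch(video_path):
    block = 65536
    filesize = os.path.getsize(video_path)
    if filesize < block * 2:
        return None  # the protocol needs at least 128 KiB of file
    filehash = filesize
    with open(video_path, 'rb') as f:
        for offset in (0, filesize - block):  # first and last 64 KiB
            f.seek(offset)
            buf = f.read(block)
            for i in range(block // 8):       # lazy on both versions via six.moves
                (word,) = struct.unpack_from('<Q', buf, i * 8)
                filehash = (filehash + word) & 0xFFFFFFFFFFFFFFFF
    return '%016x' % filehash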

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from datetime import datetime, timedelta
import logging
import os

@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import absolute_import
import subliminal
# patch subliminal's subtitle and provider base
@ -12,8 +13,8 @@ from .core import scan_video, search_external_subtitles, list_all_subtitles, sav
download_best_subtitles
from .score import compute_score
from .video import Video
import extensions
import http
from . import extensions
from . import http
# patch subliminal's core functions
subliminal.scan_video = subliminal.core.scan_video = scan_video
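This import rewrite is load-bearing: with absolute_import in effect (and always on Python 3), a bare import http inside the package no longer finds the sibling module, and Python 3 additionally ships a stdlib http package that would be imported silently in its place. The relative form names the sibling explicitly:

# inside the package's __init__.py (illustrative annotation of the change)
from __future__ import absolute_import
from . import extensions  # the sibling module, not a top-level 'extensions'
from . import http         # the sibling module, not Python 3's stdlib 'http'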

Some files were not shown because too many files have changed in this diff.