@@ -0,0 +1,79 @@
# coding=utf-8
# fmt: off

import logging
import time
import requests
from collections import namedtuple
from datetime import timedelta

from app.config import settings
from subliminal import Episode, region, __short_version__

logger = logging.getLogger(__name__)

refined_providers = {'jimaku'}


class AniListClient(object):
    def __init__(self, session=None, timeout=10):
        self.session = session or requests.Session()
        self.session.timeout = timeout
        self.session.headers['Content-Type'] = 'application/json'
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__

    @region.cache_on_arguments(expiration_time=timedelta(days=1).total_seconds())
    def get_series_mappings(self):
        r = self.session.get(
            'https://raw.githubusercontent.com/Fribb/anime-lists/master/anime-list-mini.json'
        )
        r.raise_for_status()
        return r.json()

    def get_series_id(self, candidate_id_name, candidate_id_value):
        anime_list = self.get_series_mappings()

        tag_map = {
            "series_anidb_id": "anidb_id",
            "imdb_id": "imdb_id"
        }
        mapped_tag = tag_map.get(candidate_id_name, candidate_id_name)

        obj = [obj for obj in anime_list if mapped_tag in obj and str(obj[mapped_tag]) == str(candidate_id_value)]
        logger.debug(f"Based on '{mapped_tag}': '{candidate_id_value}', anime-list matched: {obj}")

        if len(obj) > 0:
            return obj[0]["anilist_id"]
        else:
            logger.debug(f"Could not find corresponding AniList ID with '{mapped_tag}': {candidate_id_value}")
            return None


def refine_from_anilist(path, video):
    # Safety checks
    if isinstance(video, Episode):
        if not video.series_anidb_id:
            return

    if refined_providers.intersection(settings.general.enabled_providers) and video.anilist_id is None:
        refine_anilist_ids(video)


def refine_anilist_ids(video):
    anilist_client = AniListClient()

    if isinstance(video, Episode):
        candidate_id_name = "series_anidb_id"
    else:
        candidate_id_name = "imdb_id"

    candidate_id_value = getattr(video, candidate_id_name, None)
    if not candidate_id_value:
        logger.error(f"Found no value for property {candidate_id_name} of video.")
        return video

    anilist_id = anilist_client.get_series_id(candidate_id_name, candidate_id_value)
    if not anilist_id:
        return video

    video.anilist_id = anilist_id
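
# Usage sketch (illustrative, not part of the diff): Bazarr invokes
# refine_from_anilist as a refiner hook, so for an Episode that an earlier
# refiner has already tagged with series_anidb_id:
#
#   refine_from_anilist(path, video)   # sets video.anilist_id when a mapping exists
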
@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from babelfish import LanguageReverseConverter
from subliminal.exceptions import ConfigurationError


class SubdlConverter(LanguageReverseConverter):
    def __init__(self):
        self.from_subdl = {
            "AR": ("ara", None, None),  # Arabic
            "DA": ("dan", None, None),  # Danish
            "NL": ("nld", None, None),  # Dutch
            "EN": ("eng", None, None),  # English
            "FA": ("fas", None, None),  # Farsi_Persian
            "FI": ("fin", None, None),  # Finnish
            "FR": ("fra", None, None),  # French
            "ID": ("ind", None, None),  # Indonesian
            "IT": ("ita", None, None),  # Italian
            "NO": ("nor", None, None),  # Norwegian
            "RO": ("ron", None, None),  # Romanian
            "ES": ("spa", None, None),  # Spanish
            "SV": ("swe", None, None),  # Swedish
            "VI": ("vie", None, None),  # Vietnamese
            "SQ": ("sqi", None, None),  # Albanian
            "AZ": ("aze", None, None),  # Azerbaijani
            "BE": ("bel", None, None),  # Belarusian
            "BN": ("ben", None, None),  # Bengali
            "BS": ("bos", None, None),  # Bosnian
            "BG": ("bul", None, None),  # Bulgarian
            "MY": ("mya", None, None),  # Burmese
            "CA": ("cat", None, None),  # Catalan
            "ZH": ("zho", None, None),  # Chinese BG code
            "HR": ("hrv", None, None),  # Croatian
            "CS": ("ces", None, None),  # Czech
            "EO": ("epo", None, None),  # Esperanto
            "ET": ("est", None, None),  # Estonian
            "KA": ("kat", None, None),  # Georgian
            "DE": ("deu", None, None),  # German
            "EL": ("ell", None, None),  # Greek
            "KL": ("kal", None, None),  # Greenlandic
            "HE": ("heb", None, None),  # Hebrew
            "HI": ("hin", None, None),  # Hindi
            "HU": ("hun", None, None),  # Hungarian
            "IS": ("isl", None, None),  # Icelandic
            "JA": ("jpn", None, None),  # Japanese
            "KO": ("kor", None, None),  # Korean
            "KU": ("kur", None, None),  # Kurdish
            "LV": ("lav", None, None),  # Latvian
            "LT": ("lit", None, None),  # Lithuanian
            "MK": ("mkd", None, None),  # Macedonian
            "MS": ("msa", None, None),  # Malay
            "ML": ("mal", None, None),  # Malayalam
            "PL": ("pol", None, None),  # Polish
            "PT": ("por", None, None),  # Portuguese
            "RU": ("rus", None, None),  # Russian
            "SR": ("srp", None, None),  # Serbian
            "SI": ("sin", None, None),  # Sinhala
            "SK": ("slk", None, None),  # Slovak
            "SL": ("slv", None, None),  # Slovenian
            "TL": ("tgl", None, None),  # Tagalog
            "TA": ("tam", None, None),  # Tamil
            "TE": ("tel", None, None),  # Telugu
            "TH": ("tha", None, None),  # Thai
            "TR": ("tur", None, None),  # Turkish
            "UK": ("ukr", None, None),  # Ukrainian
            "UR": ("urd", None, None),  # Urdu
            # custom languages
            "BR_PT": ("por", "BR", None),  # Brazilian Portuguese
            "ZH_BG": ("zho", None, "Hant"),  # Big 5 code
            # unsupported language in Bazarr
            # "BG_EN": "Bulgarian_English",
            # "NL_EN": "Dutch_English",
            # "EN_DE": "English_German",
            # "HU_EN": "Hungarian_English",
            # "MNI": "Manipuri",
        }
        self.to_subdl = {v: k for k, v in self.from_subdl.items()}
        self.codes = set(self.from_subdl.keys())

    def convert(self, alpha3, country=None, script=None):
        if (alpha3, country, script) in self.to_subdl:
            return self.to_subdl[(alpha3, country, script)]

        raise ConfigurationError('Unsupported language for subdl: %s, %s, %s' % (alpha3, country, script))

    def reverse(self, subdl):
        if subdl in self.from_subdl:
            return self.from_subdl[subdl]

        raise ConfigurationError('Unsupported language code for subdl: %s' % subdl)
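
# Round-trip sketch (illustrative; babelfish normally drives these methods):
#
#   converter = SubdlConverter()
#   converter.convert("por", "BR", None)   # -> "BR_PT"
#   converter.reverse("BR_PT")             # -> ("por", "BR", None)
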
@@ -0,0 +1,419 @@
from __future__ import absolute_import

from datetime import timedelta
import logging
import os
import re
import time

from requests import Session
from subliminal import region, __short_version__
from subliminal.cache import REFINER_EXPIRATION_TIME
from subliminal.exceptions import ConfigurationError, AuthenticationError, ServiceUnavailable
from subliminal.utils import sanitize
from subliminal.video import Episode, Movie
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers.utils import get_subtitle_from_archive, get_archive_from_bytes
from urllib.parse import urlencode, urljoin
from guessit import guessit
from subzero.language import Language, FULL_LANGUAGE_LIST

logger = logging.getLogger(__name__)

# Unhandled formats, such files will always get filtered out
unhandled_archive_formats = (".7z",)
accepted_archive_formats = (".zip", ".rar")


class JimakuSubtitle(Subtitle):
    '''Jimaku Subtitle.'''
    provider_name = 'jimaku'

    hash_verifiable = False

    def __init__(self, language, video, download_url, filename):
        super(JimakuSubtitle, self).__init__(language, page_link=download_url)

        self.video = video
        self.download_url = download_url
        self.filename = filename
        self.release_info = filename
        self.is_archive = filename.endswith(accepted_archive_formats)

    @property
    def id(self):
        return self.download_url

    def get_matches(self, video):
        matches = set()

        # Episode/Movie specific matches
        if isinstance(video, Episode):
            if sanitize(video.series) and sanitize(self.video.series) in (
                    sanitize(name) for name in [video.series] + video.alternative_series):
                matches.add('series')

            if video.season and (self.video.season is None or video.season == self.video.season):
                matches.add('season')
        elif isinstance(video, Movie):
            if sanitize(video.title) and sanitize(self.video.title) in (
                    sanitize(name) for name in [video.title] + video.alternative_titles):
                matches.add('title')

        # General matches
        if video.year and video.year == self.video.year:
            matches.add('year')

        video_type = 'movie' if isinstance(video, Movie) else 'episode'
        matches.add(video_type)

        # guessit returns a dict of properties; iterate its items so we can
        # compare values. A bare `or "source"` here would always be truthy.
        guess = guessit(self.filename, {'type': video_type})
        for prop, value in guess.items():
            if prop in ("release_group", "source"):
                if video.release_group == value:
                    matches.add('release_group')
                    break

        # Prioritize .srt by repurposing the audio_codec match
        if self.filename.endswith(".srt"):
            matches.add('audio_codec')

        return matches
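
# Match-scoring sketch (illustrative; filename and match set are made up):
# subliminal ranks candidates by the set returned from get_matches, so a file
# named "[Group] Show - 01 (BDRip).srt" that agrees on series and season would
# score roughly {'series', 'season', 'episode', 'audio_codec'} for an Episode.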


class JimakuProvider(Provider):
    '''Jimaku Provider.'''
    video_types = (Episode, Movie)

    api_url = 'https://jimaku.cc/api'
    api_ratelimit_max_delay_seconds = 5
    api_ratelimit_backoff_limit = 3

    corrupted_file_size_threshold = 500

    languages = {Language.fromietf("ja")}

    def __init__(self, enable_name_search_fallback, enable_archives_download, enable_ai_subs, api_key):
        if api_key:
            self.api_key = api_key
        else:
            raise ConfigurationError('Missing api_key.')

        self.enable_name_search_fallback = enable_name_search_fallback
        self.download_archives = enable_archives_download
        self.enable_ai_subs = enable_ai_subs
        self.session = None

    def initialize(self):
        self.session = Session()
        self.session.headers['Content-Type'] = 'application/json'
        self.session.headers['Authorization'] = self.api_key
        self.session.headers['User-Agent'] = os.environ.get("SZ_USER_AGENT")

    def terminate(self):
        self.session.close()

    def _query(self, video):
        if isinstance(video, Movie):
            media_name = video.title.lower()
        elif isinstance(video, Episode):
            media_name = video.series.lower()

            # With entries that have a season larger than 1, Jimaku appends the corresponding season number to the name.
            # We'll reassemble media_name here to account for cases where we can only search by name alone.
            season_addendum = str(video.season) if video.season > 1 else None
            media_name = f"{media_name} {season_addendum}" if season_addendum else media_name

        # Search for entry
        searching_for_entry_attempts = 0
        additional_url_params = {}
        while searching_for_entry_attempts < 2:
            searching_for_entry_attempts += 1
            url = self._assemble_jimaku_search_url(video, media_name, additional_url_params)
            if not url:
                return None

            searching_for_entry = "query" in url
            data = self._search_for_entry(url)

            if not data:
                if searching_for_entry and searching_for_entry_attempts < 2:
                    logger.info("Maybe this is live action media? Will retry search without anime parameter...")
                    additional_url_params = {'anime': "false"}
                else:
                    return None
            else:
                break

        # We only go for the first entry
        entry = data[0]

        entry_id = entry.get('id')
        anilist_id = entry.get('anilist_id', None)
        entry_name = entry.get('name')
        is_movie = entry.get('flags', {}).get('movie', False)

        if isinstance(video, Episode) and is_movie:
            logger.warning("Bazarr thinks this is a series, but Jimaku says this is a movie! May not be able to match subtitles...")

        logger.info(f"Matched entry: ID: '{entry_id}', anilist_id: '{anilist_id}', name: '{entry_name}', english_name: '{entry.get('english_name')}', movie: {is_movie}")
        if entry.get("flags", {}).get("unverified"):
            logger.warning(f"This entry '{entry_id}' is unverified, subtitles might be incomplete or have quality issues!")

        # Get a list of subtitles for entry
        episode_number = getattr(video, "episode", None)
        url_params = {'episode': episode_number} if isinstance(video, Episode) and not is_movie else {}
        only_look_for_archives = False

        has_offset = isinstance(video, Episode) and video.series_anidb_season_episode_offset is not None

        retry_count = 0
        adjusted_ep_num = None
        while retry_count <= 1:
            # Account for positive episode offset first
            if isinstance(video, Episode) and not is_movie and retry_count < 1:
                if video.season > 1 and has_offset:
                    offset_value = video.series_anidb_season_episode_offset
                    offset_value = offset_value if offset_value > 0 else -offset_value

                    if episode_number < offset_value:
                        adjusted_ep_num = episode_number + offset_value
                        logger.warning(f"Will try using adjusted episode number {adjusted_ep_num} first")
                        url_params = {'episode': adjusted_ep_num}

            url = f"entries/{entry_id}/files"
            data = self._search_for_subtitles(url, url_params)

            if not data:
                if isinstance(video, Episode) and not is_movie and has_offset and retry_count < 1:
                    logger.warning(f"Found no subtitles for adjusted episode number, but will retry with normal episode number {episode_number}")
                    url_params = {'episode': episode_number}
                elif isinstance(video, Episode) and not is_movie and retry_count < 1:
                    logger.warning(f"Found no subtitles for episode number {episode_number}, but will retry without 'episode' parameter")
                    url_params = {}
                    only_look_for_archives = True
                else:
                    return None

                retry_count += 1
            else:
                if adjusted_ep_num:
                    video.episode = adjusted_ep_num
                    logger.debug(f"This video's episode attribute has been updated to: {video.episode}")
                break

        # Filter subtitles
        list_of_subtitles = []

        data = [item for item in data if not item['name'].endswith(unhandled_archive_formats)]

        # Detect only archives being uploaded
        archive_entries = [item for item in data if item['name'].endswith(accepted_archive_formats)]
        subtitle_entries = [item for item in data if not item['name'].endswith(accepted_archive_formats)]
        has_only_archives = len(archive_entries) > 0 and len(subtitle_entries) == 0
        if has_only_archives:
            logger.warning("Have only found archived subtitles")
        elif only_look_for_archives:
            data = [item for item in data if item['name'].endswith(accepted_archive_formats)]

        for item in data:
            filename = item.get('name')
            download_url = item.get('url')
            is_archive = filename.endswith(accepted_archive_formats)

            # Archives will still be considered if they're the only files available, as is mostly the case for movies.
            if is_archive and not has_only_archives and not self.download_archives:
                logger.warning(f"Skipping archive '{filename}' because normal subtitles are available instead")
                continue

            if not self.enable_ai_subs:
                p = re.compile(r'[\[\(]?(whisperai)[\]\)]?|[\[\(]whisper[\]\)]', re.IGNORECASE)
                if p.search(filename):
                    logger.warning(f"Skipping subtitle '{filename}' as it's suspected of being AI generated")
                    continue

            sub_languages = self._try_determine_subtitle_languages(filename)
            if len(sub_languages) > 1:
                logger.warning(f"Skipping subtitle '{filename}' as it's suspected of containing multiple languages")
                continue

            # Check if file is obviously corrupt. If no size is returned, assume OK
            filesize = item.get('size', self.corrupted_file_size_threshold)
            if filesize < self.corrupted_file_size_threshold:
                logger.warning(f"Skipping possibly corrupt file '{filename}': Filesize is just {filesize} bytes")
                continue

            if not filename.endswith(unhandled_archive_formats):
                # At this point sub_languages holds at most one entry; default to Japanese otherwise
                lang = sub_languages[0] if len(sub_languages) == 1 else Language("jpn")
                list_of_subtitles.append(JimakuSubtitle(lang, video, download_url, filename))
            else:
                logger.debug(f"Skipping archive '{filename}' as it's not a supported format")

        return list_of_subtitles

    def list_subtitles(self, video, languages=None):
        subtitles = self._query(video)
        if not subtitles:
            return []

        return subtitles

    def download_subtitle(self, subtitle: JimakuSubtitle):
        target_url = subtitle.download_url
        response = self.session.get(target_url, timeout=10)
        response.raise_for_status()

        if subtitle.is_archive:
            archive = get_archive_from_bytes(response.content)
            if archive:
                if isinstance(subtitle.video, Episode):
                    subtitle.content = get_subtitle_from_archive(
                        archive,
                        episode=subtitle.video.episode,
                        episode_title=subtitle.video.title
                    )
                else:
                    subtitle.content = get_subtitle_from_archive(
                        archive
                    )
            else:
                logger.warning("Archive seems to not be an archive! File possibly corrupt?")
                return None
        else:
            subtitle.content = response.content

    def _do_jimaku_request(self, url_path, url_params={}):
        url = urljoin(f"{self.api_url}/{url_path}", '?' + urlencode(url_params))

        retry_count = 0
        while retry_count < self.api_ratelimit_backoff_limit:
            response = self.session.get(url, timeout=10)

            if response.status_code == 429:
                reset_time = 5
                retry_count += 1

                logger.warning(f"Jimaku ratelimit hit, waiting for '{reset_time}' seconds ({retry_count}/{self.api_ratelimit_backoff_limit} tries)")
                time.sleep(reset_time)
                continue
            elif response.status_code == 401:
                raise AuthenticationError("Unauthorized. API key possibly invalid")
            else:
                response.raise_for_status()

            data = response.json()
            logger.debug(f"Length of response on {url}: {len(data)}")
            if len(data) == 0:
                logger.error(f"Jimaku returned no items for our query: {url}")
                return None
            elif 'error' in data:
                raise ServiceUnavailable(f"Jimaku returned an error: '{data.get('error')}', Code: '{data.get('code')}'")
            else:
                return data

        raise APIThrottled(f"Jimaku ratelimit max backoff limit of {self.api_ratelimit_backoff_limit} reached, aborting")

    # Wrapper functions to indirectly call _do_jimaku_request with different cache configs
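    # Entry lookups are cached for REFINER_EXPIRATION_TIME (a subliminal cache
    # constant), while file listings are only cached for one minute, presumably
    # so freshly uploaded subtitles show up quickly.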
    @region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
    def _search_for_entry(self, url_path, url_params={}):
        return self._do_jimaku_request(url_path, url_params)

    @region.cache_on_arguments(expiration_time=timedelta(minutes=1).total_seconds())
    def _search_for_subtitles(self, url_path, url_params={}):
        return self._do_jimaku_request(url_path, url_params)

    @staticmethod
    def _try_determine_subtitle_languages(filename):
        # This is more like a guess and not a 100% fool-proof way of detecting multi-lang subs:
        # It assumes that language codes, if present, are in the last metadata group of the subs filename.
        # If such codes are not present, or we failed to match any at all, then we'll just assume that the sub is purely Japanese.
        default_language = Language("jpn")

        dot_delimit = filename.split(".")
        bracket_delimit = re.split(r'[\[\]\(\)]+', filename)

        candidate_list = list()
        if len(dot_delimit) > 2:
            candidate_list = dot_delimit[-2]
        elif len(bracket_delimit) > 2:
            candidate_list = bracket_delimit[-2]

        candidates = [] if len(candidate_list) == 0 else re.split(r'[,\-\+\& ]+', candidate_list)

        # Discard match group if any candidate...
        # ...contains any numbers, as the group is likely encoding information
        if any(re.compile(r'\d').search(string) for string in candidates):
            return [default_language]
        # ...is >= 5 chars long, as the group is likely other unrelated metadata
        if any(len(string) >= 5 for string in candidates):
            return [default_language]

        languages = list()
        for candidate in candidates:
            candidate = candidate.lower()
            if candidate in ["ass", "srt"]:
                continue

            # Sometimes, languages are hidden in 4 character blocks, i.e. "JPSC";
            # split them in half and push both halves back onto the list being iterated.
            if len(candidate) == 4:
                for addendum in [candidate[:2], candidate[2:]]:
                    candidates.append(addendum)
                continue

            # Sometimes, language codes can have additional info such as 'cc' or 'sdh'. For example: "ja[cc]"
            if len(dot_delimit) > 2 and any(c in candidate for c in '[]()'):
                candidate = re.split(r'[\[\]\(\)]+', candidate)[0]

            try:
                language_squash = {
                    "jp": "ja",
                    "jap": "ja",
                    "chs": "zho",
                    "cht": "zho",
                    "zhi": "zho",
                    "cn": "zho"
                }

                candidate = language_squash[candidate] if candidate in language_squash else candidate
                if len(candidate) > 2:
                    language = Language(candidate)
                else:
                    language = Language.fromietf(candidate)

                if not any(l.alpha3 == language.alpha3 for l in languages):
                    languages.append(language)
            except Exception:
                if candidate in FULL_LANGUAGE_LIST:
                    # Create a dummy for the unknown language
                    languages.append(Language("zul"))

        if len(languages) > 1:
            # Sometimes a metadata group that actually contains info about codecs gets processed as valid languages.
            # To prevent false positives, we'll check if Japanese language codes are in the processed languages list.
            # If not, then it's likely that we didn't actually match language codes -> Assume Japanese only subtitle.
            contains_jpn = any(l.alpha3 == "jpn" for l in languages)

            return languages if contains_jpn else [Language("jpn")]
        else:
            return [default_language]

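    # Illustrative behavior of the heuristics above (all filenames made up):
    #   "[Group] Show - 01 (JA).srt"    -> [Language("jpn")]
    #   "Show.S01E01.ja-en.ass"         -> two languages, so the caller skips it
    #   "Show - 01 [BDRip 1080p].srt"   -> [Language("jpn")] (digits => encoding info)
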
    def _assemble_jimaku_search_url(self, video, media_name, additional_params={}):
        endpoint = "entries/search"
        anilist_id = video.anilist_id

        params = {}
        if anilist_id:
            params = {'anilist_id': anilist_id}
        else:
            if self.enable_name_search_fallback or isinstance(video, Movie):
                params = {'query': media_name}
            else:
                logger.error(f"Skipping '{media_name}': Got no AniList ID and fuzzy matching using name is disabled")
                return None

        if additional_params:
            params.update(additional_params)

        logger.info(f"Will search for entry based on params: {params}")
        return urljoin(endpoint, '?' + urlencode(params))
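

# Lifecycle sketch (illustrative; the key and video are placeholders): Bazarr
# normally drives the provider, but the call order boils down to:
#
#   provider = JimakuProvider(True, False, False, api_key="...")
#   provider.initialize()
#   subs = provider.list_subtitles(video)   # video should carry anilist_id
#   provider.download_subtitle(subs[0])
#   provider.terminate()
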
@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
import logging
import os
import time
import io
import json

from zipfile import ZipFile, is_zipfile
from urllib.parse import urljoin
from requests import Session

from subzero.language import Language
from subliminal import Episode, Movie
from subliminal.exceptions import ConfigurationError, ProviderError, DownloadLimitExceeded
from subliminal_patch.exceptions import APIThrottled
from .mixins import ProviderRetryMixin
from subliminal_patch.subtitle import Subtitle
from subliminal.subtitle import fix_line_ending
from subliminal_patch.providers import Provider
from subliminal_patch.providers import utils

logger = logging.getLogger(__name__)

retry_amount = 3
retry_timeout = 5


class LegendasNetSubtitle(Subtitle):
    provider_name = 'legendasnet'
    hash_verifiable = False

    def __init__(self, language, forced, page_link, download_link, file_id, release_names, uploader,
                 season=None, episode=None):
        super().__init__(language)
        language = Language.rebuild(language, forced=forced)

        self.season = season
        self.episode = episode
        self.releases = release_names
        self.release_info = ', '.join(release_names)
        self.language = language
        self.forced = forced
        self.file_id = file_id
        self.page_link = page_link
        self.download_link = download_link
        self.uploader = uploader
        self.matches = None

    @property
    def id(self):
        return self.file_id

    def get_matches(self, video):
        matches = set()

        # handle movies and series separately
        if isinstance(video, Episode):
            # series
            matches.add('series')
            # season
            if video.season == self.season:
                matches.add('season')
            # episode
            if video.episode == self.episode:
                matches.add('episode')
            # imdb
            matches.add('series_imdb_id')
        else:
            # title
            matches.add('title')
            # imdb
            matches.add('imdb_id')

        utils.update_matches(matches, video, self.release_info)

        self.matches = matches

        return matches


class LegendasNetProvider(ProviderRetryMixin, Provider):
    """Legendas.Net Provider"""
    server_hostname = 'legendas.net/api'

    languages = {Language('por', 'BR')}
    video_types = (Episode, Movie)

    def __init__(self, username, password):
        self.session = Session()
        self.session.headers = {'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")}
        self.username = username
        self.password = password
        self.access_token = None
        self.video = None
        self._started = None
        self.login()

    def login(self):
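        # Login round-trip, as inferred from the code below (not from API docs):
        # POST {"email": ..., "password": ...} to https://legendas.net/api/v1/login,
        # read "access_token" from the JSON response, then send it as a Bearer
        # token on every later request.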
        headers = {
            "Accept": "*/*",
            "User-Agent": self.session.headers['User-Agent'],
            "Content-Type": "application/json"
        }

        payload = json.dumps({
            "email": self.username,
            "password": self.password
        })

        response = self.session.request("POST", self.server_url() + 'login', data=payload, headers=headers)
        if response.status_code != 200:
            raise ConfigurationError('Failed to login and retrieve access token')
        self.access_token = response.json().get('access_token')
        if not self.access_token:
            raise ConfigurationError('Access token not found in login response')
        self.session.headers.update({'Authorization': f'Bearer {self.access_token}'})

    def initialize(self):
        self._started = time.time()

    def terminate(self):
        self.session.close()

    def server_url(self):
        return f'https://{self.server_hostname}/v1/'

    def query(self, languages, video):
        self.video = video

        # query the server
        if isinstance(self.video, Episode):
            res = self.retry(
                lambda: self.session.get(self.server_url() + 'search/tv',
                                         json={
                                             'name': video.series,
                                             'page': 1,
                                             'per_page': 25,
                                             'tv_episode': video.episode,
                                             'tv_season': video.season,
                                             'imdb_id': video.series_imdb_id
                                         },
                                         headers={'Content-Type': 'application/json'},
                                         timeout=30),
                amount=retry_amount,
                retry_timeout=retry_timeout
            )
        else:
            res = self.retry(
                lambda: self.session.get(self.server_url() + 'search/movie',
                                         json={
                                             'name': video.title,
                                             'page': 1,
                                             'per_page': 25,
                                             'imdb_id': video.imdb_id
                                         },
                                         headers={'Content-Type': 'application/json'},
                                         timeout=30),
                amount=retry_amount,
                retry_timeout=retry_timeout
            )

        if res.status_code == 404:
            logger.error(f"Endpoint not found: {res.url}")
            raise ProviderError("Endpoint not found")
        elif res.status_code == 429:
            raise APIThrottled("Too many requests")
        elif res.status_code == 403:
            raise ConfigurationError("Invalid access token")
        elif res.status_code != 200:
            res.raise_for_status()

        subtitles = []

        result = res.json()

        if ('success' in result and not result['success']) or ('status' in result and not result['status']):
            logger.debug(result.get("error"))
            return []

        if isinstance(self.video, Episode):
            if len(result['tv_shows']):
                for item in result['tv_shows']:
                    subtitle = LegendasNetSubtitle(
                        language=Language('por', 'BR'),
                        forced=self._is_forced(item),
                        page_link=f"https://legendas.net/tv_legenda?movie_id={result['tv_shows'][0]['tmdb_id']}&"
                                  f"legenda_id={item['id']}",
                        download_link=item['path'],
                        file_id=item['id'],
                        release_names=[item.get('release_name', '')],
                        uploader=item['uploader'],
                        season=item.get('season', ''),
                        episode=item.get('episode', '')
                    )
                    subtitle.get_matches(self.video)
                    if subtitle.language in languages:
                        subtitles.append(subtitle)
        else:
            if len(result['movies']):
                for item in result['movies']:
                    subtitle = LegendasNetSubtitle(
                        language=Language('por', 'BR'),
                        forced=self._is_forced(item),
                        page_link=f"https://legendas.net/legenda?movie_id={result['movies'][0]['tmdb_id']}&"
                                  f"legenda_id={item['id']}",
                        download_link=item['path'],
                        file_id=item['id'],
                        release_names=[item.get('release_name', '')],
                        uploader=item['uploader'],
                        season=None,
                        episode=None
                    )
                    subtitle.get_matches(self.video)
                    if subtitle.language in languages:
                        subtitles.append(subtitle)

        return subtitles

    @staticmethod
    def _is_forced(item):
        forced_tags = ['forced', 'foreign']
        for tag in forced_tags:
            if tag in item.get('comment', '').lower():
                return True

        # nothing matched, so we consider it as normal subtitles
        return False

    def list_subtitles(self, video, languages):
        return self.query(languages, video)

    def download_subtitle(self, subtitle):
        logger.debug('Downloading subtitle %r', subtitle)
        download_link = urljoin("https://legendas.net", subtitle.download_link)

        r = self.retry(
            lambda: self.session.get(download_link, timeout=30),
            amount=retry_amount,
            retry_timeout=retry_timeout
        )

        if r.status_code == 429:
            raise DownloadLimitExceeded("Daily download limit exceeded")
        elif r.status_code == 403:
            raise ConfigurationError("Invalid access token")
        elif r.status_code != 200:
            r.raise_for_status()

        if not r:
            logger.error(f'Could not download subtitle from {download_link}')
            subtitle.content = None
            return
        else:
            archive_stream = io.BytesIO(r.content)
            if is_zipfile(archive_stream):
                archive = ZipFile(archive_stream)
                for name in archive.namelist():
                    subtitle_content = archive.read(name)
                    subtitle.content = fix_line_ending(subtitle_content)
                    return
            else:
                subtitle_content = r.content
                subtitle.content = fix_line_ending(subtitle_content)
                return
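

# Usage sketch (illustrative credentials; note that constructing the provider
# performs the login request right away):
#
#   provider = LegendasNetProvider("user@example.com", "secret")
#   subs = provider.list_subtitles(video, {Language('por', 'BR')})
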
@@ -1,7 +1,7 @@
-node_modules
-dist
 *.local
+*.tsbuildinfo
 build
 coverage
-*.tsbuildinfo
+dev-dist
+dist
+node_modules
@@ -0,0 +1 @@
20.13
@@ -0,0 +1,14 @@
module.exports = {
  plugins: {
    "postcss-preset-mantine": {},
    "postcss-simple-vars": {
      variables: {
        "mantine-breakpoint-xs": "36em",
        "mantine-breakpoint-sm": "48em",
        "mantine-breakpoint-md": "62em",
        "mantine-breakpoint-lg": "75em",
        "mantine-breakpoint-xl": "88em",
      },
    },
  },
};
@@ -1,26 +0,0 @@
{
  "name": "Bazarr",
  "short_name": "Bazarr",
  "description": "Bazarr is a companion application to Sonarr and Radarr. It manages and downloads subtitles based on your requirements.",
  "start_url": "/",
  "display": "standalone",
  "theme_color": "#be4bdb",
  "background_color": "#ffffff",
  "icons": [
    {
      "src": "/images/android-chrome-96x96.png",
      "sizes": "96x96",
      "type": "image/png"
    },
    {
      "src": "/images/apple-touch-icon.png",
      "sizes": "180x180",
      "type": "image/png"
    },
    {
      "src": "/images/mstile-150x150.png",
      "sizes": "150x150",
      "type": "image/png"
    }
  ]
}
@@ -0,0 +1,9 @@
.header {
  @include light {
    color: var(--mantine-color-gray-0);
  }

  @include dark {
    color: var(--mantine-color-dark-0);
  }
}
@@ -0,0 +1,56 @@
.anchor {
  border-color: var(--mantine-color-gray-5);
  text-decoration: none;

  @include dark {
    border-color: var(--mantine-color-dark-5);
  }

  &.active {
    border-left: 2px solid $color-brand-4;
    background-color: var(--mantine-color-gray-1);

    @include dark {
      border-left: 2px solid $color-brand-8;
      background-color: var(--mantine-color-dark-8);
    }
  }

  &.hover {
    background-color: var(--mantine-color-gray-0);

    @include dark {
      background-color: var(--mantine-color-dark-7);
    }
  }
}

.badge {
  margin-left: auto;
  text-decoration: none;
  box-shadow: var(--mantine-shadow-xs);
}

.icon {
  width: 1.4rem;
  margin-right: var(--mantine-spacing-xs);
}

.nav {
  background-color: var(--mantine-color-gray-2);

  @include dark {
    background-color: var(--mantine-color-dark-8);
  }
}

.text {
  display: inline-flex;
  align-items: center;
  width: 100%;
  color: var(--mantine-color-gray-8);

  @include dark {
    color: var(--mantine-color-gray-5);
  }
}
@@ -0,0 +1,39 @@
import { useCallback, useEffect, useState } from "react";
import { MantineColorScheme, useMantineColorScheme } from "@mantine/core";
import { useSystemSettings } from "@/apis/hooks";

const ThemeProvider = () => {
  const [localScheme, setLocalScheme] = useState<MantineColorScheme | null>(
    null,
  );
  const { setColorScheme } = useMantineColorScheme();

  const settings = useSystemSettings();

  const settingsColorScheme = settings.data?.general
    .theme as MantineColorScheme;

  const setScheme = useCallback(
    (colorScheme: MantineColorScheme) => {
      setColorScheme(colorScheme);
    },
    [setColorScheme],
  );

  useEffect(() => {
    if (!settingsColorScheme) {
      return;
    }

    if (localScheme === settingsColorScheme) {
      return;
    }

    setScheme(settingsColorScheme);
    setLocalScheme(settingsColorScheme);
  }, [settingsColorScheme, setScheme, localScheme]);
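
  // Renders nothing by design (an inference from this file, not a documented
  // contract): the component only mirrors the saved settings theme into
  // Mantine's color scheme, so there is no UI to emit.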

  return <></>;
};

export default ThemeProvider;
@@ -0,0 +1,61 @@
import { FunctionComponent, PropsWithChildren } from "react";
import {
  ActionIcon,
  Badge,
  Button,
  createTheme,
  MantineProvider,
  Pagination,
} from "@mantine/core";
import ThemeLoader from "@/App/ThemeLoader";
import "@mantine/core/styles.layer.css";
import "@mantine/notifications/styles.layer.css";
import styleVars from "@/assets/_variables.module.scss";
import actionIconClasses from "@/assets/action_icon.module.scss";
import badgeClasses from "@/assets/badge.module.scss";
import buttonClasses from "@/assets/button.module.scss";
import paginationClasses from "@/assets/pagination.module.scss";

const themeProvider = createTheme({
  fontFamily: "Roboto, open sans, Helvetica Neue, Helvetica, Arial, sans-serif",
  colors: {
    brand: [
      styleVars.colorBrand0,
      styleVars.colorBrand1,
      styleVars.colorBrand2,
      styleVars.colorBrand3,
      styleVars.colorBrand4,
      styleVars.colorBrand5,
      styleVars.colorBrand6,
      styleVars.colorBrand7,
      styleVars.colorBrand8,
      styleVars.colorBrand9,
    ],
  },
  primaryColor: "brand",
  components: {
    ActionIcon: ActionIcon.extend({
      classNames: actionIconClasses,
    }),
    Badge: Badge.extend({
      classNames: badgeClasses,
    }),
    Button: Button.extend({
      classNames: buttonClasses,
    }),
    Pagination: Pagination.extend({
      classNames: paginationClasses,
    }),
  },
});

const ThemeProvider: FunctionComponent<PropsWithChildren> = ({ children }) => {
  return (
    <MantineProvider theme={themeProvider} defaultColorScheme="auto">
      <ThemeLoader />
      {children}
    </MantineProvider>
  );
};

export default ThemeProvider;
@@ -1,87 +0,0 @@
import { useSystemSettings } from "@/apis/hooks";
import {
  ColorScheme,
  ColorSchemeProvider,
  createEmotionCache,
  MantineProvider,
  MantineThemeOverride,
} from "@mantine/core";
import { useColorScheme } from "@mantine/hooks";
import {
  FunctionComponent,
  PropsWithChildren,
  useCallback,
  useEffect,
  useState,
} from "react";

const theme: MantineThemeOverride = {
  fontFamily: "Roboto, open sans, Helvetica Neue, Helvetica, Arial, sans-serif",
  colors: {
    brand: [
      "#F8F0FC",
      "#F3D9FA",
      "#EEBEFA",
      "#E599F7",
      "#DA77F2",
      "#CC5DE8",
      "#BE4BDB",
      "#AE3EC9",
      "#9C36B5",
      "#862E9C",
    ],
  },
  primaryColor: "brand",
};

function useAutoColorScheme() {
  const settings = useSystemSettings();
  const settingsColorScheme = settings.data?.general.theme;

  let preferredColorScheme: ColorScheme = useColorScheme();
  switch (settingsColorScheme) {
    case "light":
      preferredColorScheme = "light" as ColorScheme;
      break;
    case "dark":
      preferredColorScheme = "dark" as ColorScheme;
      break;
  }

  const [colorScheme, setColorScheme] = useState(preferredColorScheme);

  // automatically switch dark/light theme
  useEffect(() => {
    setColorScheme(preferredColorScheme);
  }, [preferredColorScheme]);

  const toggleColorScheme = useCallback((value?: ColorScheme) => {
    setColorScheme((scheme) => value || (scheme === "dark" ? "light" : "dark"));
  }, []);

  return { colorScheme, setColorScheme, toggleColorScheme };
}

const emotionCache = createEmotionCache({ key: "bazarr" });

const ThemeProvider: FunctionComponent<PropsWithChildren> = ({ children }) => {
  const { colorScheme, toggleColorScheme } = useAutoColorScheme();

  return (
    <ColorSchemeProvider
      colorScheme={colorScheme}
      toggleColorScheme={toggleColorScheme}
    >
      <MantineProvider
        withGlobalStyles
        withNormalizeCSS
        theme={{ colorScheme, ...theme }}
        emotionCache={emotionCache}
      >
        {children}
      </MantineProvider>
    </ColorSchemeProvider>
  );
};

export default ThemeProvider;
@@ -1,23 +1,19 @@
-import { useQuery } from "react-query";
+import { useQuery } from "@tanstack/react-query";
-import { QueryKeys } from "../queries/keys";
+import { QueryKeys } from "@/apis/queries/keys";
-import api from "../raw";
+import api from "@/apis/raw";
 
 export function useLanguages(history?: boolean) {
-  return useQuery(
-    [QueryKeys.System, QueryKeys.Languages, history ?? false],
-    () => api.system.languages(history),
-    {
-      staleTime: Infinity,
-    },
-  );
+  return useQuery({
+    queryKey: [QueryKeys.System, QueryKeys.Languages, history ?? false],
+    queryFn: () => api.system.languages(history),
+    staleTime: Infinity,
+  });
 }
 
 export function useLanguageProfiles() {
-  return useQuery(
-    [QueryKeys.System, QueryKeys.LanguagesProfiles],
-    () => api.system.languagesProfileList(),
-    {
-      staleTime: Infinity,
-    },
-  );
+  return useQuery({
+    queryKey: [QueryKeys.System, QueryKeys.LanguagesProfiles],
+    queryFn: () => api.system.languagesProfileList(),
+    staleTime: Infinity,
+  });
 }
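
// Note on the hunk above (inferred from the before/after shapes shown here):
// @tanstack/react-query v5 replaces useQuery's positional
// (queryKey, queryFn, options) arguments with a single options object.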