Fixed the disk scan function so it does not use the cached ffprobe result and instead forces a refresh of the cache. #1434

pull/1439/head
morpheus65535 4 years ago
parent a39a9e8bd5
commit a3d0e1d192

@ -24,8 +24,8 @@ def _handle_alpha3(detected_language: dict):
return alpha3
def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=None):
data = parse_video_metadata(file, file_size, episode_file_id, movie_file_id)
def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=None, use_cache=True):
data = parse_video_metadata(file, file_size, episode_file_id, movie_file_id, use_cache=use_cache)
subtitles_list = []
if data["ffprobe"] and "subtitle" in data["ffprobe"]:
@ -64,7 +64,7 @@ def embedded_subs_reader(file, file_size, episode_file_id=None, movie_file_id=No
return subtitles_list
def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=None):
def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=None, use_cache=True):
# Define default data keys value
data = {
"ffprobe": {},
@ -73,32 +73,33 @@ def parse_video_metadata(file, file_size, episode_file_id=None, movie_file_id=No
"file_size": file_size,
}
# Get the actual cache value form database
if episode_file_id:
cache_key = TableEpisodes.select(TableEpisodes.ffprobe_cache)\
.where((TableEpisodes.episode_file_id == episode_file_id) and
(TableEpisodes.file_size == file_size))\
.dicts()\
.get()
elif movie_file_id:
cache_key = TableMovies.select(TableMovies.ffprobe_cache)\
.where(TableMovies.movie_file_id == movie_file_id and
TableMovies.file_size == file_size)\
.dicts()\
.get()
else:
cache_key = None
# check if we have a value for that cache key
try:
# Unpickle ffprobe cache
cached_value = pickle.loads(cache_key['ffprobe_cache'])
except:
pass
else:
# Check if file size and file id matches and if so, we return the cached value
if cached_value['file_size'] == file_size and cached_value['file_id'] in [episode_file_id, movie_file_id]:
return cached_value
if use_cache:
# Get the actual cache value form database
if episode_file_id:
cache_key = TableEpisodes.select(TableEpisodes.ffprobe_cache)\
.where((TableEpisodes.episode_file_id == episode_file_id) and
(TableEpisodes.file_size == file_size))\
.dicts()\
.get()
elif movie_file_id:
cache_key = TableMovies.select(TableMovies.ffprobe_cache)\
.where(TableMovies.movie_file_id == movie_file_id and
TableMovies.file_size == file_size)\
.dicts()\
.get()
else:
cache_key = None
# check if we have a value for that cache key
try:
# Unpickle ffprobe cache
cached_value = pickle.loads(cache_key['ffprobe_cache'])
except:
pass
else:
# Check if file size and file id matches and if so, we return the cached value
if cached_value['file_size'] == file_size and cached_value['file_id'] in [episode_file_id, movie_file_id]:
return cached_value
# if not, we retrieve the metadata from the file
from utils import get_binary

@ -26,7 +26,7 @@ global hi_regex
hi_regex = re.compile(r'[*¶♫♪].{3,}[*¶♫♪]|[\[\(\{].{3,}[\]\)\}](?<!{\\an\d})')
def store_subtitles(original_path, reversed_path):
def store_subtitles(original_path, reversed_path, use_cache=True):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(reversed_path):
@ -39,7 +39,8 @@ def store_subtitles(original_path, reversed_path):
.get()
subtitle_languages = embedded_subs_reader(reversed_path,
file_size=item['file_size'],
episode_file_id=item['episode_file_id'])
episode_file_id=item['episode_file_id'],
use_cache=use_cache)
for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
try:
if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
@ -121,7 +122,7 @@ def store_subtitles(original_path, reversed_path):
return actual_subtitles
def store_subtitles_movie(original_path, reversed_path):
def store_subtitles_movie(original_path, reversed_path, use_cache=True):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(reversed_path):
@ -134,7 +135,8 @@ def store_subtitles_movie(original_path, reversed_path):
.get()
subtitle_languages = embedded_subs_reader(reversed_path,
file_size=item['file_size'],
movie_file_id=item['movie_file_id'])
movie_file_id=item['movie_file_id'],
use_cache=use_cache)
for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
try:
if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
@ -499,7 +501,7 @@ def series_scan_subtitles(no):
for episode in episodes:
sleep()
store_subtitles(episode['path'], path_mappings.path_replace(episode['path']))
store_subtitles(episode['path'], path_mappings.path_replace(episode['path']), use_cache=False)
def movies_scan_subtitles(no):
@ -510,7 +512,7 @@ def movies_scan_subtitles(no):
for movie in movies:
sleep()
store_subtitles_movie(movie['path'], path_mappings.path_replace_movie(movie['path']))
store_subtitles_movie(movie['path'], path_mappings.path_replace_movie(movie['path']), use_cache=False)
def get_external_subtitles_path(file, subtitle):

Loading…
Cancel
Save