diff --git a/.travis.yml b/.travis.yml index 158ab7e5a6..2ea842f132 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ cache: - node_modules before_install: - python .github/check_version.py + - sudo rm -f /etc/boto.cfg install: false script: false jobs: diff --git a/CHANGELOG.md b/CHANGELOG.md index cc057ebba1..d9e35897c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## 0.2.14 (2018-12-19) + +#### New Features +- Added provider nordicbits ([#5854](https://github.com/pymedusa/Medusa/pull/5854)) + +#### Improvements +- Change the way we calculate and check the daily search interval for providers ([#5855](https://github.com/pymedusa/Medusa/issues/5855)) +- During a backlog search, we searched for "any" cache result. And if the case, didn't attempt pulling new results from the provider. Now we search the provider when we didn't get any "candidates" from cache. ([#5816](https://github.com/pymedusa/Medusa/issues/5816)) + +#### Fixes +- Fixed double absolute numbers for anime shows where thexem sets an absolute which already exists ([#5801](https://github.com/pymedusa/Medusa/pull/5801)) +- Fixed image cache not properly created from metadata for images other then posters ([#5810](https://github.com/pymedusa/Medusa/pull/5810)) +- Fixed episode status comparison in subtitleMissedPP ([#5813](https://github.com/pymedusa/Medusa/pull/5813)) +- Fixed anidex title parsing ([#5837](https://github.com/pymedusa/Medusa/pull/5837)) +- Fixed (restore) the posibilty or configuring the default daily search search interval ([#5823](https://github.com/pymedusa/Medusa/pull/5823)) +- Fixed notifications - kodi, 'allways on' config option ([#5871](https://github.com/pymedusa/Medusa/pull/5871)) +- Fixed mis-mapped proper search interval config option of 24 hours, added 30 minutes ([#5896](https://github.com/pymedusa/Medusa/pull/5896)) +- Fixed config - search settings, test nzb client connectivity ([#5897](https://github.com/pymedusa/Medusa/pull/5897)) +- Fixed adding an episode to the my anidb list on post processing when enabled ([#5897](https://github.com/pymedusa/Medusa/pull/5897)) +- Fixed creating banner and fanart from metadata. Any metadata images in the shows folder other then the poster, will now also become visible in medusa ([#5808](https://github.com/pymedusa/Medusa/pull/5808)) + +----- + ## 0.2.13 (2018-11-21) #### Improvements diff --git a/Dockerfile b/Dockerfile index 6f9ee78aca..cf1680dcbf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM lsiobase/alpine.python:3.8 -MAINTAINER bobbysteel +MAINTAINER a10kiloham # set version label ARG BUILD_DATE diff --git a/SickBeard.py b/SickBeard.py index 68d1d2ff2c..286a33c45f 100755 --- a/SickBeard.py +++ b/SickBeard.py @@ -1,9 +1,14 @@ -#!/usr/bin/env python2.7 +#!/usr/bin/env python # -*- coding: utf-8 -* """Script for backwards compatibility.""" from __future__ import unicode_literals +import sys + from medusa.__main__ import main if __name__ == '__main__': + if sys.version_info.major == 3 and sys.version_info.minor < 5: + print('Medusa supports Python 2 from version 2.7.10 and Python 3 from version 3.5.0, exiting!') + raise Exception('Incorrect Python version. 
Shutting down!') main() diff --git a/medusa/__main__.py b/medusa/__main__.py index 2ec7e5b04f..43fa69c615 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -425,6 +425,7 @@ def initialize(self, console_logging=True): app.GIT_PASSWORD = check_setting_str(app.CFG, 'General', 'git_password', '', censor_log='low') app.GIT_TOKEN = check_setting_str(app.CFG, 'General', 'git_token', '', censor_log='low', encrypted=True) app.DEVELOPER = bool(check_setting_int(app.CFG, 'General', 'developer', 0)) + app.PYTHON_VERSION = check_setting_list(app.CFG, 'General', 'python_version', [], transform=int) # debugging app.DEBUG = bool(check_setting_int(app.CFG, 'General', 'debug', 0)) @@ -1077,9 +1078,12 @@ def initialize(self, console_logging=True): # Disable flag to erase cache app.SUBTITLES_ERASE_CACHE = False + # Check if we start with a different Python version since last start + python_version_changed = self.migrate_python_version() + # Check if we need to perform a restore of the cache folder Application.restore_cache_folder(app.CACHE_DIR) - cache.configure(app.CACHE_DIR) + cache.configure(app.CACHE_DIR, replace=python_version_changed) # Rebuild the censored list app_logger.rebuild_censored_list() @@ -1247,6 +1251,27 @@ def path_leaf(path): folder_path = os.path.join(cache_folder, name) helpers.remove_folder(folder_path) + @staticmethod + def migrate_python_version(): + """ + Perform some cleanups in case we switch between major Python versions. + + It's possible to switch from Python version 2 to 3 or vice versa. + In that case we might wanna run some sanity actions, to make sure everything keeps working. + + :return: True if the major Python version has changed since last start + :return type: Boolean + """ + # TODO: Leaving this here as a marking for when we merge the python3 changes. 
+ current_version = app.PYTHON_VERSION + app.PYTHON_VERSION = list(sys.version_info)[:3] + + # Run some sanitation when switching between Python versions + if current_version and current_version[0] != app.PYTHON_VERSION[0]: + return True + + return False + @staticmethod def start_threads(): """Start application threads.""" @@ -1545,6 +1570,7 @@ def save_config(): new_config['General']['calendar_icons'] = int(app.CALENDAR_ICONS) new_config['General']['no_restart'] = int(app.NO_RESTART) new_config['General']['developer'] = int(app.DEVELOPER) + new_config['General']['python_version'] = app.PYTHON_VERSION new_config['General']['display_all_seasons'] = int(app.DISPLAY_ALL_SEASONS) new_config['General']['news_last_read'] = app.NEWS_LAST_READ new_config['General']['broken_providers'] = helpers.get_broken_providers() or app.BROKEN_PROVIDERS diff --git a/medusa/app.py b/medusa/app.py index d8b0bcd2db..5f3d6a0b5c 100644 --- a/medusa/app.py +++ b/medusa/app.py @@ -52,6 +52,7 @@ def __init__(self): # static configuration self.LOCALE = None, None self.OS_USER = None + self.PYTHON_VERSION = [] self.OPENSSL_VERSION = None self.APP_VERSION = None self.MAJOR_DB_VERSION = None diff --git a/medusa/cache.py b/medusa/cache.py index 5f07f0c840..e7c358ad4b 100644 --- a/medusa/cache.py +++ b/medusa/cache.py @@ -47,7 +47,7 @@ def release_write_lock(self): anidb_cache = make_region() -def configure(cache_dir): +def configure(cache_dir, replace=False): """Configure caches.""" # memory cache from subliminal.cache import region as subliminal_cache @@ -55,26 +55,26 @@ def configure(cache_dir): memory_cache.configure('dogpile.cache.memory', expiration_time=timedelta(hours=1)) # subliminal cache - subliminal_cache.configure('dogpile.cache.dbm', + subliminal_cache.configure('dogpile.cache.dbm', replace_existing_backend=replace, expiration_time=timedelta(days=30), arguments={ 'filename': os.path.join(cache_dir, 'subliminal.dbm'), 'lock_factory': MutexLock}) # application cache - cache.configure('dogpile.cache.dbm', + cache.configure('dogpile.cache.dbm', replace_existing_backend=replace, expiration_time=timedelta(days=1), arguments={'filename': os.path.join(cache_dir, 'application.dbm'), 'lock_factory': MutexLock}) # recommended series cache - recommended_series_cache.configure('dogpile.cache.dbm', + recommended_series_cache.configure('dogpile.cache.dbm', replace_existing_backend=replace, expiration_time=timedelta(days=7), arguments={'filename': os.path.join(cache_dir, 'recommended.dbm'), 'lock_factory': MutexLock}) # anidb (adba) series cache - anidb_cache.configure('dogpile.cache.dbm', + anidb_cache.configure('dogpile.cache.dbm', replace_existing_backend=replace, expiration_time=timedelta(days=3), arguments={'filename': os.path.join(cache_dir, 'anidb.dbm'), 'lock_factory': MutexLock}) diff --git a/medusa/common.py b/medusa/common.py index 93747e7fc6..a460410dff 100644 --- a/medusa/common.py +++ b/medusa/common.py @@ -39,7 +39,7 @@ long = int INSTANCE_ID = str(uuid.uuid1()) -VERSION = '0.2.13' +VERSION = '0.2.14' USER_AGENT = 'Medusa/{version} ({system}; {release}; {instance})'.format( version=VERSION, system=platform.system(), release=platform.release(), instance=INSTANCE_ID) diff --git a/medusa/helpers/__init__.py b/medusa/helpers/__init__.py index 2d2a9332f9..2fd98b672c 100644 --- a/medusa/helpers/__init__.py +++ b/medusa/helpers/__init__.py @@ -404,7 +404,7 @@ def move_and_symlink_file(src_file, dest_file): u'Failed to create symlink of {source} at {destination}.' 
u' Error: {error!r}', { 'source': src_file, - 'dest': dest_file, + 'destination': dest_file, 'error': msg, } ) @@ -413,7 +413,7 @@ def move_and_symlink_file(src_file, dest_file): u'Failed to create symlink of {source} at {destination}.' u' Error: {error!r}. Copying instead', { 'source': src_file, - 'dest': dest_file, + 'destination': dest_file, 'error': msg, } ) diff --git a/medusa/image_cache.py b/medusa/image_cache.py index 709fc9c844..30010d7455 100644 --- a/medusa/image_cache.py +++ b/medusa/image_cache.py @@ -301,8 +301,8 @@ def fill_cache(series_obj): log.debug('Checking {provider.name} metadata for {img}', {'provider': provider, 'img': IMAGE_TYPES[img_type]}) - if os.path.isfile(provider.get_poster_path(series_obj)): - path = provider.get_poster_path(series_obj) + path = provider.get_image_path(series_obj, img_type) + if os.path.isfile(path): filename = os.path.abspath(path) file_type = which_type(filename) diff --git a/medusa/metadata/generic.py b/medusa/metadata/generic.py index dc1ab5b004..7c232f64ee 100644 --- a/medusa/metadata/generic.py +++ b/medusa/metadata/generic.py @@ -35,6 +35,12 @@ log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) +BANNER = 1 +POSTER = 2 +BANNER_THUMB = 3 +POSTER_THUMB = 4 +FANART = 5 + class GenericMetadata(object): """ @@ -173,6 +179,16 @@ def get_poster_path(self, show_obj): def get_banner_path(self, show_obj): return os.path.join(show_obj.location, self.banner_name) + def get_image_path(self, show_obj, image_type): + """Based on the image_type (banner, poster, fanart) call the correct method, and return the path.""" + banner_path = { + BANNER: self.get_banner_path, + POSTER: self.get_poster_path, + FANART: self.get_fanart_path + } + if banner_path.get(image_type): + return banner_path[image_type](show_obj) + @staticmethod def get_episode_thumb_path(ep_obj): """ diff --git a/medusa/name_parser/rules/rules.py b/medusa/name_parser/rules/rules.py index fe34920a80..a69ba7f44a 100644 --- a/medusa/name_parser/rules/rules.py +++ b/medusa/name_parser/rules/rules.py @@ -397,6 +397,10 @@ def when(self, matches, context): :type context: dict :return: """ + # Don't add additional alias if we already have one from the previous rules + if matches.named('alias'): + return + fileparts = matches.markers.named('path') for filepart in marker_sorted(fileparts, matches): title = matches.range(filepart.start, filepart.end, predicate=lambda match: match.name == 'title', index=0) diff --git a/medusa/notifiers/trakt.py b/medusa/notifiers/trakt.py index 2ebf252510..ced74b97b2 100644 --- a/medusa/notifiers/trakt.py +++ b/medusa/notifiers/trakt.py @@ -92,7 +92,7 @@ def update_library(ep_obj): trakt_api.request('sync/collection', data, method='POST') except (TokenExpiredException, TraktException, AuthException) as error: - log.debug('Unable to update Trakt: {0}', error.message) + log.debug('Unable to update Trakt: {0!r}', error) @staticmethod def update_watchlist(show_obj=None, s=None, e=None, data_show=None, data_episode=None, update='add'): @@ -176,7 +176,7 @@ def update_watchlist(show_obj=None, s=None, e=None, data_show=None, data_episode trakt_api.request(trakt_url, data, method='POST') except (TokenExpiredException, TraktException, AuthException) as error: - log.debug('Unable to update Trakt watchlist: {0}', error.message) + log.debug('Unable to update Trakt watchlist: {0!r}', error) return False return True @@ -244,5 +244,5 @@ def test_notify(username, blacklist_name=None): else: return 'Test notice sent successfully to Trakt' 
except (TokenExpiredException, TraktException, AuthException) as error: - log.warning('Unable to test TRAKT: {0}', error.message) - return 'Test notice failed to Trakt: {0}'.format(error.message) + log.warning('Unable to test TRAKT: {0!r}', error) + return 'Test notice failed to Trakt: {0!r}'.format(error) diff --git a/medusa/post_processor.py b/medusa/post_processor.py index 095cd84628..c1d4d9cb56 100644 --- a/medusa/post_processor.py +++ b/medusa/post_processor.py @@ -550,7 +550,7 @@ def _add_to_anidb_mylist(self, file_path): self.log(u'Adding the file to the anidb mylist', logger.DEBUG) try: - self.anidbEpisode.add_to_mylist(status=1) # status = 1 sets the status of the file to "internal HDD" + self.anidbEpisode.add_to_mylist(state=1) # state = 1 sets the state of the file to "internal HDD" except Exception as e: self.log(u'Exception message: {0!r}'.format(e)) diff --git a/medusa/providers/__init__.py b/medusa/providers/__init__.py index 726d43b2a0..c9ea575a0e 100644 --- a/medusa/providers/__init__.py +++ b/medusa/providers/__init__.py @@ -40,6 +40,7 @@ nebulance, newpct, norbits, + nordicbits, nyaa, pretome, privatehd, @@ -69,7 +70,7 @@ 'speedcd', 'nyaa', 'torrentbytes', 'torrent9', 'morethantv', 'tokyotoshokan', 'iptorrents', 'hebits', 'alpharatio', 'sdbits', 'shazbat', 'rarbg', 'tntvillage', 'binsearch', 'xthor', 'abnormal', 'scenetime', 'nebulance', 'tvchaosuk', 'bitcannon', 'torrentz2', 'pretome', 'anizb', - 'hdspace', 'newpct', 'danishbits', 'limetorrents', 'norbits', 'bithdtv', + 'hdspace', 'newpct', 'nordicbits', 'danishbits', 'limetorrents', 'norbits', 'bithdtv', 'zooqle', 'animebytes', 'animetorrents', 'horriblesubs', 'anidex', 'shanaproject', 'torrenting', 'yggtorrent', 'elitetracker', 'archetorrent', 'privatehd', 'cinemaz', 'avistaz', 'bjshare', 'btdb' ] diff --git a/medusa/providers/generic_provider.py b/medusa/providers/generic_provider.py index 8782394384..65890cddfb 100644 --- a/medusa/providers/generic_provider.py +++ b/medusa/providers/generic_provider.py @@ -238,9 +238,33 @@ def remove_duplicate_mappings(items, pk='link'): )) ) + def search_results_in_cache(self, episodes): + """ + Search episodes based on param in cache. + + Search the cache (db) for this provider + :param episodes: List of Episode objects + + :return: A dict of search results, ordered by episode number + """ + return self.cache.find_episodes(episodes) + def find_search_results(self, series, episodes, search_mode, forced_search=False, download_current_quality=False, manual_search=False, manual_search_type='episode'): - """Search episodes based on param.""" + """ + Search episodes based on param. + + Search the provider using http queries. + :param series: Series object + :param episodes: List of Episode objects + :param search_mode: 'eponly' or 'sponly' + :param forced_search: Flag if the search was triggered by a forced search + :param download_current_quality: Flag if we want to include an already downloaded quality in the new search + :param manual_search: Flag if the search was triggered by a manual search + :param manual_search_type: How the manual search was started: For example an 'episode' or 'season' + + :return: A dict of search results, ordered by episode number. 
+ """ self._check_auth() self.series = series @@ -249,18 +273,6 @@ def find_search_results(self, series, episodes, search_mode, forced_search=False season_search = (len(episodes) > 1 or manual_search_type == 'season') and search_mode == 'sponly' for episode in episodes: - if not manual_search: - cache_results = self.cache.find_needed_episodes( - episode, forced_search=forced_search, down_cur_quality=download_current_quality - ) - if cache_results: - for episode_no in cache_results: - if episode_no not in results: - results[episode_no] = cache_results[episode_no] - else: - results[episode_no] += cache_results[episode_no] - continue - search_strings = [] if season_search: search_strings = self._get_season_search_strings(episode) @@ -471,6 +483,7 @@ def find_search_results(self, series, episodes, search_mode, forced_search=False ', '.join(map(str, search_result.parsed_result.episode_numbers)), search_result.name, search_result.url) + if episode_number not in results: results[episode_number] = [search_result] else: diff --git a/medusa/providers/nzb/anizb.py b/medusa/providers/nzb/anizb.py index 1a0bb30e7b..a5d7fc8f53 100644 --- a/medusa/providers/nzb/anizb.py +++ b/medusa/providers/nzb/anizb.py @@ -8,7 +8,7 @@ from medusa import tv from medusa.bs4_parser import BS4Parser -from medusa.helper.common import try_int +from medusa.helper.common import convert_size, try_int from medusa.logger.adapters.style import BraceAdapter from medusa.providers.nzb.nzb_provider import NZBProvider @@ -83,7 +83,7 @@ def search(self, search_strings, age=0, ep_obj=None, **kwargs): continue # description = item.find('description') - size = try_int(item.enclosure.get('length', -1)) + size = convert_size(item.enclosure.get('length'), default=-1) item = { 'title': title, diff --git a/medusa/providers/torrent/__init__.py b/medusa/providers/torrent/__init__.py index ba268643d2..2baa1b3caf 100644 --- a/medusa/providers/torrent/__init__.py +++ b/medusa/providers/torrent/__init__.py @@ -24,6 +24,7 @@ morethantv, nebulance, newpct, + nordicbits, pretome, privatehd, scenetime, @@ -66,8 +67,8 @@ __all__ = [ 'abnormal', 'alpharatio', 'animebytes', 'archetorrent', 'bithdtv', 'torrent9', 'danishbits', - 'hdspace', 'hdtorrents', 'iptorrents', 'limetorrents', 'morethantv', 'torznab', - 'newpct', 'pretome', 'sdbits', 'scenetime', 'speedcd', 'thepiratebay', 'tntvillage', 'tokyotoshokan', + 'hdspace', 'hdtorrents', 'iptorrents', 'limetorrents', 'morethantv', 'torznab', 'newpct', 'nordicbits', + 'pretome', 'sdbits', 'scenetime', 'speedcd', 'thepiratebay', 'tntvillage', 'tokyotoshokan', 'torrentbytes', 'torrentleech', 'nebulance', 'tvchaosuk', 'xthor', 'zooqle', 'bitcannon', 'btn', 'hdbits', 'norbits', 'rarbg', 'torrentday', 'nyaa', 'rsstorrent', 'shazbat', 'hebits', 'torrentz2', 'animetorrents', 'horriblesubs', 'anidex', 'shanaproject', 'torrenting', 'yggtorrent', diff --git a/medusa/providers/torrent/html/anidex.py b/medusa/providers/torrent/html/anidex.py index 7940fbd907..266d4efcb0 100644 --- a/medusa/providers/torrent/html/anidex.py +++ b/medusa/providers/torrent/html/anidex.py @@ -112,7 +112,7 @@ def parse(self, data, mode): cells = row.find_all('td') try: - title = cells[labels.index('Filename')].span.get_text() + title = cells[labels.index('Filename')].span.get('title') download_url = cells[labels.index('Torrent')].a.get('href') if not all([title, download_url]): continue diff --git a/medusa/providers/torrent/html/limetorrents.py b/medusa/providers/torrent/html/limetorrents.py index c64115c238..d7382bfff1 100644 --- 
a/medusa/providers/torrent/html/limetorrents.py +++ b/medusa/providers/torrent/html/limetorrents.py @@ -36,7 +36,7 @@ def __init__(self): self.public = True # URLs - self.url = 'https://www.limetorrents.cc' + self.url = 'https://www.limetorrents.info' self.urls = { 'update': urljoin(self.url, '/post/updatestats.php'), 'search': urljoin(self.url, '/search/tv/{query}/'), diff --git a/medusa/providers/torrent/html/nordicbits.py b/medusa/providers/torrent/html/nordicbits.py new file mode 100644 index 0000000000..002c1e2237 --- /dev/null +++ b/medusa/providers/torrent/html/nordicbits.py @@ -0,0 +1,206 @@ +# coding=utf-8 + +"""Provider code for NordicBits.""" + +from __future__ import unicode_literals + +import logging + +from medusa import tv +from medusa.bs4_parser import BS4Parser +from medusa.helper.common import ( + convert_size, + try_int, +) +from medusa.logger.adapters.style import BraceAdapter +from medusa.providers.torrent.torrent_provider import TorrentProvider + +from requests.compat import urljoin +from requests.utils import dict_from_cookiejar + +log = BraceAdapter(logging.getLogger(__name__)) +log.logger.addHandler(logging.NullHandler()) + + +class NordicBitsProvider(TorrentProvider): + """NordicBits Torrent provider.""" + + def __init__(self): + """Initialize the class.""" + super(NordicBitsProvider, self).__init__('NordicBits') + + # Credentials + self.username = None + self.password = None + + # URLs + self.url = 'https://nordicb.org' + self.urls = { + 'login': urljoin(self.url, 'takelogin.php'), + 'search': urljoin(self.url, 'browse.php'), + } + + # Proper Strings + self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP'] + + # Miscellaneous Options + self.freeleech = False + + # Torrent Stats + self.minseed = None + self.minleech = None + + # Cache + self.cache = tv.Cache(self) + + def search(self, search_strings, **kwargs): + """ + Search a provider and parse the results. + + :param search_strings: A dict with mode (key) and the search value (value) + :returns: A list of search results (structure) + """ + results = [] + if not self.login(): + return results + + search_params = { + 'cats2[]': [48, 57, 66, 11, 7, 5, 30, 31, 32], + 'searchin': 'title', + 'incldead': 0 # Fixed to not include dead torrents for now + } + + if self.freeleech: + search_params['only_free'] = 1 + + for mode in search_strings: + log.debug('Search mode: {0}', mode) + + for search_string in search_strings[mode]: + + if mode != 'RSS': + search_params['search'] = search_string + log.debug('Search string: {search}', + {'search': search_string}) + + response = self.session.get(self.urls['search'], params=search_params) + if not response or not response.text: + log.debug('No data returned from provider') + continue + + results += self.parse(response.text, mode) + + return results + + def parse(self, data, mode): + """ + Parse search results for items. + + :param data: The raw response from a search + :param mode: The current mode used to search, e.g. RSS + + :return: A list of items found + """ + def get_label_title(label): + """Get table row header labels.""" + if label.get_text(): + return label.get_text(strip=True) + if label.a and label.a.get_text(strip=True): + return label.a.get_text(strip=True) + if label.img: + return label.img.get('title') + + items = [] + if '
<h2>Nothing found!</h2>
' in data: + log.debug('Data returned from provider does not contain any torrents') + return items + + with BS4Parser(data, 'html.parser') as html: + torrent_table = html.find('table', width='100%') + torrent_rows = torrent_table('tr') if torrent_table else [] + + # Continue only if at least one release is found + if len(torrent_rows) < 1: + log.debug('Data returned from provider does not contain any torrents') + return items + + # Cat., Active, Name, Download, Added, Size, Uploader, Seeders, Leechers + labels = [get_label_title(label) for label in + torrent_rows[0]('td')] + + for row in torrent_rows[1:]: + try: + cells = row.findChildren('td')[:len(labels)] + if len(cells) < len(labels): + continue + + title = cells[labels.index('Name')].a + title = title.get_text(strip=True) if title else None + link = cells[labels.index('Download')].a + link = link.get('href') if link else None + download_url = urljoin(self.url, link) if link else None + if not all([title, download_url]): + continue + + seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True)) + leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True)) + + # Filter unseeded torrent + if seeders < min(self.minseed, 1): + if mode != 'RSS': + log.debug("Discarding torrent because it doesn't meet the" + ' minimum seeders: {0}. Seeders: {1}', + title, seeders) + continue + + torrent_size, _, unit = cells[labels.index('Size')].contents + size = convert_size('{0} {1}'.format(torrent_size, unit)) or -1 + + pubdate_raw = cells[labels.index('Added')].get_text() + pubdate = self.parse_pubdate(pubdate_raw) + + item = { + 'title': title, + 'link': download_url, + 'size': size, + 'seeders': seeders, + 'leechers': leechers, + 'pubdate': pubdate, + } + if mode != 'RSS': + log.debug('Found result: {0} with {1} seeders and {2} leechers', + title, seeders, leechers) + + items.append(item) + except (AttributeError, TypeError, KeyError, ValueError, IndexError): + log.exception('Failed parsing provider.') + + return items + + def login(self): + """Login method used for logging in before doing search and torrent downloads.""" + cookies = dict_from_cookiejar(self.session.cookies) + if any(cookies.values()) and cookies.get('uid'): + return True + + login_params = { + 'username': self.username, + 'password': self.password, + 'use_ssl': 1, + 'perm_ssl': 1 + } + + response = self.session.post(self.urls['login'], data=login_params) + + if not response or not response.text: + log.warning('Unable to connect to provider') + return False + + if 'Welcome back' in response.text: + return True + else: + log.warning('Invalid username or password. 
Check your settings') + return False + + +provider = NordicBitsProvider() diff --git a/medusa/providers/torrent/json/animebytes.py b/medusa/providers/torrent/json/animebytes.py index 2de8e65e56..062f2ffb06 100644 --- a/medusa/providers/torrent/json/animebytes.py +++ b/medusa/providers/torrent/json/animebytes.py @@ -8,6 +8,7 @@ import re from medusa import tv +from medusa.helper.common import convert_size from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider @@ -141,7 +142,8 @@ def parse(self, data, mode): # Hack for the h264 10bit stuff properties_string = properties_string.replace('h26410-bit', 'h264|hi10p') properties = properties_string.split('|') - if not all(properties): + download_url = row.get('Link') + if not (download_url or all(properties)): continue # Get rid of freeleech from properties @@ -245,10 +247,12 @@ def parse(self, data, mode): title, seeders) continue + size = convert_size(row.get('Size'), default=-1) + item = { 'title': title, - 'link': row.get('Link'), - 'size': row.get('Size'), + 'link': download_url, + 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': pubdate, diff --git a/medusa/providers/torrent/json/btn.py b/medusa/providers/torrent/json/btn.py index feb42ea434..1a1efad61a 100644 --- a/medusa/providers/torrent/json/btn.py +++ b/medusa/providers/torrent/json/btn.py @@ -16,7 +16,7 @@ tv, ) from medusa.common import cpu_presets -from medusa.helper.common import episode_num +from medusa.helper.common import convert_size, episode_num from medusa.indexers.indexer_config import INDEXER_TVDBV2 from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider @@ -151,7 +151,7 @@ def parse(self, data, mode): title, seeders) continue - size = row.get('Size') or -1 + size = convert_size(row.get('Size'), default=-1) pubdate_raw = row.get('Time') pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True) diff --git a/medusa/providers/torrent/json/danishbits.py b/medusa/providers/torrent/json/danishbits.py index 8918cdab7b..64e3ee837b 100644 --- a/medusa/providers/torrent/json/danishbits.py +++ b/medusa/providers/torrent/json/danishbits.py @@ -80,7 +80,7 @@ def search(self, search_strings, age=0, ep_obj=None, **kwargs): search_params['search'] = search_string response = self.session.get(self.urls['search'], params=search_params) - if not response: + if not response or not response.content: log.debug('No data returned from provider') continue @@ -89,7 +89,7 @@ def search(self, search_strings, age=0, ep_obj=None, **kwargs): except ValueError as e: log.warning( 'Could not decode the response as json for the result,' - 'searching {provider} with error {err_msg}', + ' searching {provider} with error {err_msg}', provider=self.name, err_msg=e ) diff --git a/medusa/providers/torrent/json/hdbits.py b/medusa/providers/torrent/json/hdbits.py index f7f7959e0e..df290babaf 100644 --- a/medusa/providers/torrent/json/hdbits.py +++ b/medusa/providers/torrent/json/hdbits.py @@ -7,6 +7,7 @@ import logging from medusa import tv +from medusa.helper.common import convert_size from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider @@ -139,7 +140,7 @@ def parse(self, data, mode): title, seeders) continue - size = row.get('size') or -1 + size = convert_size(row.get('size'), default=-1) pubdate_raw = row.get('added') pubdate = self.parse_pubdate(pubdate_raw) diff --git 
a/medusa/providers/torrent/json/xthor.py b/medusa/providers/torrent/json/xthor.py index eadddaee24..f32a7cc4c3 100644 --- a/medusa/providers/torrent/json/xthor.py +++ b/medusa/providers/torrent/json/xthor.py @@ -7,7 +7,7 @@ import logging from medusa import tv -from medusa.common import USER_AGENT +from medusa.helper.common import convert_size from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider @@ -34,7 +34,6 @@ def __init__(self): # Proper Strings # Miscellaneous Options - self.headers.update({'User-Agent': USER_AGENT}) self.subcategories = [433, 637, 455, 639] # Torrent Stats @@ -140,7 +139,7 @@ def parse(self, data, mode): title, seeders) continue - size = row.get('size') or -1 + size = convert_size(row.get('size'), default=-1) item = { 'title': title, diff --git a/medusa/scene_exceptions.py b/medusa/scene_exceptions.py index 72174d4124..95c06becb9 100644 --- a/medusa/scene_exceptions.py +++ b/medusa/scene_exceptions.py @@ -152,6 +152,7 @@ def get_scene_exceptions_by_name(show_name): 'FROM scene_exceptions ' 'WHERE show_name = ? ORDER BY season ASC', [show_name]) + if scene_exceptions: # FIXME: Need to add additional layer indexer. return [(int(exception['indexer_id']), int(exception['season']), int(exception['indexer'])) @@ -187,7 +188,12 @@ def get_scene_exceptions_by_name(show_name): def update_scene_exceptions(series_obj, scene_exceptions, season=-1): """Update database with all show scene exceptions by indexer_id.""" - logger.info('Updating scene exceptions...') + logger.info('Updating scene exceptions for series {series.name} and ' + 'indexer {series.indexer} with id {series.series_id}', + {'series': series_obj}) + + if not all([series_obj.indexer, series_obj.series_id]): + return cache_db_con = db.DBConnection('cache.db') cache_db_con.action( diff --git a/medusa/scene_numbering.py b/medusa/scene_numbering.py index d2bf1a007b..d7038e588b 100644 --- a/medusa/scene_numbering.py +++ b/medusa/scene_numbering.py @@ -501,12 +501,22 @@ def xem_refresh(series_obj, force=False): entry[indexerApi(indexer_id).config['xem_origin']]['season'], entry[indexerApi(indexer_id).config['xem_origin']]['episode']] ]) + # Update the absolute_number from xem, but do not set it when it has already been set by tvdb. + # We want to prevent doubles and tvdb is leading in that case. cl.append([ 'UPDATE tv_episodes SET absolute_number = ? ' - 'WHERE indexer = ? AND showid = ? AND season = ? AND episode = ? AND absolute_number = 0', + 'WHERE indexer = ? AND showid = ? AND season = ? AND episode = ? AND absolute_number = 0 ' + 'AND {absolute_number} NOT IN ' + '(SELECT absolute_number ' + 'FROM tv_episodes ' + 'WHERE absolute_number = ? AND indexer = ? 
AND showid = ?)'.format( + absolute_number=entry[indexerApi(indexer_id).config['xem_origin']]['absolute'] + ), [entry[indexerApi(indexer_id).config['xem_origin']]['absolute'], indexer_id, series_id, entry[indexerApi(indexer_id).config['xem_origin']]['season'], - entry[indexerApi(indexer_id).config['xem_origin']]['episode']] + entry[indexerApi(indexer_id).config['xem_origin']]['episode'], + entry[indexerApi(indexer_id).config['xem_origin']]['absolute'], + indexer_id, series_id] ]) if 'scene_2' in entry: # for doubles cl.append([ diff --git a/medusa/search/core.py b/medusa/search/core.py index a39d9614f5..69f8c84d1f 100644 --- a/medusa/search/core.py +++ b/medusa/search/core.py @@ -480,10 +480,11 @@ def wanted_episodes(series_obj, from_date): return wanted -def search_for_needed_episodes(force=False): +def search_for_needed_episodes(scheduler_start_time, force=False): """Search providers for needed episodes. :param force: run the search even if no episodes are needed + :param scheduler_start_time: timestamp of the start of the search scheduler :return: list of found episodes """ show_list = app.showList @@ -520,7 +521,7 @@ def search_for_needed_episodes(force=False): threading.currentThread().name = u'{thread} :: [{provider}]'.format( thread=original_thread_name, provider=cur_provider.name ) - cur_provider.cache.update_cache() + cur_provider.cache.update_cache(scheduler_start_time) single_results = {} multi_results = [] @@ -647,6 +648,8 @@ def search_providers(series_obj, episodes, forced_search=False, down_cur_quality manual_search_results = [] multi_results = [] single_results = [] + cache_multi_results = [] + cache_single_results = [] # build name cache for show name_cache.build_name_cache(series_obj) @@ -667,14 +670,15 @@ def search_providers(series_obj, episodes, forced_search=False, down_cur_quality threading.currentThread().name = original_thread_name for cur_provider in providers: - threading.currentThread().name = original_thread_name + u' :: [' + cur_provider.name + u']' + threading.currentThread().name = '{original_thread_name} :: [{provider}]'.format( + original_thread_name=original_thread_name, provider=cur_provider.name + ) if cur_provider.anime_only and not series_obj.is_anime: log.debug(u'{0} is not an anime, skipping', series_obj.name) continue found_results[cur_provider.name] = {} - search_count = 0 search_mode = cur_provider.search_mode @@ -694,24 +698,41 @@ def search_providers(series_obj, episodes, forced_search=False, down_cur_quality log.info(u'Performing season pack search for {0}', series_obj.name) try: - search_results = cur_provider.find_search_results(series_obj, episodes, search_mode, forced_search, - down_cur_quality, manual_search, manual_search_type) + search_results = [] + cache_search_results = [] + cache_multi = [] + cache_single = [] + + if not manual_search: + cache_search_results = cur_provider.search_results_in_cache(episodes) + if cache_search_results: + # From our provider multi_episode and single_episode results, collect candidates. + found_cache_results = list_results_for_provider(cache_search_results, found_results, cur_provider) + # We're passing the empty lists, because we don't want to include previous candidates + cache_multi, cache_single = collect_candidates(found_cache_results, cur_provider, cache_multi, + cache_single, series_obj, down_cur_quality) + + # Check if we got any candidates from cache add add them to the list. + # If we found candidates in cache, we don't need to search the provider. 
+ if cache_multi: + cache_multi_results += cache_multi + if cache_single: + cache_single_results += cache_single + + # For now we only search if we didn't get any results back from cache, + # but we might wanna check if there was something useful in cache. + if not (cache_multi or cache_single): + log.debug(u'Could not find any candidates in cache, searching provider.') + search_results = cur_provider.find_search_results(series_obj, episodes, search_mode, forced_search, + down_cur_quality, manual_search, manual_search_type) + # Update the list found_results + found_results = list_results_for_provider(search_results, found_results, cur_provider) + except AuthException as error: - log.error(u'Authentication error: {0}', ex(error)) + log.error(u'Authentication error: {0!r}', error) break - if search_results: - # make a list of all the results for this provider - for cur_ep in search_results: - if cur_ep in found_results[cur_provider.name]: - found_results[cur_provider.name][cur_ep] += search_results[cur_ep] - else: - found_results[cur_provider.name][cur_ep] = search_results[cur_ep] - - # Sort the list by seeders if possible - if cur_provider.provider_type == u'torrent' or getattr(cur_provider, u'torznab', None): - found_results[cur_provider.name][cur_ep].sort(key=lambda d: int(d.seeders), reverse=True) - + if search_results or cache_search_results: break elif not cur_provider.search_fallback or search_count == 2: break @@ -747,16 +768,13 @@ def search_providers(series_obj, episodes, forced_search=False, down_cur_quality # Continue because we don't want to pick best results as we are running a manual search by user continue - # Collect candidates for multi-episode or season results - candidates = (candidate for result, candidate in iteritems(found_results[cur_provider.name]) - if result in (SEASON_RESULT, MULTI_EP_RESULT)) - candidates = list(itertools.chain(*candidates)) - if candidates: - multi_results += collect_multi_candidates(candidates, series_obj, episodes, down_cur_quality) - - # Collect candidates for single-episode results - single_results = collect_single_candidates(found_results[cur_provider.name], - single_results) + # From our providers multi_episode and single_episode results, collect candidates. + # Only collect the candidates if we didn't get any from cache. 
+ if not (cache_multi_results or cache_single_results): + multi_results, single_results = collect_candidates(found_results, cur_provider, multi_results, + single_results, series_obj, down_cur_quality) + else: + multi_results, single_results = cache_multi_results, cache_single_results # Remove provider from thread name before return results threading.currentThread().name = original_thread_name @@ -768,6 +786,43 @@ def search_providers(series_obj, episodes, forced_search=False, down_cur_quality return combine_results(multi_results, single_results) +def collect_candidates(found_results, provider, multi_results, single_results, series_obj, down_cur_quality): + """Collect candidates for episode, multi-episode or season results.""" + candidates = (candidate for result, candidate in iteritems(found_results[provider.name]) + if result in (SEASON_RESULT, MULTI_EP_RESULT)) + candidates = list(itertools.chain(*candidates)) + if candidates: + multi_results += collect_multi_candidates(candidates, series_obj, down_cur_quality) + + # Collect candidates for single-episode results + single_results = collect_single_candidates(found_results[provider.name], single_results) + + return multi_results, single_results + + +def list_results_for_provider(search_results, found_results, provider): + """ + Add results for this provider to the search_results dict. + + The structure is based on [provider_name][episode_number][search_result] + :param search_results: New dictionary with search results for this provider + :param found_results: Dictionary with existing per provider search results + :param provider: Provider object + :return: Updated dict found_results + """ + for cur_ep in search_results: + if cur_ep in found_results[provider.name]: + found_results[provider.name][cur_ep] += search_results[cur_ep] + else: + found_results[provider.name][cur_ep] = search_results[cur_ep] + + # Sort the list by seeders if possible + if provider.provider_type == u'torrent' or getattr(provider, u'torznab', None): + found_results[provider.name][cur_ep].sort(key=lambda d: int(d.seeders), reverse=True) + + return found_results + + def collect_single_candidates(candidates, results): """Collect single-episode result candidates.""" single_candidates = list(results) @@ -798,7 +853,7 @@ def collect_single_candidates(candidates, results): return single_candidates + new_candidates -def collect_multi_candidates(candidates, series_obj, episodes, down_cur_quality): +def collect_multi_candidates(candidates, series_obj, down_cur_quality): """Collect mutli-episode and season result candidates.""" multi_candidates = [] diff --git a/medusa/search/daily.py b/medusa/search/daily.py index 32a6759845..1978cc93ab 100644 --- a/medusa/search/daily.py +++ b/medusa/search/daily.py @@ -8,6 +8,7 @@ import threading from builtins import object from datetime import date, datetime, timedelta +from time import time from medusa import app, common from medusa.db import DBConnection @@ -23,6 +24,7 @@ from medusa.search.queue import DailySearchQueueItem from medusa.show.show import Show + log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) @@ -49,6 +51,9 @@ def run(self, force=False): # pylint:disable=too-many-branches return self.amActive = True + # Let's keep track of the exact time the scheduler kicked in, + # as we need to compare to this time for each provider. 
+ scheduler_start_time = int(time()) if not network_dict: update_network_dict() @@ -110,9 +115,9 @@ def run(self, force=False): # pylint:disable=too-many-branches main_db_con = DBConnection() main_db_con.mass_action(new_releases) - # queue episode for daily search + # queue a daily search app.search_queue_scheduler.action.add_item( - DailySearchQueueItem(force=force) + DailySearchQueueItem(scheduler_start_time, force=force) ) self.amActive = False diff --git a/medusa/search/queue.py b/medusa/search/queue.py index 31bff45fd5..fb2f927bf3 100644 --- a/medusa/search/queue.py +++ b/medusa/search/queue.py @@ -242,14 +242,15 @@ def add_item(self, item): class DailySearchQueueItem(generic_queue.QueueItem): - """Daily searche queue item class.""" + """Daily search queue item class.""" - def __init__(self, force): + def __init__(self, scheduler_start_time, force): """Initialize the class.""" generic_queue.QueueItem.__init__(self, u'Daily Search', DAILY_SEARCH) self.success = None self.started = None + self.scheduler_start_time = scheduler_start_time self.force = force def run(self): @@ -259,7 +260,7 @@ def run(self): try: log.info('Beginning daily search for new episodes') - found_results = search_for_needed_episodes(force=self.force) + found_results = search_for_needed_episodes(self.scheduler_start_time, force=self.force) if not found_results: log.info('No needed episodes found') diff --git a/medusa/server/api/v2/config.py b/medusa/server/api/v2/config.py index 0f7c9b4e5e..be170440df 100644 --- a/medusa/server/api/v2/config.py +++ b/medusa/server/api/v2/config.py @@ -193,7 +193,7 @@ class ConfigHandler(BaseRequestHandler): 'search.filters.ignoreUnknownSubs': BooleanField(app, 'IGNORE_UND_SUBS'), 'notifiers.kodi.enabled': BooleanField(app, 'USE_KODI'), - 'notifiers.kodi.alwaysOn': BooleanField(app, 'USE_KODI'), + 'notifiers.kodi.alwaysOn': BooleanField(app, 'KODI_ALWAYS_ON'), 'notifiers.kodi.notifyOnSnatch': BooleanField(app, 'KODI_NOTIFY_ONSNATCH'), 'notifiers.kodi.notifyOnDownload': BooleanField(app, 'KODI_NOTIFY_ONDOWNLOAD'), 'notifiers.kodi.notifyOnSubtitleDownload': BooleanField(app, 'KODI_NOTIFY_ONSUBTITLEDOWNLOAD'), diff --git a/medusa/server/web/manage/handler.py b/medusa/server/web/manage/handler.py index 0e664ff1dc..c1c1927bfe 100644 --- a/medusa/server/web/manage/handler.py +++ b/medusa/server/web/manage/handler.py @@ -335,7 +335,7 @@ def subtitleMissedPP(self): ep_status = tv_episode.status if ep_status in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST): status = 'snatched' - elif ep_status in DOWNLOADED: + elif ep_status == DOWNLOADED: status = 'downloaded' else: continue diff --git a/medusa/tv/cache.py b/medusa/tv/cache.py index 15eb097303..b2d97a5ddc 100644 --- a/medusa/tv/cache.py +++ b/medusa/tv/cache.py @@ -9,6 +9,7 @@ import traceback from builtins import object from builtins import str +from collections import defaultdict from time import time from medusa import ( @@ -31,7 +32,7 @@ from medusa.show import naming from medusa.show.show import Show -from six import text_type +from six import text_type, viewitems log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) @@ -196,10 +197,10 @@ def _check_item_auth(self, title, url): """Check item auth.""" return True - def update_cache(self): + def update_cache(self, search_start_time): """Update provider cache.""" # check if we should update - if not self.should_update(): + if not self.should_update(search_start_time): return try: @@ -210,7 +211,7 @@ def update_cache(self): self._clear_cache() # set 
updated - self.updated = time() + self.updated = search_start_time # get last 5 rss cache results recent_results = self.provider.recent_results @@ -362,13 +363,14 @@ def _set_time(self, table, value): {'provider': self.provider_id} ) - def should_update(self): + def should_update(self, scheduler_start_time): """Check if we should update provider cache.""" # if we've updated recently then skip the update - if time() - self.updated < self.minTime * 60: - log.debug('Last update was too soon, using old cache: {0}.' - ' Updated less than {1} minutes ago', - self.updated, self.minTime) + if scheduler_start_time - self.updated < self.minTime * 60: + log.debug('Last update was too soon, using old cache.' + ' Last update ran {0} seconds ago.' + ' Updated less than {1} minutes ago.', + scheduler_start_time - self.updated, self.minTime) return False log.debug('Updating providers cache') @@ -454,38 +456,101 @@ def item_in_cache(self, url): 'WHERE url=?'.format(provider=self.provider_id), [url] )[0]['count'] - def find_needed_episodes(self, episode, forced_search=False, - down_cur_quality=False): - """Find needed episodes.""" - needed_eps = {} + def find_needed_episodes(self, episodes, forced_search=False, down_cur_quality=False): + """ + Search cache for needed episodes. + + NOTE: This is currently only used by the Daily Search. + The following checks are performed on the cache results: + * Use the episodes current quality / wanted quality to decide if we want it + * Filtered on ignored/required words, and non-tv junk + * Filter out non-anime results on Anime only providers + * Check if the series is still in our library + + :param episodes: Single or list of episode object(s) + :param forced_search: Flag to mark that this is searched through a forced search + :param down_cur_quality: Flag to mark that we want to include the episode(s) current quality + + :return dict(episode: [list of SearchResult objects]). + """ + results = defaultdict(list) + cache_results = self.find_episodes(episodes) + + for episode_number, search_results in viewitems(cache_results): + for search_result in search_results: + + # ignored/required words, and non-tv junk + if not naming.filter_bad_releases(search_result.name): + continue + + all_wanted = True + for cur_ep in search_result.actual_episodes: + # if the show says we want that episode then add it to the list + if not search_result.series.want_episode(search_result.actual_season, cur_ep, search_result.quality, + forced_search, down_cur_quality): + log.debug('Ignoring {0} because one or more episodes are unwanted', search_result.name) + all_wanted = False + break + + if not all_wanted: + continue + + log.debug( + '{id}: Using cached results from {provider} for series {show_name!r} episode {ep}', { + 'id': search_result.series.series_id, + 'provider': self.provider.name, + 'show_name': search_result.series.name, + 'ep': episode_num(search_result.episodes[0].season, search_result.episodes[0].episode), + } + ) + + # FIXME: Should be changed to search_result.search_type + search_result.forced_search = forced_search + search_result.download_current_quality = down_cur_quality + + # add it to the list + results[episode_number].append(search_result) + + return results + + def find_episodes(self, episodes): + """ + Search cache for episodes. + + NOTE: This is currently only used by the Backlog/Forced Search. As we determine the candidates there. 
+ The following checks are performed on the cache results: + * Filter out non-anime results on Anime only providers + * Check if the series is still in our library + :param episodes: Single or list of episode object(s) + + :return list of SearchResult objects. + """ + cache_results = defaultdict(list) results = [] cache_db_con = self._get_db() - if not episode: + if not episodes: sql_results = cache_db_con.select( 'SELECT * FROM [{name}]'.format(name=self.provider_id)) - elif not isinstance(episode, list): + elif not isinstance(episodes, list): sql_results = cache_db_con.select( 'SELECT * FROM [{name}] ' - 'WHERE indexer = ? AND' - ' indexerid = ? AND' - ' season = ? AND' - ' episodes LIKE ?'.format(name=self.provider_id), - [episode.series.indexer, episode.series.series_id, episode.season, - '%|{0}|%'.format(episode.episode)] + 'WHERE indexer = ? AND ' + 'indexerid = ? AND ' + 'season = ? AND ' + 'episodes LIKE ?'.format(name=self.provider_id), + [episodes.series.indexer, episodes.series.series_id, episodes.season, + '%|{0}|%'.format(episodes.episode)] ) else: - for ep_obj in episode: + for ep_obj in episodes: results.append([ 'SELECT * FROM [{name}] ' 'WHERE indexer = ? AND ' - ' indexerid = ? AND' - ' season = ? AND' - ' episodes LIKE ? AND ' - ' quality IN ({qualities})'.format( - name=self.provider_id, - qualities=','.join((str(x) - for x in ep_obj.wanted_quality)) + 'indexerid = ? AND ' + 'season = ? AND ' + 'episodes LIKE ?'.format( + name=self.provider_id ), [ep_obj.series.indexer, ep_obj.series.series_id, ep_obj.season, '%|{0}|%'.format(ep_obj.episode)]] @@ -499,10 +564,10 @@ def find_needed_episodes(self, episode, forced_search=False, sql_results = [] log.debug( '{id}: No cached results in {provider} for series {show_name!r} episode {ep}', { - 'id': episode[0].series.series_id, + 'id': episodes[0].series.series_id, 'provider': self.provider.name, - 'show_name': episode[0].series.name, - 'ep': episode_num(episode[0].season, episode[0].episode), + 'show_name': episodes[0].series.name, + 'ep': episode_num(episodes[0].season, episodes[0].episode), } ) @@ -515,10 +580,6 @@ def find_needed_episodes(self, episode, forced_search=False, search_result = self.provider.get_result() - # ignored/required words, and non-tv junk - if not naming.filter_bad_releases(cur_result['name']): - continue - # get the show, or ignore if it's not one of our shows series_obj = Show.find_by_id(app.showList, int(cur_result['indexer']), int(cur_result['indexerid'])) if not series_obj: @@ -535,49 +596,32 @@ def find_needed_episodes(self, episode, forced_search=False, search_result.version = cur_result['version'] search_result.name = cur_result['name'] search_result.url = cur_result['url'] - search_result.season = int(cur_result['season']) - search_result.actual_season = search_result.season + search_result.actual_season = int(cur_result['season']) - sql_episodes = cur_result['episodes'].strip('|') # TODO: Add support for season results + sql_episodes = cur_result['episodes'].strip('|') # Season result if not sql_episodes: - ep_objs = series_obj.get_all_episodes(search_result.season) + ep_objs = series_obj.get_all_episodes(search_result.actual_season) + if not ep_objs: + # We couldn't get any episodes for this season, which is odd, skip the result. 
+ log.debug("We couldn't get any episodes for season {0} of {1}, skipping", + search_result.actual_season, search_result.name) + continue actual_episodes = [ep.episode for ep in ep_objs] episode_number = SEASON_RESULT # Multi or single episode result else: actual_episodes = [int(ep) for ep in sql_episodes.split('|')] - ep_objs = [series_obj.get_episode(search_result.season, ep) for ep in actual_episodes] + ep_objs = [series_obj.get_episode(search_result.actual_season, ep) for ep in actual_episodes] if len(actual_episodes) == 1: episode_number = actual_episodes[0] else: episode_number = MULTI_EP_RESULT - all_wanted = True - for cur_ep in actual_episodes: - # if the show says we want that episode then add it to the list - if not series_obj.want_episode(search_result.season, cur_ep, search_result.quality, - forced_search, down_cur_quality): - log.debug('Ignoring {0} because one or more episodes are unwanted', cur_result['name']) - all_wanted = False - break - - if not all_wanted: - continue - search_result.episodes = ep_objs search_result.actual_episodes = actual_episodes - log.debug( - '{id}: Using cached results from {provider} for series {show_name!r} episode {ep}', { - 'id': search_result.episodes[0].series.series_id, - 'provider': self.provider.name, - 'show_name': search_result.episodes[0].series.name, - 'ep': episode_num(search_result.episodes[0].season, search_result.episodes[0].episode), - } - ) - # Map the remaining attributes search_result.series = series_obj search_result.seeders = cur_result['seeders'] @@ -587,17 +631,10 @@ def find_needed_episodes(self, episode, forced_search=False, search_result.proper_tags = cur_result['proper_tags'].split('|') if cur_result['proper_tags'] else '' search_result.content = None - # FIXME: Should be changed to search_result.search_type - search_result.forced_search = forced_search - search_result.download_current_quality = down_cur_quality - # add it to the list - if episode_number not in needed_eps: - needed_eps[episode_number] = [search_result] - else: - needed_eps[episode_number].append(search_result) + cache_results[episode_number].append(search_result) # datetime stamp this search so cache gets cleared self.searched = time() - return needed_eps + return cache_results diff --git a/readme.md b/readme.md index 4991f5b179..2aec64b25c 100644 --- a/readme.md +++ b/readme.md @@ -62,6 +62,18 @@ #### Dependencies To run Medusa from source you will need Python 2.7.10 + +#### Installation - direct + Start [here](https://github.com/pymedusa/Medusa/wiki/Installation-&-Configuration-Guides) to read the installation guides for different setups. + +#### Installation - Docker + There's a direct build available on [Dockerhub](https://hub.docker.com/r/pymedusa/medusa/) which is updated directly from this repo on every commit to master. + + For alternate architectures, the [linuxserver.io](https://www.linuxserver.io) team have kindly produced docker images for X86, armhf and aarch64 platforms. This is built from an older intermediary Dockerfile. 
+ +* X86 - [Dockerhub](https://hub.docker.com/r/linuxserver/medusa/), [Github](https://github.com/linuxserver/docker-medusa) +* armhf - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa/), [Github](https://github.com/linuxserver/docker-medusa-armhf) +* aarch64 - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa-aarch64/), [Github](https://github.com/linuxserver/docker-medusa-arm64) #### [![Feature Requests](https://cloud.githubusercontent.com/assets/390379/10127973/045b3a96-6560-11e5-9b20-31a2032956b2.png)](https://github.com/pymedusa/Medusa/issues?q=is%3Aopen+is%3Aissue+label%3A%22Feature+Request%22) @@ -107,13 +119,3 @@ This product uses [MediaInfo](http://mediaarea.net/MediaInfo) library, Copyright Binaries for Windows and MacOS are included. Linux distributions need to manually install MediaInfo. MediaInfo is optional, but highly recommended since it increases the number of supported formats for video metadata extraction. Basic MKV metadata is supported when MediaInfo is not installed. - -#### Docker images - -The [linuxserver.io](https://www.linuxserver.io) team have kindly produced docker images for X86, armhf and aarch64 platforms. This is built from an older intermediary Dockerfile. - -* X86 - [Dockerhub](https://hub.docker.com/r/linuxserver/medusa/), [Github](https://github.com/linuxserver/docker-medusa) -* armhf - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa/), [Github](https://github.com/linuxserver/docker-medusa-armhf) -* aarch64 - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa-aarch64/), [Github](https://github.com/linuxserver/docker-medusa-arm64) - -There's also a direct build available here [Dockerhub](https://hub.docker.com/r/pymedusa/medusa/) which is updated directly from this repo on every commit. diff --git a/start.py b/start.py index ed842c9165..fb07c8d633 100755 --- a/start.py +++ b/start.py @@ -1,9 +1,14 @@ -#!/usr/bin/env python2.7 +#!/usr/bin/env python # -*- coding: utf-8 -* """Startup script.""" from __future__ import unicode_literals +import sys + from medusa.__main__ import main if __name__ == '__main__': + if sys.version_info.major == 3 and sys.version_info.minor < 5: + print('Medusa supports Python 2 from version 2.7.10 and Python 3 from version 3.5.0, exiting!') + raise Exception('Incorrect Python version. Shutting down!') main() diff --git a/tests/test_guessit.yml b/tests/test_guessit.yml index e9b6cab8cf..2b0e45d1e5 100644 --- a/tests/test_guessit.yml +++ b/tests/test_guessit.yml @@ -4359,3 +4359,16 @@ container: mkv mimetype: video/x-matroska type: episode + +? 
JoJo's Bizarre Adventure (2012)\Season 02\[HorribleSubs] JoJo's Bizarre Adventure - Stardust Crusaders - 12 [1080p].mkv +: title: JoJo's Bizarre Adventure + year: 2012 + season: 2 + episode: 12 + release_group: HorribleSubs + alias: JoJo's Bizarre Adventure - Stardust Crusaders + alternative_title: Stardust Crusaders + screen_size: 1080p + container: mkv + mimetype: video/x-matroska + type: episode diff --git a/tests/test_process_tv.py b/tests/test_process_tv.py index a34e95e72e..acfca15d13 100644 --- a/tests/test_process_tv.py +++ b/tests/test_process_tv.py @@ -155,7 +155,7 @@ def test_paths(monkeypatch, p, create_structure): 'resource_name': 'show.name.s02e01.webrip.x264-kovalski.nzb', 'failed': False, 'expected': [('media/postprocess', - ['sample.mkv', 'show.name.s02e01.webrip.x264-kovalski.mkv']), + ['show.name.s02e01.webrip.x264-kovalski.mkv', 'sample.mkv']), ('media/postprocess/subfolder', ['readme.txt']) ], 'structure': ( diff --git a/themes-default/slim/package.json b/themes-default/slim/package.json index cbfa0d4e8c..0498e3bbc5 100644 --- a/themes-default/slim/package.json +++ b/themes-default/slim/package.json @@ -49,7 +49,7 @@ "cross-env": "5.2.0", "css-loader": "1.0.1", "date-fns": "1.29.0", - "eslint": "5.9.0", + "eslint": "5.10.0", "eslint-config-xo": "0.25.0", "eslint-plugin-vue": "5.0.0-beta.4", "esm": "3.0.84", @@ -63,13 +63,13 @@ "is-visible": "2.2.0", "jquery": "3.3.1", "lodash": "4.17.11", - "mini-css-extract-plugin": "0.4.4", + "mini-css-extract-plugin": "0.4.5", "nyc": "13.1.0", "require-extension-hooks": "0.3.3", "require-extension-hooks-babel": "1.0.0-beta.1", "require-extension-hooks-vue": "1.1.0", "run-sequence": "2.2.1", - "stylelint": "9.8.0", + "stylelint": "9.9.0", "stylelint-config-standard": "18.2.0", "tablesorter": "2.31.0", "vue": "2.5.17", diff --git a/themes-default/slim/src/components/helpers/name-pattern.vue b/themes-default/slim/src/components/helpers/name-pattern.vue index abb03f179a..b7125de49a 100644 --- a/themes-default/slim/src/components/helpers/name-pattern.vue +++ b/themes-default/slim/src/components/helpers/name-pattern.vue @@ -106,6 +106,16 @@ %0XE 03 + + Absolute Episode Number: + %AB + 003 + + + Xem Absolute Episode Number: + %XAB + 003 + Episode Name: %EN diff --git a/themes-default/slim/static/images/providers/nordicbits.png b/themes-default/slim/static/images/providers/nordicbits.png new file mode 100644 index 0000000000..649c556628 Binary files /dev/null and b/themes-default/slim/static/images/providers/nordicbits.png differ diff --git a/themes-default/slim/views/config_search.mako b/themes-default/slim/views/config_search.mako index 001643fa7e..318e56986f 100644 --- a/themes-default/slim/views/config_search.mako +++ b/themes-default/slim/views/config_search.mako @@ -23,7 +23,8 @@ window.app = new Vue({ { text: '4 hours', value: '4h' }, { text: '90 mins', value: '90m' }, { text: '45 mins', value: '45m' }, - { text: '24 hours', value: '15m' } + { text: '30 mins', value: '30m' }, + { text: '15 mins', value: '15m' } ], nzbGetPriorityOptions: [ { text: 'Very low', value: -100 }, @@ -257,13 +258,13 @@ window.app = new Vue({ return this.$store.state.search; }, stateClients() { - return this.$store.state.clients; + return this.$store.state.clients; }, torrentUsernameIsDisabled() { const { clients } = this; const { torrents } = clients; const { host, method } = torrents; - let torrentHost = host || '' + let torrentHost = host || ''; if (!['rtorrent', 'deluge'].includes(method) || method === 'rtorrent' && !torrentHost.startsWith('scgi://')) { 
return false; } @@ -273,7 +274,7 @@ window.app = new Vue({ const { clients } = this; const { torrents } = clients; const { host, method } = torrents; - let torrentHost = host || '' + let torrentHost = host || ''; if (method !== 'rtorrent' || method === 'rtorrent' && !torrentHost.startsWith('scgi://')) { return false; } @@ -283,7 +284,7 @@ window.app = new Vue({ const { clients } = this; const { torrents } = clients; const { host, method } = torrents; - let torrentHost = host || '' + let torrentHost = host || ''; if (method === 'rtorrent' && !torrentHost.startsWith('scgi://')) { return false; } @@ -437,12 +438,12 @@ window.app = new Vue({
[hunk body stripped of its HTML markup — the "General Search Settings" block ("How to manage searching with providers.") — changed template lines not recoverable]
@@ -540,9 +545,9 @@ window.app = new Vue({
[hunk body stripped of its HTML markup — changed template lines not recoverable]
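
As a closing illustration of the central mechanism in this diff (the SickBeard.py, medusa/__main__.py, medusa/app.py and medusa/cache.py hunks): the last-seen interpreter version is persisted as `python_version` in the config, compared against `sys.version_info` on startup, and when the major version differs the dogpile.cache DBM backends are reconfigured with `replace_existing_backend=True`, since cache files written under one major Python version may not be readable under the other. Below is a minimal, self-contained sketch of that idea, assuming only `dogpile.cache` is installed; the module layout, the stored value `[2, 7, 15]` and the temporary cache directory are illustrative, not Medusa's actual code.

```python
import os
import sys
import tempfile
from datetime import timedelta

from dogpile.cache import make_region

cache = make_region()


def migrate_python_version(stored_version):
    """Compare the version stored in config with the running interpreter.

    Returns (current_version, changed), where `changed` is True only when a
    previous version was recorded and its major number differs from the
    current one.
    """
    current_version = list(sys.version_info)[:3]
    changed = bool(stored_version) and stored_version[0] != current_version[0]
    return current_version, changed


def configure(cache_dir, replace=False):
    """Configure the application cache; rebuild the backend when `replace` is set."""
    cache.configure(
        'dogpile.cache.dbm',
        expiration_time=timedelta(days=1),
        arguments={'filename': os.path.join(cache_dir, 'application.dbm')},
        replace_existing_backend=replace,
    )


if __name__ == '__main__':
    stored = [2, 7, 15]  # hypothetical value read from config.ini; [] on first start
    stored, changed = migrate_python_version(stored)
    configure(tempfile.mkdtemp(), replace=changed)
```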