diff --git a/app/chain/subscribe.py b/app/chain/subscribe.py
index 06ac0bd3..2d661e56 100644
--- a/app/chain/subscribe.py
+++ b/app/chain/subscribe.py
@@ -79,6 +79,7 @@ class SubscribeChain(_ChainBase):
         subscribes = self.subscribes.list(state)
         # Iterate over subscriptions
         for subscribe in subscribes:
+            logger.info(f'Start searching subscription, title: {subscribe.name} ...')
             # If the state is N, update it to R
             if subscribe.state == 'N':
                 self.subscribes.update(subscribe.id, {'state': 'R'})
@@ -121,10 +122,18 @@ class SubscribeChain(_ChainBase):
         indexers = self.siteshelper.get_indexers()
         # Iterate over sites and cache their resources
         for indexer in indexers:
+            logger.info(f'Start refreshing site resources, site: {indexer.get("name")} ...')
             domain = StringUtils.get_url_domain(indexer.get("domain"))
             torrents: List[TorrentInfo] = self.run_module("refresh_torrents", sites=[indexer])
             if torrents:
                 self._torrents_cache[domain] = []
+                # Filter torrents
+                result: List[TorrentInfo] = self.run_module("filter_torrents", torrent_list=torrents)
+                if result is not None:
+                    torrents = result
+                if not torrents:
+                    logger.warn(f'{indexer.get("name")} has no resources matching the filter rules')
+                    continue
                 for torrent in torrents:
                     # Identify
                     meta = MetaInfo(torrent.title, torrent.description)
@@ -147,6 +156,7 @@ class SubscribeChain(_ChainBase):
         subscribes = self.subscribes.list('R')
         # Iterate over subscriptions
         for subscribe in subscribes:
+            logger.info(f'Start matching subscription, title: {subscribe.name} ...')
             # Generate metadata
             meta = MetaInfo(subscribe.name)
             meta.year = subscribe.year
diff --git a/app/log.py b/app/log.py
index ae72f8a1..0d821887 100644
--- a/app/log.py
+++ b/app/log.py
@@ -12,7 +12,7 @@
 console_handler = logging.StreamHandler()
 console_handler.setLevel(logging.DEBUG)
 
 # Create the file output handler
-file_handler = RotatingFileHandler(filename=settings.LOG_PATH / 'nasbot.log',
+file_handler = RotatingFileHandler(filename=settings.LOG_PATH / 'moviepilot.log',
                                    mode='w',
                                    maxBytes=5 * 1024 * 1024,
                                    backupCount=3,
diff --git a/app/modules/indexer/__init__.py b/app/modules/indexer/__init__.py
index 46da3e92..314db3ed 100644
--- a/app/modules/indexer/__init__.py
+++ b/app/modules/indexer/__init__.py
@@ -100,21 +100,17 @@ class IndexerModule(_ModuleBase):
                     mtype=mediainfo.type
                 )
         except Exception as err:
-            error_flag = True
-            print(str(err))
+            logger.error(f"{site.get('name')} search error: {err}")
 
         # Time spent indexing
         seconds = round((datetime.now() - start_time).seconds, 1)
-        if error_flag:
-            logger.error(f"{site.get('name')} search failed, took {seconds} seconds")
-        else:
-            logger.info(f"{site.get('name')} search finished, took {seconds} seconds")
+
         # Return results
         if len(result_array) == 0:
-            logger.warn(f"{site.get('name')} returned no results")
+            logger.warn(f"{site.get('name')} returned no results, took {seconds} seconds")
             return []
         else:
-            logger.warn(f"{site.get('name')} returned data: {len(result_array)}")
+            logger.warn(f"{site.get('name')} search finished, took {seconds} seconds, {len(result_array)} results returned")
             # Merge site info and return as TorrentInfo
             return [TorrentInfo(site=site.get("id"),
                                 site_name=site.get("name"),
diff --git a/app/modules/indexer/spider.py b/app/modules/indexer/spider.py
index 483c7a33..4d913a5b 100644
--- a/app/modules/indexer/spider.py
+++ b/app/modules/indexer/spider.py
@@ -113,7 +113,7 @@ class TorrentSpider(feapder.AirSpider):
         if self.domain and not str(self.domain).endswith("/"):
             self.domain = self.domain + "/"
         if indexer.get('ua'):
-            self.ua = indexer.get('ua')
+            self.ua = indexer.get('ua') or settings.USER_AGENT
         else:
             self.ua = settings.USER_AGENT
         if indexer.get('proxy'):
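
Note on the new filtering step in app/chain/subscribe.py: the return value of run_module("filter_torrents", ...) is compared against None before the emptiness check, presumably because run_module() returns None when no loaded module implements the handler, whereas an empty list is a real answer meaning every torrent was rejected. A minimal, self-contained sketch of that distinction follows; the apply_filter helper and the plain-string torrents are illustrative only and not part of the project:

from typing import List, Optional


def apply_filter(torrents: List[str], filtered: Optional[List[str]]) -> List[str]:
    # None means no filter module handled the call: keep the original list.
    # An empty list is a genuine result: every torrent was filtered out.
    return torrents if filtered is None else filtered


if __name__ == "__main__":
    cached = ["torrent-a", "torrent-b"]
    print(apply_filter(cached, None))  # -> ['torrent-a', 'torrent-b']
    print(apply_filter(cached, []))    # -> []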