jxxghp 2023-06-07 18:28:54 +08:00
parent b6ae0886af
commit 852a255eaf
10 changed files with 39 additions and 12 deletions

View File

@@ -1,3 +1,4 @@
+import traceback
 from abc import abstractmethod
 from typing import Optional, Any
@@ -54,5 +55,5 @@ class _ChainBase(AbstractSingleton, metaclass=Singleton):
                 if temp:
                     result = temp
             except Exception as err:
-                logger.error(f"运行模块出错:{module.__class__.__name__} - {err}")
+                logger.error(f"运行模块 {method} 出错:{module.__class__.__name__} - {err}\n{traceback.print_exc()}")
         return result
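
For reference, a minimal standalone sketch of the two standard-library traceback helpers involved in the hunk above (standard Python behaviour, not code from this commit): traceback.print_exc() writes the traceback to stderr and returns None, while traceback.format_exc() returns it as a string, so only the latter produces text when embedded in an f-string.

    import traceback

    try:
        1 / 0
    except Exception as err:
        # print_exc() writes to stderr and returns None
        printed = traceback.print_exc()
        # format_exc() returns the traceback text, suitable for f-string logging
        text = traceback.format_exc()
        print(f"error: {err}\n{text}")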

View File

@@ -27,11 +27,14 @@ class TransferChain(_ChainBase):
         for torrent in torrents:
             # 识别元数据
             meta = MetaInfo(torrent.get("title"))
+            if not meta.get_name():
+                logger.warn(f'未识别到元数据,标题:{torrent.get("title")}')
+                continue
             # 识别媒体信息
             mediainfo: MediaInfo = self.run_module('recognize_media', meta=meta)
             if not mediainfo:
                 logger.warn(f'未识别到媒体信息,标题:{torrent.get("title")}')
-                return False
+                continue
             logger.info(f"{torrent.get('title')} 识别为:{mediainfo.type.value} {mediainfo.get_title_string()}")
             # 更新媒体图片
             self.run_module("obtain_image", mediainfo=mediainfo)
@@ -39,7 +42,7 @@
             dest_path: Path = self.run_module("transfer", mediainfo=mediainfo, path=torrent.get("path"))
             if not dest_path:
                 logger.warn(f"{torrent.get('title')} 转移失败")
-                return False
+                continue
             # 刮剥
             self.run_module("scrape_metadata", path=dest_path, mediainfo=mediainfo)

View File

@@ -3,6 +3,7 @@ from pathlib import Path
 from typing import Optional, List, Tuple, Union, Set
 from fastapi import Request
+from ruamel.yaml import CommentedMap

 from app.core.context import MediaInfo, TorrentInfo
 from app.core.meta import MetaBase
@@ -93,7 +94,7 @@ class _ModuleBase(metaclass=ABCMeta):
         """
         pass

-    def search_torrents(self, mediainfo: Optional[MediaInfo], sites: List[dict],
+    def search_torrents(self, mediainfo: Optional[MediaInfo], sites: List[CommentedMap],
                         keyword: str = None) -> Optional[List[TorrentInfo]]:
         """
         搜索站点,多个站点需要多线程处理
@@ -104,7 +105,7 @@ class _ModuleBase(metaclass=ABCMeta):
         """
         pass

-    def refresh_torrents(self, sites: List[dict]) -> Optional[List[TorrentInfo]]:
+    def refresh_torrents(self, sites: List[CommentedMap]) -> Optional[List[TorrentInfo]]:
         """
         获取站点最新一页的种子,多个站点需要多线程处理
         :param sites: 站点列表
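
The signature changes above narrow sites from List[dict] to List[CommentedMap]. CommentedMap is the mapping type ruamel.yaml returns when loading YAML in its default round-trip mode; it behaves like a dict (including .get()), so only the type hints change, not the call sites. A minimal sketch, assuming a small inline YAML document rather than this project's site files:

    from ruamel.yaml import YAML, CommentedMap

    yaml = YAML()
    site = yaml.load("id: 1\ndomain: https://example.org/\n")
    assert isinstance(site, CommentedMap)
    # dict-style access keeps working, which is why callers are unchanged
    print(site.get("id"), site["domain"])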

View File

@@ -14,6 +14,11 @@ class Emby(metaclass=Singleton):

     def __init__(self):
         self._host = settings.EMBY_HOST
+        if self._host:
+            if not self._host.endswith("/"):
+                self._host += "/"
+            if not self._host.startswith("http"):
+                self._host = "http://" + self._host
         self._apikey = settings.EMBY_API_KEY
         self._user = self.get_user()
         self._folders = self.get_emby_folders()
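
The same host normalization is added to the Jellyfin and Plex modules further down. A minimal standalone sketch of the logic (normalize_host is a hypothetical helper name, not defined in this commit):

    def normalize_host(host: str) -> str:
        """Ensure a trailing slash and an http(s) scheme, as the __init__ hunks do."""
        if not host:
            return host
        if not host.endswith("/"):
            host += "/"
        if not host.startswith("http"):
            host = "http://" + host
        return host

    print(normalize_host("emby.local:8096"))  # -> http://emby.local:8096/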

View File

@@ -3,6 +3,8 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from typing import List, Optional, Tuple, Union

+from ruamel.yaml import CommentedMap
+
 from app.core import MediaInfo, TorrentInfo
 from app.log import logger
 from app.modules import _ModuleBase
@@ -23,7 +25,7 @@ class IndexerModule(_ModuleBase):
     def init_setting(self) -> Tuple[str, Union[str, bool]]:
         return "INDEXER", "builtin"

-    def search_torrents(self, mediainfo: Optional[MediaInfo], sites: List[dict],
+    def search_torrents(self, mediainfo: Optional[MediaInfo], sites: List[CommentedMap],
                         keyword: str = None) -> Optional[List[TorrentInfo]]:
         """
         搜索站点,多个站点需要多线程处理
@@ -54,7 +56,7 @@
         # 返回
         return results

-    def __search(self, mediainfo: MediaInfo, site: dict,
+    def __search(self, mediainfo: MediaInfo, site: CommentedMap,
                  keyword: str = None) -> Optional[List[TorrentInfo]]:
         """
         搜索一个站点
@@ -111,7 +113,7 @@
                              **result) for result in result_array]

     @staticmethod
-    def __spider_search(indexer: dict,
+    def __spider_search(indexer: CommentedMap,
                         keyword: str = None,
                         mtype: MediaType = None,
                         page: int = None, timeout: int = 30) -> (bool, List[dict]):
@@ -145,7 +147,7 @@
             _spider.torrents_info_array.clear()
         return result_flag, result_array

-    def refresh_torrents(self, sites: List[dict]) -> Optional[List[TorrentInfo]]:
+    def refresh_torrents(self, sites: List[CommentedMap]) -> Optional[List[TorrentInfo]]:
         """
         获取站点最新一页的种子,多个站点需要多线程处理
         :param sites: 站点列表
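
The docstrings above note that multiple sites are searched with multi-threading, and the file imports ThreadPoolExecutor and as_completed. A minimal sketch of that fan-out pattern, assuming a hypothetical search_one(site) callable rather than this module's private methods:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def search_all(sites, search_one):
        results = []
        with ThreadPoolExecutor(max_workers=len(sites) or 1) as executor:
            futures = [executor.submit(search_one, site) for site in sites]
            for future in as_completed(futures):
                torrents = future.result()
                if torrents:
                    results.extend(torrents)
        return results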

View File

@@ -7,6 +7,7 @@ import feapder
 from feapder.utils.tools import urlencode
 from jinja2 import Template
 from pyquery import PyQuery
+from ruamel.yaml import CommentedMap

 from app.core import settings
 from app.log import logger
@@ -81,7 +82,7 @@ class TorrentSpider(feapder.AirSpider):
     # 种子列表
     torrents_info_array: list = []

-    def setparam(self, indexer,
+    def setparam(self, indexer: CommentedMap,
                  keyword: [str, list] = None,
                  page=None,
                  referer=None,

View File

@@ -1,6 +1,8 @@
 import re
 from typing import Tuple, List

+from ruamel.yaml import CommentedMap
+
 from app.core import settings
 from app.log import logger
 from app.utils.http import RequestUtils
@@ -20,7 +22,7 @@ class TNodeSpider:
     _downloadurl = "%sapi/torrent/download/%s"
     _pageurl = "%storrent/info/%s"

-    def __init__(self, indexer: dict):
+    def __init__(self, indexer: CommentedMap):
         if indexer:
             self._indexerid = indexer.get('id')
             self._domain = indexer.get('domain')

View File

@@ -1,6 +1,8 @@
 from typing import List, Tuple
 from urllib.parse import quote

+from ruamel.yaml import CommentedMap
+
 from app.core import settings
 from app.log import logger
 from app.utils.http import RequestUtils
@@ -16,7+18,7 @@ class TorrentLeech:
     _downloadurl = "%sdownload/%s/%s"
     _pageurl = "%storrent/%s"

-    def __init__(self, indexer: dict):
+    def __init__(self, indexer: CommentedMap):
         self._indexer = indexer
         if indexer.get('proxy'):
             self._proxy = settings.PROXY

View File

@@ -12,6 +12,11 @@ class Jellyfin(metaclass=Singleton):

     def __init__(self):
         self._host = settings.JELLYFIN_HOST
+        if self._host:
+            if not self._host.endswith("/"):
+                self._host += "/"
+            if not self._host.startswith("http"):
+                self._host = "http://" + self._host
         self._apikey = settings.JELLYFIN_API_KEY
         self._user = self.get_user()
         self._serverid = self.get_server_id()

View File

@@ -14,6 +14,11 @@ class Plex(metaclass=Singleton):

     def __init__(self):
         self._host = settings.PLEX_HOST
+        if self._host:
+            if not self._host.endswith("/"):
+                self._host += "/"
+            if not self._host.startswith("http"):
+                self._host = "http://" + self._host
         self._token = settings.PLEX_TOKEN
         if self._host and self._token:
             try: