Merge remote-tracking branch 'origin/main'

jxxghp 2023-10-16 07:04:59 +08:00
commit 6e3ebd73c6
12 changed files with 104 additions and 33 deletions

View File

@@ -398,14 +398,15 @@ class ChainBase(metaclass=ABCMeta):
         """
         return self.run_module("post_torrents_message", message=message, torrents=torrents)
 
-    def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
+    def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
         """
         Scrape metadata
         :param path: media file path
         :param mediainfo: recognized media info
+        :param transfer_type: transfer mode
         :return: success or failure
         """
-        self.run_module("scrape_metadata", path=path, mediainfo=mediainfo)
+        self.run_module("scrape_metadata", path=path, mediainfo=mediainfo, transfer_type=transfer_type)
 
     def register_commands(self, commands: Dict[str, dict]) -> None:
         """

View File

@@ -2,12 +2,14 @@ from typing import Union
 from app.chain import ChainBase
 from app.schemas import Notification, MessageChannel
+from app.utils.system import SystemUtils
 
 
 class SystemChain(ChainBase):
     """
     System-level processing chain
     """
 
     def remote_clear_cache(self, channel: MessageChannel, userid: Union[int, str]):
         """
         Clear the system cache
@@ -15,3 +17,11 @@ class SystemChain(ChainBase):
         self.clear_cache()
         self.post_message(Notification(channel=channel,
                                        title=f"缓存清理完成!", userid=userid))
+
+    def restart(self, channel: MessageChannel, userid: Union[int, str]):
+        """
+        Restart the system
+        """
+        self.post_message(Notification(channel=channel,
+                                       title=f"系统正在重启,请耐心等候!", userid=userid))
+        SystemUtils.restart()
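
A hedged usage sketch of the new method, as the `/restart` command wired up in app/command.py below would invoke it; the `MessageChannel.Telegram` member, the `db` session handle, and the user id are illustrative assumptions:

    from app.chain.system import SystemChain
    from app.schemas import MessageChannel


    def restart_via_chain(db, userid: int) -> None:
        # Illustrative only: notify the requesting user, then restart the service.
        SystemChain(db).restart(channel=MessageChannel.Telegram, userid=userid)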

View File

@@ -1,4 +1,3 @@
-import glob
 import re
 import shutil
 import threading
@@ -237,7 +236,7 @@ class TransferChain(ChainBase):
             # Custom recognition
             if formaterHandler:
                 # Begin episode, end episode, PART
-                begin_ep, end_ep, part = formaterHandler.split_episode(file_path.stem)
+                begin_ep, end_ep, part = formaterHandler.split_episode(file_path.name)
                 if begin_ep is not None:
                     file_meta.begin_episode = begin_ep
                     file_meta.part = part
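
For context on the `file_path.stem` → `file_path.name` change above: `Path.stem` drops the extension, so a custom episode format that matches against the suffix or the full filename would never see it. A short self-contained illustration (the filename is made up):

    from pathlib import Path

    p = Path("Some.Show.S01E02.Part1.mkv")
    print(p.stem)  # Some.Show.S01E02.Part1      (extension stripped)
    print(p.name)  # Some.Show.S01E02.Part1.mkv  (full filename, as now passed to split_episode)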
@@ -358,7 +357,9 @@ class TransferChain(ChainBase):
                 )
                 # Scrape the single file
                 if settings.SCRAP_METADATA:
-                    self.scrape_metadata(path=transferinfo.target_path, mediainfo=file_mediainfo)
+                    self.scrape_metadata(path=transferinfo.target_path,
+                                         mediainfo=file_mediainfo,
+                                         transfer_type=transfer_type)
                 # Update progress
                 processed_num += 1
                 self.progress.update(value=processed_num / total_num * 100,

View File

@@ -19,7 +19,6 @@ from app.schemas import Notification
 from app.schemas.types import EventType, MessageChannel
 from app.utils.object import ObjectUtils
 from app.utils.singleton import Singleton
-from app.utils.system import SystemUtils
 
 
 class CommandChian(ChainBase):
@@ -139,7 +138,7 @@ class Command(metaclass=Singleton):
                 "data": {}
             },
             "/restart": {
-                "func": SystemUtils.restart,
+                "func": SystemChain(self._db).restart,
                 "description": "重启系统",
                 "category": "管理",
                 "data": {}

View File

@@ -470,11 +470,12 @@ class DoubanModule(_ModuleBase):
             return []
         return infos.get("subject_collection_items")
 
-    def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
+    def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
         """
         Scrape metadata
         :param path: media file path
         :param mediainfo: recognized media info
+        :param transfer_type: transfer type
         :return: success or failure
         """
         if settings.SCRAP_SOURCE != "douban":
@@ -500,7 +501,8 @@ class DoubanModule(_ModuleBase):
             scrape_path = path / path.name
             self.scraper.gen_scraper_files(meta=meta,
                                            mediainfo=MediaInfo(douban_info=doubaninfo),
-                                           file_path=scrape_path)
+                                           file_path=scrape_path,
+                                           transfer_type=transfer_type)
         else:
             # All files under the directory
             for file in SystemUtils.list_files(path, settings.RMT_MEDIAEXT):
@@ -525,7 +527,8 @@ class DoubanModule(_ModuleBase):
                     # Scrape
                     self.scraper.gen_scraper_files(meta=meta,
                                                    mediainfo=MediaInfo(douban_info=doubaninfo),
-                                                   file_path=file)
+                                                   file_path=file,
+                                                   transfer_type=transfer_type)
                 except Exception as e:
                     logger.error(f"刮削文件 {file} 失败,原因:{e}")
         logger.info(f"{path} 刮削完成")

View File

@@ -1,25 +1,34 @@
 import time
 from pathlib import Path
+from typing import Union
 from xml.dom import minidom
 
+from app.core.config import settings
 from app.core.context import MediaInfo
 from app.core.meta import MetaBase
 from app.log import logger
 from app.schemas.types import MediaType
 from app.utils.dom import DomUtils
 from app.utils.http import RequestUtils
+from app.utils.system import SystemUtils
 
 
 class DoubanScraper:
 
-    def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo, file_path: Path):
+    _transfer_type = settings.TRANSFER_TYPE
+
+    def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo,
+                          file_path: Path, transfer_type: str):
         """
         Generate scraper files
         :param meta: metadata
         :param mediainfo: media info
         :param file_path: file path or directory path
+        :param transfer_type: transfer type
         """
+        self._transfer_type = transfer_type
         try:
             # Movie
             if mediainfo.type == MediaType.MOVIE:
@@ -154,8 +163,7 @@ class DoubanScraper:
             # Save
             self.__save_nfo(doc, season_path.joinpath("season.nfo"))
 
-    @staticmethod
-    def __save_image(url: str, file_path: Path):
+    def __save_image(self, url: str, file_path: Path):
         """
         Download and save an image
         """
@@ -171,6 +179,9 @@ class DoubanScraper:
             logger.info(f"正在下载{file_path.stem}图片:{url} ...")
             r = RequestUtils().get_res(url=url)
             if r:
+                if self._transfer_type in ['rclone_move', 'rclone_copy']:
+                    self.__save_remove_file(file_path, r.content)
+                else:
                     file_path.write_bytes(r.content)
                 logger.info(f"图片已保存:{file_path}")
             else:
@@ -178,13 +189,29 @@ class DoubanScraper:
         except Exception as err:
             logger.error(f"{file_path.stem}图片下载失败:{err}")
 
-    @staticmethod
-    def __save_nfo(doc, file_path: Path):
+    def __save_nfo(self, doc, file_path: Path):
         """
         Save the NFO file
         """
         if file_path.exists():
             return
         xml_str = doc.toprettyxml(indent=" ", encoding="utf-8")
+        if self._transfer_type in ['rclone_move', 'rclone_copy']:
+            self.__save_remove_file(file_path, xml_str)
+        else:
             file_path.write_bytes(xml_str)
         logger.info(f"NFO文件已保存{file_path}")
+
+    def __save_remove_file(self, out_file: Path, content: Union[str, bytes]):
+        """
+        Save the file to the remote end
+        """
+        temp_file = settings.TEMP_PATH / str(out_file)[1:]
+        temp_file_dir = temp_file.parent
+        if not temp_file_dir.exists():
+            temp_file_dir.mkdir(parents=True, exist_ok=True)
+        temp_file.write_bytes(content)
+        if self._transfer_type == 'rclone_move':
+            SystemUtils.rclone_move(temp_file, out_file)
+        elif self._transfer_type == 'rclone_copy':
+            SystemUtils.rclone_copy(temp_file, out_file)
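
`SystemUtils.rclone_move` and `rclone_copy` are not part of this diff. A minimal sketch of what such helpers could look like, assuming they shell out to the rclone CLI and that the destination path maps onto a preconfigured remote; the remote name `MP:` and the exact subcommands are illustrative assumptions, not the project's actual implementation:

    import subprocess
    from pathlib import Path


    def rclone_move(src: Path, dst: Path) -> bool:
        """Move a local file to the remote path via rclone (illustrative only)."""
        ret = subprocess.run(["rclone", "moveto", str(src), f"MP:{dst}"]).returncode
        return ret == 0


    def rclone_copy(src: Path, dst: Path) -> bool:
        """Copy a local file to the remote path via rclone (illustrative only)."""
        ret = subprocess.run(["rclone", "copyto", str(src), f"MP:{dst}"]).returncode
        return ret == 0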

View File

@@ -389,6 +389,7 @@ class FileTransferModule(_ModuleBase):
             return TransferInfo(success=False,
                                 path=in_path,
                                 message=f"{target_dir} 目标路径不存在")
+
         # Destination directory in the media library
         target_dir = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_dir)

View File

@@ -187,11 +187,12 @@ class TheMovieDbModule(_ModuleBase):
         return [MediaInfo(tmdb_info=info) for info in results]
 
-    def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
+    def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
         """
         Scrape metadata
         :param path: media file path
         :param mediainfo: recognized media info
+        :param transfer_type: transfer type
         :return: success or failure
         """
         if settings.SCRAP_SOURCE != "themoviedb":
@@ -202,12 +203,14 @@ class TheMovieDbModule(_ModuleBase):
             logger.info(f"开始刮削蓝光原盘:{path} ...")
             scrape_path = path / path.name
             self.scraper.gen_scraper_files(mediainfo=mediainfo,
-                                           file_path=scrape_path)
+                                           file_path=scrape_path,
+                                           transfer_type=transfer_type)
         elif path.is_file():
             # Single file
             logger.info(f"开始刮削媒体库文件:{path} ...")
             self.scraper.gen_scraper_files(mediainfo=mediainfo,
-                                           file_path=path)
+                                           file_path=path,
+                                           transfer_type=transfer_type)
         else:
             # All files under the directory
             logger.info(f"开始刮削目录:{path} ...")
@@ -215,7 +218,8 @@ class TheMovieDbModule(_ModuleBase):
                 if not file:
                     continue
                 self.scraper.gen_scraper_files(mediainfo=mediainfo,
-                                               file_path=file)
+                                               file_path=file,
+                                               transfer_type=transfer_type)
         logger.info(f"{path} 刮削完成")
 
     def tmdb_discover(self, mtype: MediaType, sort_by: str, with_genres: str, with_original_language: str,

View File

@@ -1,5 +1,6 @@
 import time
 from pathlib import Path
+from typing import Union
 from xml.dom import minidom
 
 from requests import RequestException
@@ -12,21 +13,26 @@ from app.schemas.types import MediaType
 from app.utils.common import retry
 from app.utils.dom import DomUtils
 from app.utils.http import RequestUtils
+from app.utils.system import SystemUtils
 
 
 class TmdbScraper:
 
     tmdb = None
+    _transfer_type = settings.TRANSFER_TYPE
 
     def __init__(self, tmdb):
         self.tmdb = tmdb
 
-    def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path):
+    def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path, transfer_type: str):
         """
         Generate scraper files (NFO and images); the path passed in is a file path
         :param mediainfo: media info
         :param file_path: file path or directory path
+        :param transfer_type: transfer type
         """
+        self._transfer_type = transfer_type
 
         def __get_episode_detail(_seasoninfo: dict, _episode: int):
             """
             Get episode details from the season info
@@ -328,9 +334,8 @@ class TmdbScraper:
         # Save the file
         self.__save_nfo(doc, file_path.with_suffix(".nfo"))
 
-    @staticmethod
     @retry(RequestException, logger=logger)
-    def __save_image(url: str, file_path: Path):
+    def __save_image(self, url: str, file_path: Path):
         """
         Download and save an image
         """
@@ -340,6 +345,9 @@ class TmdbScraper:
             logger.info(f"正在下载{file_path.stem}图片:{url} ...")
             r = RequestUtils().get_res(url=url, raise_exception=True)
             if r:
+                if self._transfer_type in ['rclone_move', 'rclone_copy']:
+                    self.__save_remove_file(file_path, r.content)
+                else:
                     file_path.write_bytes(r.content)
                 logger.info(f"图片已保存:{file_path}")
             else:
@@ -349,13 +357,29 @@ class TmdbScraper:
         except Exception as err:
             logger.error(f"{file_path.stem}图片下载失败:{err}")
 
-    @staticmethod
-    def __save_nfo(doc, file_path: Path):
+    def __save_nfo(self, doc, file_path: Path):
         """
         Save the NFO file
         """
         if file_path.exists():
             return
         xml_str = doc.toprettyxml(indent=" ", encoding="utf-8")
+        if self._transfer_type in ['rclone_move', 'rclone_copy']:
+            self.__save_remove_file(file_path, xml_str)
+        else:
             file_path.write_bytes(xml_str)
         logger.info(f"NFO文件已保存{file_path}")
+
+    def __save_remove_file(self, out_file: Path, content: Union[str, bytes]):
+        """
+        Save the file to the remote end
+        """
+        temp_file = settings.TEMP_PATH / str(out_file)[1:]
+        temp_file_dir = temp_file.parent
+        if not temp_file_dir.exists():
+            temp_file_dir.mkdir(parents=True, exist_ok=True)
+        temp_file.write_bytes(content)
+        if self._transfer_type == 'rclone_move':
+            SystemUtils.rclone_move(temp_file, out_file)
+        elif self._transfer_type == 'rclone_copy':
+            SystemUtils.rclone_copy(temp_file, out_file)
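
On the `settings.TEMP_PATH / str(out_file)[1:]` line above: slicing off the leading `/` lets the absolute destination path be mirrored underneath the temp directory before rclone pushes it to the remote. A short self-contained illustration (the paths are made up, and the slice assumes POSIX-style absolute paths):

    from pathlib import Path

    TEMP_PATH = Path("/tmp/moviepilot")                        # stand-in for settings.TEMP_PATH
    out_file = Path("/media/movies/Example (2023)/poster.jpg")

    temp_file = TEMP_PATH / str(out_file)[1:]
    print(temp_file)   # /tmp/moviepilot/media/movies/Example (2023)/poster.jpg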

View File

@@ -350,7 +350,8 @@ class DirMonitor(_PluginBase):
                     # Scrape the single file
                     if settings.SCRAP_METADATA:
                         self.chain.scrape_metadata(path=transferinfo.target_path,
-                                                   mediainfo=mediainfo)
+                                                   mediainfo=mediainfo,
+                                                   transfer_type=transfer_type)
 
                     """
                     {

View File

@@ -390,7 +390,7 @@ class LibraryScraper(_PluginBase):
                 # Scrape the single file
                 if scrap_metadata:
-                    self.chain.scrape_metadata(path=file, mediainfo=mediainfo)
+                    self.chain.scrape_metadata(path=file, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE)
 
     @staticmethod
     def __get_tmdbid_from_nfo(file_path: Path):

View File

@@ -1 +1 @@
-APP_VERSION = 'v1.3.2-1'
+APP_VERSION = 'v1.3.3'