diff --git a/app/chain/__init__.py b/app/chain/__init__.py index fafba887..c63cc00c 100644 --- a/app/chain/__init__.py +++ b/app/chain/__init__.py @@ -398,14 +398,15 @@ class ChainBase(metaclass=ABCMeta): """ return self.run_module("post_torrents_message", message=message, torrents=torrents) - def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None: + def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None: """ 刮削元数据 :param path: 媒体文件路径 :param mediainfo: 识别的媒体信息 + :param transfer_type: 转移模式 :return: 成功或失败 """ - self.run_module("scrape_metadata", path=path, mediainfo=mediainfo) + self.run_module("scrape_metadata", path=path, mediainfo=mediainfo, transfer_type=transfer_type) def register_commands(self, commands: Dict[str, dict]) -> None: """ diff --git a/app/chain/transfer.py b/app/chain/transfer.py index ffedca53..0d2b7eab 100644 --- a/app/chain/transfer.py +++ b/app/chain/transfer.py @@ -1,4 +1,3 @@ -import glob import re import shutil import threading @@ -358,7 +357,9 @@ class TransferChain(ChainBase): ) # 刮削单个文件 if settings.SCRAP_METADATA: - self.scrape_metadata(path=transferinfo.target_path, mediainfo=file_mediainfo) + self.scrape_metadata(path=transferinfo.target_path, + mediainfo=file_mediainfo, + transfer_type=transfer_type) # 更新进度 processed_num += 1 self.progress.update(value=processed_num / total_num * 100, diff --git a/app/modules/douban/__init__.py b/app/modules/douban/__init__.py index be953348..1ac48b64 100644 --- a/app/modules/douban/__init__.py +++ b/app/modules/douban/__init__.py @@ -470,11 +470,12 @@ class DoubanModule(_ModuleBase): return [] return infos.get("subject_collection_items") - def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None: + def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None: """ 刮削元数据 :param path: 媒体文件路径 :param mediainfo: 识别的媒体信息 + :param transfer_type: 传输类型 :return: 成功或失败 """ if settings.SCRAP_SOURCE != "douban": @@ -500,7 +501,8 @@ 
class DoubanModule(_ModuleBase): scrape_path = path / path.name self.scraper.gen_scraper_files(meta=meta, mediainfo=MediaInfo(douban_info=doubaninfo), - file_path=scrape_path) + file_path=scrape_path, + transfer_type=transfer_type) else: # 目录下的所有文件 for file in SystemUtils.list_files(path, settings.RMT_MEDIAEXT): @@ -525,7 +527,8 @@ class DoubanModule(_ModuleBase): # 刮削 self.scraper.gen_scraper_files(meta=meta, mediainfo=MediaInfo(douban_info=doubaninfo), - file_path=file) + file_path=file, + transfer_type=transfer_type) except Exception as e: logger.error(f"刮削文件 {file} 失败,原因:{e}") logger.info(f"{path} 刮削完成") diff --git a/app/modules/douban/scraper.py b/app/modules/douban/scraper.py index 20fbd340..ce901fca 100644 --- a/app/modules/douban/scraper.py +++ b/app/modules/douban/scraper.py @@ -1,25 +1,34 @@ import time from pathlib import Path +from typing import Union from xml.dom import minidom +from app.core.config import settings from app.core.context import MediaInfo from app.core.meta import MetaBase from app.log import logger from app.schemas.types import MediaType from app.utils.dom import DomUtils from app.utils.http import RequestUtils +from app.utils.system import SystemUtils class DoubanScraper: - def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo, file_path: Path): + _transfer_type = settings.TRANSFER_TYPE + + def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo, + file_path: Path, transfer_type: str): """ 生成刮削文件 :param meta: 元数据 :param mediainfo: 媒体信息 :param file_path: 文件路径或者目录路径 + :param transfer_type: 传输类型 """ + self._transfer_type = transfer_type + try: # 电影 if mediainfo.type == MediaType.MOVIE: @@ -154,8 +163,7 @@ class DoubanScraper: # 保存 self.__save_nfo(doc, season_path.joinpath("season.nfo")) - @staticmethod - def __save_image(url: str, file_path: Path): + def __save_image(self, url: str, file_path: Path): """ 下载图片并保存 """ @@ -171,20 +179,39 @@ class DoubanScraper: logger.info(f"正在下载{file_path.stem}图片:{url} ...") r =
RequestUtils().get_res(url=url) if r: - file_path.write_bytes(r.content) + if self._transfer_type in ['rclone_move', 'rclone_copy']: + self.__save_remove_file(file_path, r.content) + else: + file_path.write_bytes(r.content) logger.info(f"图片已保存:{file_path}") else: logger.info(f"{file_path.stem}图片下载失败,请检查网络连通性") except Exception as err: logger.error(f"{file_path.stem}图片下载失败:{err}") - @staticmethod - def __save_nfo(doc, file_path: Path): + def __save_nfo(self, doc, file_path: Path): """ 保存NFO """ if file_path.exists(): return xml_str = doc.toprettyxml(indent=" ", encoding="utf-8") - file_path.write_bytes(xml_str) + if self._transfer_type in ['rclone_move', 'rclone_copy']: + self.__save_remove_file(file_path, xml_str) + else: + file_path.write_bytes(xml_str) logger.info(f"NFO文件已保存:{file_path}") + + def __save_remove_file(self, out_file: Path, content: Union[str, bytes]): + """ + 保存文件到远端 + """ + temp_file = settings.TEMP_PATH / str(out_file)[1:] + temp_file_dir = temp_file.parent + if not temp_file_dir.exists(): + temp_file_dir.mkdir(parents=True, exist_ok=True) + temp_file.write_bytes(content) + if self._transfer_type == 'rclone_move': + SystemUtils.rclone_move(temp_file, out_file) + elif self._transfer_type == 'rclone_copy': + SystemUtils.rclone_copy(temp_file, out_file) diff --git a/app/modules/themoviedb/__init__.py b/app/modules/themoviedb/__init__.py index 87f7a7f4..def52a95 100644 --- a/app/modules/themoviedb/__init__.py +++ b/app/modules/themoviedb/__init__.py @@ -187,11 +187,12 @@ class TheMovieDbModule(_ModuleBase): return [MediaInfo(tmdb_info=info) for info in results] - def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None: + def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None: """ 刮削元数据 :param path: 媒体文件路径 :param mediainfo: 识别的媒体信息 + :param transfer_type: 转移类型 :return: 成功或失败 """ if settings.SCRAP_SOURCE != "themoviedb": @@ -202,12 +203,14 @@ class TheMovieDbModule(_ModuleBase): logger.info(f"开始刮削蓝光原盘:{path} 
...") scrape_path = path / path.name self.scraper.gen_scraper_files(mediainfo=mediainfo, - file_path=scrape_path) + file_path=scrape_path, + transfer_type=transfer_type) elif path.is_file(): # 单个文件 logger.info(f"开始刮削媒体库文件:{path} ...") self.scraper.gen_scraper_files(mediainfo=mediainfo, - file_path=path) + file_path=path, + transfer_type=transfer_type) else: # 目录下的所有文件 logger.info(f"开始刮削目录:{path} ...") @@ -215,7 +218,8 @@ class TheMovieDbModule(_ModuleBase): if not file: continue self.scraper.gen_scraper_files(mediainfo=mediainfo, - file_path=file) + file_path=file, + transfer_type=transfer_type) logger.info(f"{path} 刮削完成") def tmdb_discover(self, mtype: MediaType, sort_by: str, with_genres: str, with_original_language: str, diff --git a/app/modules/themoviedb/scraper.py b/app/modules/themoviedb/scraper.py index 966343b3..5f709790 100644 --- a/app/modules/themoviedb/scraper.py +++ b/app/modules/themoviedb/scraper.py @@ -1,5 +1,6 @@ import time from pathlib import Path +from typing import Union from xml.dom import minidom from requests import RequestException @@ -12,21 +13,26 @@ from app.schemas.types import MediaType from app.utils.common import retry from app.utils.dom import DomUtils from app.utils.http import RequestUtils +from app.utils.system import SystemUtils class TmdbScraper: tmdb = None + _transfer_type = settings.TRANSFER_TYPE def __init__(self, tmdb): self.tmdb = tmdb - def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path): + def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path, transfer_type: str): """ 生成刮削文件,包括NFO和图片,传入路径为文件路径 :param mediainfo: 媒体信息 :param file_path: 文件路径或者目录路径 + :param transfer_type: 传输类型 """ + self._transfer_type = transfer_type + def __get_episode_detail(_seasoninfo: dict, _episode: int): """ 根据季信息获取集的信息 @@ -328,9 +334,8 @@ class TmdbScraper: # 保存文件 self.__save_nfo(doc, file_path.with_suffix(".nfo")) - @staticmethod @retry(RequestException, logger=logger) - def __save_image(url: str, file_path: Path): + def 
__save_image(self, url: str, file_path: Path): """ 下载图片并保存 """ @@ -340,7 +345,10 @@ class TmdbScraper: logger.info(f"正在下载{file_path.stem}图片:{url} ...") r = RequestUtils().get_res(url=url, raise_exception=True) if r: - file_path.write_bytes(r.content) + if self._transfer_type in ['rclone_move', 'rclone_copy']: + self.__save_remove_file(file_path, r.content) + else: + file_path.write_bytes(r.content) logger.info(f"图片已保存:{file_path}") else: logger.info(f"{file_path.stem}图片下载失败,请检查网络连通性") @@ -349,13 +357,29 @@ class TmdbScraper: except Exception as err: logger.error(f"{file_path.stem}图片下载失败:{err}") - @staticmethod - def __save_nfo(doc, file_path: Path): + def __save_nfo(self, doc, file_path: Path): """ 保存NFO """ if file_path.exists(): return xml_str = doc.toprettyxml(indent=" ", encoding="utf-8") - file_path.write_bytes(xml_str) + if self._transfer_type in ['rclone_move', 'rclone_copy']: + self.__save_remove_file(file_path, xml_str) + else: + file_path.write_bytes(xml_str) logger.info(f"NFO文件已保存:{file_path}") + + def __save_remove_file(self, out_file: Path, content: Union[str, bytes]): + """ + 保存文件到远端 + """ + temp_file = settings.TEMP_PATH / str(out_file)[1:] + temp_file_dir = temp_file.parent + if not temp_file_dir.exists(): + temp_file_dir.mkdir(parents=True, exist_ok=True) + temp_file.write_bytes(content) + if self._transfer_type == 'rclone_move': + SystemUtils.rclone_move(temp_file, out_file) + elif self._transfer_type == 'rclone_copy': + SystemUtils.rclone_copy(temp_file, out_file) diff --git a/app/plugins/dirmonitor/__init__.py b/app/plugins/dirmonitor/__init__.py index 28645285..b4efe583 100644 --- a/app/plugins/dirmonitor/__init__.py +++ b/app/plugins/dirmonitor/__init__.py @@ -350,7 +350,8 @@ class DirMonitor(_PluginBase): # 刮削单个文件 if settings.SCRAP_METADATA: self.chain.scrape_metadata(path=transferinfo.target_path, - mediainfo=mediainfo) + mediainfo=mediainfo, + transfer_type=transfer_type) """ { diff --git a/app/plugins/libraryscraper/__init__.py 
b/app/plugins/libraryscraper/__init__.py index 014841df..00beb143 100644 --- a/app/plugins/libraryscraper/__init__.py +++ b/app/plugins/libraryscraper/__init__.py @@ -390,7 +390,7 @@ class LibraryScraper(_PluginBase): # 刮削单个文件 if scrap_metadata: - self.chain.scrape_metadata(path=file, mediainfo=mediainfo) + self.chain.scrape_metadata(path=file, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE) @staticmethod def __get_tmdbid_from_nfo(file_path: Path):