diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 647a7bc1..e922ccce 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -96,6 +96,11 @@ jobs:
           New-Item -Path "nginx/temp/__keep__.txt" -ItemType File -Force
           New-Item -Path "nginx/logs" -ItemType Directory -Force
           New-Item -Path "nginx/logs/__keep__.txt" -ItemType File -Force
+          Invoke-WebRequest -Uri "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" -OutFile "MoviePilot-Plugins-main.zip"
+          Expand-Archive -Path "MoviePilot-Plugins-main.zip" -DestinationPath "MoviePilot-Plugins-main"
+          Move-Item -Path "MoviePilot-Plugins-main/MoviePilot-Plugins-main/plugins/*" -Destination "plugins/"
+          Remove-Item -Path "MoviePilot-Plugins-main.zip"
+          Remove-Item -Path "MoviePilot-Plugins-main" -Recurse -Force
         shell: pwsh

       - name: Pyinstaller
diff --git a/Dockerfile b/Dockerfile
index 7ef9c171..bbd34ebc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -76,7 +76,10 @@ RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
     && locale-gen zh_CN.UTF-8 \
     && FRONTEND_VERSION=$(curl -sL "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) \
     && curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
-    && mv /dist /public
+    && mv /dist /public \
+    && curl -sL "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d / - \
+    && mv /MoviePilot-Plugins-main/plugins/* /app/app/plugins/ \
+    && rm -rf /MoviePilot-Plugins-main
 EXPOSE 3000
 VOLUME [ "/config" ]
 ENTRYPOINT [ "/entrypoint" ]
diff --git a/app/plugins/autobackup/__init__.py b/app/plugins/autobackup/__init__.py
deleted file mode 100644
index cd10eba3..00000000
--- a/app/plugins/autobackup/__init__.py
+++ /dev/null
@@ -1,342 +0,0 @@
-import glob
-import os
-import shutil
-import time
-from datetime import datetime, timedelta
-from pathlib import Path
-
-import pytz
-from apscheduler.schedulers.background import BackgroundScheduler
-from apscheduler.triggers.cron import CronTrigger
-
-from app import schemas
-from app.core.config import settings
-from app.plugins import _PluginBase
-from typing import Any, List, Dict, Tuple, Optional
-from app.log import logger
-from app.schemas import NotificationType
-
-
-class AutoBackup(_PluginBase):
-    # 插件名称
-    plugin_name = "自动备份"
-    # 插件描述
-    plugin_desc = "自动备份数据和配置文件。"
-    # 插件图标
-    plugin_icon = "backup.png"
-    # 主题色
-    plugin_color = "#4FB647"
-    # 插件版本
-    plugin_version = "1.0"
-    # 插件作者
-    plugin_author = "thsrite"
-    # 作者主页
-    author_url = "https://github.com/thsrite"
-    # 插件配置项ID前缀
-    plugin_config_prefix = "autobackup_"
-    # 加载顺序
-    plugin_order = 17
-    # 可使用的用户级别
-    auth_level = 1
-
-    # 私有属性
-    _enabled = False
-    # 任务执行间隔
-    _cron = None
-    _cnt = None
-    _onlyonce = False
-    _notify = False
-
-    # 定时器
-    _scheduler: Optional[BackgroundScheduler] = None
-
-    def init_plugin(self, config: dict = None):
-        # 停止现有任务
-        self.stop_service()
-
-        if config:
-            self._enabled = config.get("enabled")
-            self._cron = config.get("cron")
-            self._cnt = config.get("cnt")
-            self._notify = config.get("notify")
-            self._onlyonce = config.get("onlyonce")
-
-        # 加载模块
-        if self._enabled:
-            # 定时服务
-            self._scheduler = BackgroundScheduler(timezone=settings.TZ)
-
-            if self._cron:
-                try:
-                    self._scheduler.add_job(func=self.__backup,
-                                            trigger=CronTrigger.from_crontab(self._cron),
-                                            name="自动备份")
-                except Exception as err:
-                    logger.error(f"定时任务配置错误:{str(err)}")
-
-            if self._onlyonce:
-
logger.info(f"自动备份服务启动,立即运行一次") - self._scheduler.add_job(func=self.__backup, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="自动备份") - # 关闭一次性开关 - self._onlyonce = False - self.update_config({ - "onlyonce": False, - "cron": self._cron, - "enabled": self._enabled, - "cnt": self._cnt, - "notify": self._notify, - }) - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __backup(self): - """ - 自动备份、删除备份 - """ - logger.info(f"当前时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))} 开始备份") - - # docker用默认路径 - bk_path = self.get_data_path() - - # 备份 - zip_file = self.backup_file(bk_path=bk_path) - - if zip_file: - success = True - msg = f"备份完成 备份文件 {zip_file}" - logger.info(msg) - else: - success = False - msg = "创建备份失败" - logger.error(msg) - - # 清理备份 - bk_cnt = 0 - del_cnt = 0 - if self._cnt: - # 获取指定路径下所有以"bk"开头的文件,按照创建时间从旧到新排序 - files = sorted(glob.glob(f"{bk_path}/bk**"), key=os.path.getctime) - bk_cnt = len(files) - # 计算需要删除的文件数 - del_cnt = bk_cnt - int(self._cnt) - if del_cnt > 0: - logger.info( - f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 需要删除备份文件数量 {del_cnt}") - - # 遍历并删除最旧的几个备份 - for i in range(del_cnt): - os.remove(files[i]) - logger.debug(f"删除备份文件 {files[i]} 成功") - else: - logger.info( - f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 无需删除") - - # 发送通知 - if self._notify: - self.post_message( - mtype=NotificationType.SiteMessage, - title="【自动备份任务完成】", - text=f"创建备份{'成功' if zip_file else '失败'}\n" - f"清理备份数量 {del_cnt}\n" - f"剩余备份数量 {bk_cnt - del_cnt}") - - return success, msg - - @staticmethod - def backup_file(bk_path: Path = None): - """ - @param bk_path 自定义备份路径 - """ - try: - # 创建备份文件夹 - config_path = Path(settings.CONFIG_PATH) - backup_file = f"bk_{time.strftime('%Y%m%d%H%M%S')}" - backup_path = bk_path / backup_file - backup_path.mkdir(parents=True) - # 把现有的相关文件进行copy备份 - if settings.LIBRARY_CATEGORY: - shutil.copy(f'{config_path}/category.yaml', backup_path) - shutil.copy(f'{config_path}/user.db', backup_path) - - zip_file = str(backup_path) + '.zip' - if os.path.exists(zip_file): - zip_file = str(backup_path) + '.zip' - shutil.make_archive(str(backup_path), 'zip', str(backup_path)) - shutil.rmtree(str(backup_path)) - return zip_file - except IOError: - return None - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - return [{ - "path": "/backup", - "endpoint": self.__backup, - "methods": ["GET"], - "summary": "MoviePilot备份", - "description": "MoviePilot备份", - }] - - def backup(self) -> schemas.Response: - """ - API调用备份 - """ - success, msg = self.__backup() - return schemas.Response( - success=success, - message=msg - ) - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '开启通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 
'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '备份周期' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cnt', - 'label': '最大保留备份数' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '备份文件路径默认为本地映射的config/plugins/AutoBackup。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "request_method": "POST", - "webhook_url": "" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/autoclean/__init__.py b/app/plugins/autoclean/__init__.py deleted file mode 100644 index 5a10038c..00000000 --- a/app/plugins/autoclean/__init__.py +++ /dev/null @@ -1,591 +0,0 @@ -import time -from collections import defaultdict -from datetime import datetime, timedelta -from pathlib import Path - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.transfer import TransferChain -from app.core.config import settings -from app.core.event import eventmanager -from app.db.downloadhistory_oper import DownloadHistoryOper -from app.db.transferhistory_oper import TransferHistoryOper -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple, Optional -from app.log import logger -from app.schemas import NotificationType, DownloadHistory -from app.schemas.types import EventType - - -class AutoClean(_PluginBase): - # 插件名称 - plugin_name = "定时清理媒体库" - # 插件描述 - plugin_desc = "定时清理用户下载的种子、源文件、媒体库文件。" - # 插件图标 - plugin_icon = "clean.png" - # 主题色 - plugin_color = "#3377ed" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "autoclean_" - # 加载顺序 - plugin_order = 23 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _cron = None - _type = None - _onlyonce = False - _notify = False - _cleantype = None - _cleandate = None - _cleanuser = None - _downloadhis = None - _transferhis = None - - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - - def init_plugin(self, config: dict = None): - # 停止现有任务 - self.stop_service() - - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._onlyonce = config.get("onlyonce") - self._notify = config.get("notify") - self._cleantype = config.get("cleantype") - self._cleandate = config.get("cleandate") - self._cleanuser = config.get("cleanuser") - - # 加载模块 - if self._enabled: - self._downloadhis = DownloadHistoryOper() - self._transferhis = TransferHistoryOper() - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - if self._cron: - try: - self._scheduler.add_job(func=self.__clean, - trigger=CronTrigger.from_crontab(self._cron), - name="定时清理媒体库") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - - if 
self._onlyonce: - logger.info(f"定时清理媒体库服务启动,立即运行一次") - self._scheduler.add_job(func=self.__clean, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="定时清理媒体库") - # 关闭一次性开关 - self._onlyonce = False - self.update_config({ - "onlyonce": False, - "cron": self._cron, - "cleantype": self._cleantype, - "cleandate": self._cleandate, - "enabled": self._enabled, - "cleanuser": self._cleanuser, - "notify": self._notify, - }) - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __get_clean_date(self, deltatime: str = None): - # 清理日期 - current_time = datetime.now() - if deltatime: - days_ago = current_time - timedelta(days=int(deltatime)) - else: - days_ago = current_time - timedelta(days=int(self._cleandate)) - return days_ago.strftime("%Y-%m-%d") - - def __clean(self): - """ - 定时清理媒体库 - """ - if not self._cleandate: - logger.error("未配置媒体库全局清理时间,停止运行") - return - - # 查询用户清理日期之前的下载历史,不填默认清理全部用户的下载 - if not self._cleanuser: - clean_date = self.__get_clean_date() - downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date) - logger.info(f'获取到日期 {clean_date} 之前的下载历史 {len(downloadhis_list)} 条') - self.__clean_history(date=clean_date, clean_type=self._cleantype, downloadhis_list=downloadhis_list) - - # 根据填写的信息判断怎么清理 - else: - # username:days#cleantype - clean_type = self._cleantype - clean_date = self._cleandate - - # 1.3.7版本及之前处理多位用户 - if str(self._cleanuser).count(','): - for username in str(self._cleanuser).split(","): - downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date, - username=username) - logger.info( - f'获取到用户 {username} 日期 {clean_date} 之前的下载历史 {len(downloadhis_list)} 条') - self.__clean_history(date=clean_date, clean_type=self._cleantype, downloadhis_list=downloadhis_list) - return - - for userinfo in str(self._cleanuser).split("\n"): - if userinfo.count('#'): - clean_type = userinfo.split('#')[1] - username_and_days = userinfo.split('#')[0] - else: - username_and_days = userinfo - if username_and_days.count(':'): - clean_date = username_and_days.split(':')[1] - username = username_and_days.split(':')[0] - else: - username = userinfo - - # 转strftime - clean_date = self.__get_clean_date(clean_date) - logger.info(f'{username} 使用 {clean_type} 清理方式,清理 {clean_date} 之前的下载历史') - downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date, - username=username) - logger.info( - f'获取到用户 {username} 日期 {clean_date} 之前的下载历史 {len(downloadhis_list)} 条') - self.__clean_history(date=clean_date, clean_type=clean_type, - downloadhis_list=downloadhis_list) - - def __clean_history(self, date: str, clean_type: str, downloadhis_list: List[DownloadHistory]): - """ - 清理下载历史、转移记录 - """ - if not downloadhis_list: - logger.warn(f"未获取到日期 {date} 之前的下载记录,停止运行") - return - - # 读取历史记录 - pulgin_history = self.get_data('history') or [] - - # 创建一个字典来保存分组结果 - downloadhis_grouped_dict: Dict[tuple, List[DownloadHistory]] = defaultdict(list) - # 遍历DownloadHistory对象列表 - for downloadhis in downloadhis_list: - # 获取type和tmdbid的值 - dtype = downloadhis.type - tmdbid = downloadhis.tmdbid - - # 将DownloadHistory对象添加到对应分组的列表中 - downloadhis_grouped_dict[(dtype, tmdbid)].append(downloadhis) - - # 输出分组结果 - for key, downloadhis_list in downloadhis_grouped_dict.items(): - logger.info(f"开始清理 {key}") - del_transferhis_cnt = 0 - del_media_name = downloadhis_list[0].title - del_media_user = downloadhis_list[0].username - del_media_type = downloadhis_list[0].type - del_media_year = downloadhis_list[0].year - 
del_media_season = downloadhis_list[0].seasons - del_media_episode = downloadhis_list[0].episodes - del_image = downloadhis_list[0].image - for downloadhis in downloadhis_list: - if not downloadhis.download_hash: - logger.debug(f'下载历史 {downloadhis.id} {downloadhis.title} 未获取到download_hash,跳过处理') - continue - # 根据hash获取转移记录 - transferhis_list = self._transferhis.list_by_hash(download_hash=downloadhis.download_hash) - if not transferhis_list: - logger.warn(f"下载历史 {downloadhis.download_hash} 未查询到转移记录,跳过处理") - continue - - for history in transferhis_list: - # 册除媒体库文件 - if clean_type in ["dest", "all"]: - TransferChain().delete_files(Path(history.dest)) - # 删除记录 - self._transferhis.delete(history.id) - # 删除源文件 - if clean_type in ["src", "all"]: - TransferChain().delete_files(Path(history.src)) - # 发送事件 - eventmanager.send_event( - EventType.DownloadFileDeleted, - { - "src": history.src - } - ) - - # 累加删除数量 - del_transferhis_cnt += len(transferhis_list) - - if del_transferhis_cnt: - # 发送消息 - if self._notify: - self.post_message( - mtype=NotificationType.MediaServer, - title="【定时清理媒体库任务完成】", - text=f"清理媒体名称 {del_media_name}\n" - f"下载媒体用户 {del_media_user}\n" - f"删除历史记录 {del_transferhis_cnt}") - - pulgin_history.append({ - "type": del_media_type, - "title": del_media_name, - "year": del_media_year, - "season": del_media_season, - "episode": del_media_episode, - "image": del_image, - "del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) - }) - - # 保存历史 - self.save_data("history", pulgin_history) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '开启通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '0 0 ? ? ?' 
- } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'cleantype', - 'label': '全局清理方式', - 'items': [ - {'title': '媒体库文件', 'value': 'dest'}, - {'title': '源文件', 'value': 'src'}, - {'title': '所有文件', 'value': 'all'}, - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cleandate', - 'label': '全局清理日期', - 'placeholder': '清理多少天之前的下载记录(天)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'cleanuser', - 'label': '清理配置', - 'rows': 6, - 'placeholder': '每一行一个配置,支持以下几种配置方式,清理方式支持 src、desc、all 分别对应源文件,媒体库文件,所有文件\n' - '用户名缺省默认清理所有用户(慎重留空),清理天数缺省默认使用全局清理天数,清理方式缺省默认使用全局清理方式\n' - '用户名/插件名(豆瓣想看、豆瓣榜单、RSS订阅)\n' - '用户名#清理方式\n' - '用户名:清理天数\n' - '用户名:清理天数#清理方式', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "notify": False, - "cleantype": "dest", - "cron": "", - "cleanuser": "", - "cleandate": 30 - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('del_time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - htype = history.get("type") - title = history.get("title") - year = history.get("year") - season = history.get("season") - episode = history.get("episode") - image = history.get("image") - del_time = history.get("del_time") - - if season: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'年份:{year}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'季:{season}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'集:{episode}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - else: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'年份:{year}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': image, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': sub_contents - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def stop_service(self): - """ - 
退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/autosignin/__init__.py b/app/plugins/autosignin/__init__.py deleted file mode 100644 index 26a172e9..00000000 --- a/app/plugins/autosignin/__init__.py +++ /dev/null @@ -1,1043 +0,0 @@ -import re -import traceback -from datetime import datetime, timedelta -from multiprocessing.dummy import Pool as ThreadPool -from multiprocessing.pool import ThreadPool -from typing import Any, List, Dict, Tuple, Optional -from urllib.parse import urljoin - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from ruamel.yaml import CommentedMap - -from app import schemas -from app.core.config import settings -from app.core.event import EventManager, eventmanager, Event -from app.db.site_oper import SiteOper -from app.helper.browser import PlaywrightHelper -from app.helper.cloudflare import under_challenge -from app.helper.module import ModuleHelper -from app.helper.sites import SitesHelper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType, NotificationType -from app.utils.http import RequestUtils -from app.utils.site import SiteUtils -from app.utils.string import StringUtils -from app.utils.timer import TimerUtils - - -class AutoSignIn(_PluginBase): - # 插件名称 - plugin_name = "站点自动签到" - # 插件描述 - plugin_desc = "自动模拟登录站点、签到。" - # 插件图标 - plugin_icon = "signin.png" - # 主题色 - plugin_color = "#4179F4" - # 插件版本 - plugin_version = "1.1" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "autosignin_" - # 加载顺序 - plugin_order = 0 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - sites: SitesHelper = None - siteoper: SiteOper = None - # 事件管理器 - event: EventManager = None - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - # 加载的模块 - _site_schema: list = [] - - # 配置属性 - _enabled: bool = False - _cron: str = "" - _onlyonce: bool = False - _notify: bool = False - _queue_cnt: int = 5 - _sign_sites: list = [] - _login_sites: list = [] - _retry_keyword = None - _clean: bool = False - _start_time: int = None - _end_time: int = None - _auto_cf: int = 0 - - def init_plugin(self, config: dict = None): - self.sites = SitesHelper() - self.siteoper = SiteOper() - self.event = EventManager() - - # 停止现有任务 - self.stop_service() - - # 配置 - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._onlyonce = config.get("onlyonce") - self._notify = config.get("notify") - self._queue_cnt = config.get("queue_cnt") or 5 - self._sign_sites = config.get("sign_sites") or [] - self._login_sites = config.get("login_sites") or [] - self._retry_keyword = config.get("retry_keyword") - self._auto_cf = config.get("auto_cf") - self._clean = config.get("clean") - - # 过滤掉已删除的站点 - all_sites = [site.id for site in self.siteoper.list_order_by_pri()] + [site.get("id") for site in - self.__custom_sites()] - self._sign_sites = [site_id for site_id in all_sites if site_id in self._sign_sites] - self._login_sites = [site_id for site_id in all_sites if site_id in self._login_sites] - # 保存配置 - self.__update_config() - - # 加载模块 - if self._enabled or self._onlyonce: - - self._site_schema = ModuleHelper.load('app.plugins.autosignin.sites', - filter_func=lambda _, obj: hasattr(obj, 
'match')) - - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - # 立即运行一次 - if self._onlyonce: - logger.info("站点自动签到服务启动,立即运行一次") - self._scheduler.add_job(func=self.sign_in, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="站点自动签到") - - # 关闭一次性开关 - self._onlyonce = False - # 保存配置 - self.__update_config() - - # 周期运行 - if self._enabled: - if self._cron: - try: - if str(self._cron).strip().count(" ") == 4: - self._scheduler.add_job(func=self.sign_in, - trigger=CronTrigger.from_crontab(self._cron), - name="站点自动签到") - logger.info(f"站点自动签到服务启动,执行周期 {self._cron}") - else: - # 2.3/9-23 - crons = str(self._cron).strip().split("/") - if len(crons) == 2: - # 2.3 - cron = crons[0] - # 9-23 - times = crons[1].split("-") - if len(times) == 2: - # 9 - self._start_time = int(times[0]) - # 23 - self._end_time = int(times[1]) - if self._start_time and self._end_time: - self._scheduler.add_job(func=self.sign_in, - trigger="interval", - hours=float(str(cron).strip()), - name="站点自动签到") - logger.info( - f"站点自动签到服务启动,执行周期 {self._start_time}点-{self._end_time}点 每{cron}小时执行一次") - else: - logger.error("站点自动签到服务启动失败,周期格式错误") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误") - self._cron = "" - self._enabled = False - self.__update_config() - else: - # 默认0-24 按照周期运行 - self._start_time = 0 - self._end_time = 24 - self._scheduler.add_job(func=self.sign_in, - trigger="interval", - hours=float(str(self._cron).strip()), - name="站点自动签到") - logger.info( - f"站点自动签到服务启动,执行周期 {self._start_time}点-{self._end_time}点 每{self._cron}小时执行一次") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - self._cron = "" - self._enabled = False - self.__update_config() - else: - # 随机时间 - triggers = TimerUtils.random_scheduler(num_executions=2, - begin_hour=9, - end_hour=23, - max_interval=6 * 60, - min_interval=2 * 60) - for trigger in triggers: - self._scheduler.add_job(self.sign_in, "cron", - hour=trigger.hour, minute=trigger.minute, - name="站点自动签到") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - def __update_config(self): - # 保存配置 - self.update_config( - { - "enabled": self._enabled, - "notify": self._notify, - "cron": self._cron, - "onlyonce": self._onlyonce, - "queue_cnt": self._queue_cnt, - "sign_sites": self._sign_sites, - "login_sites": self._login_sites, - "retry_keyword": self._retry_keyword, - "auto_cf": self._auto_cf, - "clean": self._clean, - } - ) - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - return [{ - "cmd": "/site_signin", - "event": EventType.SiteSignin, - "desc": "站点签到", - "category": "站点", - "data": {} - }] - - def get_api(self) -> List[Dict[str, Any]]: - """ - 获取插件API - [{ - "path": "/xx", - "endpoint": self.xxx, - "methods": ["GET", "POST"], - "summary": "API说明" - }] - """ - return [{ - "path": "/signin_by_domain", - "endpoint": self.signin_by_domain, - "methods": ["GET"], - "summary": "站点签到", - "description": "使用站点域名签到站点", - }] - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - # 站点的可选项(内置站点 + 自定义站点) - customSites = self.__custom_sites() - - site_options = ([{"title": site.name, "value": site.id} - for site in self.siteoper.list_order_by_pri()] - + [{"title": site.get("name"), "value": site.get("id")} - for site in customSites]) - return 
[ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clean', - 'label': '清理本日缓存', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'queue_cnt', - 'label': '队列数量' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'retry_keyword', - 'label': '重试关键词', - 'placeholder': '支持正则表达式,命中才重签' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'auto_cf', - 'label': '自动优选', - 'placeholder': '命中重试关键词次数(0-关闭)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'sign_sites', - 'label': '签到站点', - 'items': site_options - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'login_sites', - 'label': '登录站点', - 'items': site_options - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '执行周期支持:' - '1、5位cron表达式;' - '2、配置间隔(小时),如2.3/9-23(9-23点之间每隔2.3小时执行一次);' - '3、周期不填默认9-23点随机执行2次。' - '每天首次全量执行,其余执行命中重试关键词的站点。' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '自动优选:0-关闭,命中重试关键词次数大于该数量时自动执行Cloudflare IP优选(需要开启且则正确配置Cloudflare IP优选插件和自定义Hosts插件)' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "cron": "", - "auto_cf": 0, - "onlyonce": False, - "clean": False, - "queue_cnt": 5, - "sign_sites": [], - "login_sites": [], - "retry_keyword": "错误|失败" - } - - def __custom_sites(self) -> List[Any]: - custom_sites = [] - custom_sites_config = self.get_config("CustomSites") - if custom_sites_config and custom_sites_config.get("enabled"): - custom_sites = custom_sites_config.get("sites") - return custom_sites - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 最近两天的日期数组 - date_list = [(datetime.now() - 
timedelta(days=i)).date() for i in range(2)] - # 最近一天的签到数据 - current_day = "" - sign_data = [] - for day in date_list: - current_day = f"{day.month}月{day.day}日" - sign_data = self.get_data(current_day) - if sign_data: - break - if sign_data: - contents = [ - { - 'component': 'tr', - 'props': { - 'class': 'text-sm' - }, - 'content': [ - { - 'component': 'td', - 'props': { - 'class': 'whitespace-nowrap break-keep text-high-emphasis' - }, - 'text': current_day - }, - { - 'component': 'td', - 'text': data.get("site") - }, - { - 'component': 'td', - 'text': data.get("status") - } - ] - } for data in sign_data - ] - else: - contents = [ - { - 'component': 'tr', - 'props': { - 'class': 'text-sm' - }, - 'content': [ - { - 'component': 'td', - 'props': { - 'colspan': 3, - 'class': 'text-center' - }, - 'text': '暂无数据' - } - ] - } - ] - return [ - { - 'component': 'VTable', - 'props': { - 'hover': True - }, - 'content': [ - { - 'component': 'thead', - 'content': [ - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '日期' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '站点' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '状态' - } - ] - }, - { - 'component': 'tbody', - 'content': contents - } - ] - } - ] - - @eventmanager.register(EventType.SiteSignin) - def sign_in(self, event: Event = None): - """ - 自动签到|模拟登陆 - """ - # 日期 - today = datetime.today() - if self._start_time and self._end_time: - if int(datetime.today().hour) < self._start_time or int(datetime.today().hour) > self._end_time: - logger.error( - f"当前时间 {int(datetime.today().hour)} 不在 {self._start_time}-{self._end_time} 范围内,暂不执行任务") - return - if event: - logger.info("收到命令,开始站点签到 ...") - self.post_message(channel=event.event_data.get("channel"), - title="开始站点签到 ...", - userid=event.event_data.get("user")) - - if self._sign_sites: - self.__do(today=today, type="签到", do_sites=self._sign_sites, event=event) - if self._login_sites: - self.__do(today=today, type="登录", do_sites=self._login_sites, event=event) - - def __do(self, today: datetime, type: str, do_sites: list, event: Event = None): - """ - 签到逻辑 - """ - yesterday = today - timedelta(days=1) - yesterday_str = yesterday.strftime('%Y-%m-%d') - # 删除昨天历史 - self.del_data(key=type + "-" + yesterday_str) - self.del_data(key=f"{yesterday.month}月{yesterday.day}日") - - # 查看今天有没有签到|登录历史 - today = today.strftime('%Y-%m-%d') - today_history = self.get_data(key=type + "-" + today) - - # 查询所有站点 - all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites() - # 过滤掉没有选中的站点 - if do_sites: - do_sites = [site for site in all_sites if site.get("id") in do_sites] - else: - do_sites = all_sites - - # 今日没数据 - if not today_history or self._clean: - logger.info(f"今日 {today} 未{type},开始{type}已选站点") - if self._clean: - # 关闭开关 - self._clean = False - else: - # 需要重试站点 - retry_sites = today_history.get("retry") or [] - # 今天已签到|登录站点 - already_sites = today_history.get("do") or [] - - # 今日未签|登录站点 - no_sites = [site for site in do_sites if - site.get("id") not in already_sites or site.get("id") in retry_sites] - - if not no_sites: - logger.info(f"今日 {today} 已{type},无重新{type}站点,本次任务结束") - return - - # 任务站点 = 需要重试+今日未do - do_sites = no_sites - logger.info(f"今日 {today} 已{type},开始重试命中关键词站点") - - if not do_sites: - logger.info(f"没有需要{type}的站点") - return - - # 执行签到 - logger.info(f"开始执行{type}任务 ...") - if type == "签到": - with ThreadPool(min(len(do_sites), int(self._queue_cnt))) as 
p: - status = p.map(self.signin_site, do_sites) - else: - with ThreadPool(min(len(do_sites), int(self._queue_cnt))) as p: - status = p.map(self.login_site, do_sites) - - if status: - logger.info(f"站点{type}任务完成!") - # 获取今天的日期 - key = f"{datetime.now().month}月{datetime.now().day}日" - today_data = self.get_data(key) - if today_data: - if not isinstance(today_data, list): - today_data = [today_data] - for s in status: - today_data.append({ - "site": s[0], - "status": s[1] - }) - else: - today_data = [{ - "site": s[0], - "status": s[1] - } for s in status] - # 保存数据 - self.save_data(key, today_data) - - # 命中重试词的站点id - retry_sites = [] - # 命中重试词的站点签到msg - retry_msg = [] - # 登录成功 - login_success_msg = [] - # 签到成功 - sign_success_msg = [] - # 已签到 - already_sign_msg = [] - # 仿真签到成功 - fz_sign_msg = [] - # 失败|错误 - failed_msg = [] - - sites = {site.get('name'): site.get("id") for site in self.sites.get_indexers() if not site.get("public")} - for s in status: - site_name = s[0] - site_id = None - if site_name: - site_id = sites.get(site_name) - - if 'Cookie已失效' in str(s) and site_id: - # 触发自动登录插件登录 - logger.info(f"触发站点 {site_name} 自动登录更新Cookie和Ua") - self.eventmanager.send_event(EventType.SiteLogin, - { - "site_id": site_id - }) - # 记录本次命中重试关键词的站点 - if self._retry_keyword: - if site_id: - match = re.search(self._retry_keyword, s[1]) - if match: - logger.debug(f"站点 {site_name} 命中重试关键词 {self._retry_keyword}") - retry_sites.append(site_id) - # 命中的站点 - retry_msg.append(s) - continue - - if "登录成功" in str(s): - login_success_msg.append(s) - elif "仿真签到成功" in str(s): - fz_sign_msg.append(s) - continue - elif "签到成功" in str(s): - sign_success_msg.append(s) - elif '已签到' in str(s): - already_sign_msg.append(s) - else: - failed_msg.append(s) - - if not self._retry_keyword: - # 没设置重试关键词则重试已选站点 - retry_sites = self._sign_sites if type == "签到" else self._login_sites - logger.debug(f"下次{type}重试站点 {retry_sites}") - - # 存入历史 - self.save_data(key=type + "-" + today, - value={ - "do": self._sign_sites if type == "签到" else self._login_sites, - "retry": retry_sites - }) - - # 自动Cloudflare IP优选 - if self._auto_cf and int(self._auto_cf) > 0 and retry_msg and len(retry_msg) >= int(self._auto_cf): - self.eventmanager.send_event(EventType.CloudFlareSpeedTest, {}) - - # 发送通知 - if self._notify: - # 签到详细信息 登录成功、签到成功、已签到、仿真签到成功、失败--命中重试 - signin_message = login_success_msg + sign_success_msg + already_sign_msg + fz_sign_msg + failed_msg - if len(retry_msg) > 0: - signin_message += retry_msg - - signin_message = "\n".join([f'【{s[0]}】{s[1]}' for s in signin_message if s]) - self.post_message(title=f"【站点自动{type}】", - mtype=NotificationType.SiteMessage, - text=f"全部{type}数量: {len(self._sign_sites if type == '签到' else self._login_sites)} \n" - f"本次{type}数量: {len(do_sites)} \n" - f"下次{type}数量: {len(retry_sites) if self._retry_keyword else 0} \n" - f"{signin_message}" - ) - if event: - self.post_message(channel=event.event_data.get("channel"), - title=f"站点{type}完成!", userid=event.event_data.get("user")) - else: - logger.error(f"站点{type}任务失败!") - if event: - self.post_message(channel=event.event_data.get("channel"), - title=f"站点{type}任务失败!", userid=event.event_data.get("user")) - # 保存配置 - self.__update_config() - - def __build_class(self, url) -> Any: - for site_schema in self._site_schema: - try: - if site_schema.match(url): - return site_schema - except Exception as e: - logger.error("站点模块加载失败:%s" % str(e)) - return None - - def signin_by_domain(self, url: str) -> schemas.Response: - """ - 签到一个站点,可由API调用 - """ - domain = 
StringUtils.get_url_domain(url) - site_info = self.sites.get_indexer(domain) - if not site_info: - return schemas.Response( - success=True, - message=f"站点【{url}】不存在" - ) - else: - return schemas.Response( - success=True, - message=self.signin_site(site_info) - ) - - def signin_site(self, site_info: CommentedMap) -> Tuple[str, str]: - """ - 签到一个站点 - """ - site_module = self.__build_class(site_info.get("url")) - if site_module and hasattr(site_module, "signin"): - try: - _, msg = site_module().signin(site_info) - # 特殊站点直接返回签到信息,防止仿真签到、模拟登陆有歧义 - return site_info.get("name"), msg or "" - except Exception as e: - traceback.print_exc() - return site_info.get("name"), f"签到失败:{str(e)}" - else: - return site_info.get("name"), self.__signin_base(site_info) - - @staticmethod - def __signin_base(site_info: CommentedMap) -> str: - """ - 通用签到处理 - :param site_info: 站点信息 - :return: 签到结果信息 - """ - if not site_info: - return "" - site = site_info.get("name") - site_url = site_info.get("url") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - render = site_info.get("render") - proxies = settings.PROXY if site_info.get("proxy") else None - proxy_server = settings.PROXY_SERVER if site_info.get("proxy") else None - if not site_url or not site_cookie: - logger.warn(f"未配置 {site} 的站点地址或Cookie,无法签到") - return "" - # 模拟登录 - try: - # 访问链接 - checkin_url = site_url - if site_url.find("attendance.php") == -1: - # 拼登签到地址 - checkin_url = urljoin(site_url, "attendance.php") - logger.info(f"开始站点签到:{site},地址:{checkin_url}...") - if render: - page_source = PlaywrightHelper().get_page_source(url=checkin_url, - cookies=site_cookie, - ua=ua, - proxies=proxy_server) - if not SiteUtils.is_logged_in(page_source): - if under_challenge(page_source): - return f"无法通过Cloudflare!" - return f"仿真登录失败,Cookie已失效!" - else: - # 判断是否已签到 - if re.search(r'已签|签到已得', page_source, re.IGNORECASE) \ - or SiteUtils.is_checkin(page_source): - return f"签到成功" - return "仿真签到成功" - else: - res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).get_res(url=checkin_url) - if not res and site_url != checkin_url: - logger.info(f"开始站点模拟登录:{site},地址:{site_url}...") - res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).get_res(url=site_url) - # 判断登录状态 - if res and res.status_code in [200, 500, 403]: - if not SiteUtils.is_logged_in(res.text): - if under_challenge(res.text): - msg = "站点被Cloudflare防护,请打开站点浏览器仿真" - elif res.status_code == 200: - msg = "Cookie已失效" - else: - msg = f"状态码:{res.status_code}" - logger.warn(f"{site} 签到失败,{msg}") - return f"签到失败,{msg}!" - else: - logger.info(f"{site} 签到成功") - return f"签到成功" - elif res is not None: - logger.warn(f"{site} 签到失败,状态码:{res.status_code}") - return f"签到失败,状态码:{res.status_code}!" - else: - logger.warn(f"{site} 签到失败,无法打开网站") - return f"签到失败,无法打开网站!" - except Exception as e: - logger.warn("%s 签到失败:%s" % (site, str(e))) - traceback.print_exc() - return f"签到失败:{str(e)}!" 
- - def login_site(self, site_info: CommentedMap) -> Tuple[str, str]: - """ - 模拟登陆一个站点 - """ - return site_info.get("name"), self.__login_base(site_info) - - @staticmethod - def __login_base(site_info: CommentedMap) -> str: - """ - 模拟登陆通用处理 - :param site_info: 站点信息 - :return: 签到结果信息 - """ - if not site_info: - return "" - site = site_info.get("name") - site_url = site_info.get("url") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - render = site_info.get("render") - proxies = settings.PROXY if site_info.get("proxy") else None - proxy_server = settings.PROXY_SERVER if site_info.get("proxy") else None - if not site_url or not site_cookie: - logger.warn(f"未配置 {site} 的站点地址或Cookie,无法签到") - return "" - # 模拟登录 - try: - # 访问链接 - site_url = str(site_url).replace("attendance.php", "") - logger.info(f"开始站点模拟登陆:{site},地址:{site_url}...") - if render: - page_source = PlaywrightHelper().get_page_source(url=site_url, - cookies=site_cookie, - ua=ua, - proxies=proxy_server) - if not SiteUtils.is_logged_in(page_source): - if under_challenge(page_source): - return f"无法通过Cloudflare!" - return f"仿真登录失败,Cookie已失效!" - else: - return "模拟登陆成功" - else: - res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).get_res(url=site_url) - # 判断登录状态 - if res and res.status_code in [200, 500, 403]: - if not SiteUtils.is_logged_in(res.text): - if under_challenge(res.text): - msg = "站点被Cloudflare防护,请打开站点浏览器仿真" - elif res.status_code == 200: - msg = "Cookie已失效" - else: - msg = f"状态码:{res.status_code}" - logger.warn(f"{site} 模拟登陆失败,{msg}") - return f"模拟登陆失败,{msg}!" - else: - logger.info(f"{site} 模拟登陆成功") - return f"模拟登陆成功" - elif res is not None: - logger.warn(f"{site} 模拟登陆失败,状态码:{res.status_code}") - return f"模拟登陆失败,状态码:{res.status_code}!" - else: - logger.warn(f"{site} 模拟登陆失败,无法打开网站") - return f"模拟登陆失败,无法打开网站!" - except Exception as e: - logger.warn("%s 模拟登陆失败:%s" % (site, str(e))) - traceback.print_exc() - return f"模拟登陆失败:{str(e)}!" 
- - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - @eventmanager.register(EventType.SiteDeleted) - def site_deleted(self, event): - """ - 删除对应站点选中 - """ - site_id = event.event_data.get("site_id") - config = self.get_config() - if config: - self._sign_sites = self.__remove_site_id(config.get("sign_sites") or [], site_id) - self._login_sites = self.__remove_site_id(config.get("login_sites") or [], site_id) - # 保存配置 - self.__update_config() - - def __remove_site_id(self, do_sites, site_id): - if do_sites: - if isinstance(do_sites, str): - do_sites = [do_sites] - - # 删除对应站点 - if site_id: - do_sites = [site for site in do_sites if int(site) != int(site_id)] - else: - # 清空 - do_sites = [] - - # 若无站点,则停止 - if len(do_sites) == 0: - self._enabled = False - - return do_sites diff --git a/app/plugins/autosignin/sites/52pt.py b/app/plugins/autosignin/sites/52pt.py deleted file mode 100644 index 44c61551..00000000 --- a/app/plugins/autosignin/sites/52pt.py +++ /dev/null @@ -1,147 +0,0 @@ -import random -import re -from typing import Tuple - -from lxml import etree - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class Pt52(_ISiteSigninHandler): - """ - 52pt - 如果填写openai key则调用chatgpt获取答案 - 否则随机 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "52pt.site" - - # 已签到 - _sign_regex = ['今天已经签过到了'] - - # 签到成功,待补充 - _success_regex = ['\\d+点魔力值'] - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: dict) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - render = site_info.get("render") - proxy = site_info.get("proxy") - - # 判断今日是否已签到 - html_text = self.get_page_source(url='https://52pt.site/bakatest.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"今日已签到") - return True, '今日已签到' - - # 没有签到则解析html - html = etree.HTML(html_text) - - if not html: - return False, '签到失败' - - # 获取页面问题、答案 - questionid = html.xpath("//input[@name='questionid']/@value")[0] - option_ids = html.xpath("//input[@name='choice[]']/@value") - question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0] - - # 正则获取问题 - match = re.search(r'请问:(.+)', question_str) - if match: - question_str = match.group(1) - logger.debug(f"获取到签到问题 {question_str}") - else: - logger.error(f"未获取到签到问题") - return False, f"【{site}】签到失败,未获取到签到问题" - - # 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交 - choice = [option_ids[random.randint(0, len(option_ids) - 1)]] - - # 签到 - return self.__signin(questionid=questionid, - choice=choice, - site_cookie=site_cookie, - ua=ua, - proxy=proxy, - site=site) 
- - def __signin(self, questionid: str, - choice: list, - site: str, - site_cookie: str, - ua: str, - proxy: bool) -> Tuple[bool, str]: - """ - 签到请求 - questionid: 450 - choice[]: 8 - choice[]: 4 - usercomment: 此刻心情:无 - submit: 提交 - 多选会有多个choice[].... - """ - data = { - 'questionid': questionid, - 'choice[]': choice[0] if len(choice) == 1 else choice, - 'usercomment': '太难了!', - 'wantskip': '不会' - } - logger.debug(f"签到请求参数 {data}") - - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url='https://52pt.site/bakatest.php', data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # 判断是否签到成功 - sign_status = self.sign_in_result(html_res=sign_res.text, - regexs=self._success_regex) - if sign_status: - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - sign_status = self.sign_in_result(html_res=sign_res.text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - logger.error(f"{site} 签到失败,请到页面查看") - return False, '签到失败,请到页面查看' diff --git a/app/plugins/autosignin/sites/__init__.py b/app/plugins/autosignin/sites/__init__.py deleted file mode 100644 index 8c14ef51..00000000 --- a/app/plugins/autosignin/sites/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from abc import ABCMeta, abstractmethod -from typing import Tuple - -import chardet -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.helper.browser import PlaywrightHelper -from app.log import logger -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class _ISiteSigninHandler(metaclass=ABCMeta): - """ - 实现站点签到的基类,所有站点签到类都需要继承此类,并实现match和signin方法 - 实现类放置到sitesignin目录下将会自动加载 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "" - - @abstractmethod - def match(self, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, self.site_url) else False - - @abstractmethod - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: True|False,签到结果信息 - """ - pass - - @staticmethod - def get_page_source(url: str, cookie: str, ua: str, proxy: bool, render: bool) -> str: - """ - 获取页面源码 - :param url: Url地址 - :param cookie: Cookie - :param ua: UA - :param proxy: 是否使用代理 - :param render: 是否渲染 - :return: 页面源码,错误信息 - """ - if render: - return PlaywrightHelper().get_page_source(url=url, - cookies=cookie, - ua=ua, - proxies=settings.PROXY_SERVER if proxy else None) - else: - res = RequestUtils(cookies=cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).get_res(url=url) - if res is not None: - # 使用chardet检测字符编码 - raw_data = res.content - if raw_data: - try: - result = chardet.detect(raw_data) - encoding = result['encoding'] - # 解码为字符串 - return raw_data.decode(encoding) - except Exception as e: - logger.error(f"chardet解码失败:{str(e)}") - return res.text - else: - return res.text - return "" - - @staticmethod - def sign_in_result(html_res: str, regexs: list) -> bool: - """ - 判断是否签到成功 - """ - html_text = re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_res)) - for regex in regexs: - if re.search(str(regex), html_text): - return True - return False diff --git a/app/plugins/autosignin/sites/btschool.py b/app/plugins/autosignin/sites/btschool.py deleted file mode 100644 index 
b8f26712..00000000 --- a/app/plugins/autosignin/sites/btschool.py +++ /dev/null @@ -1,75 +0,0 @@ -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.string import StringUtils - - -class BTSchool(_ISiteSigninHandler): - """ - 学校签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "pt.btschool.club" - - # 已签到 - _sign_text = '每日签到' - - @classmethod - def match(cls, url) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - render = site_info.get("render") - proxy = site_info.get("proxy") - - logger.info(f"{site} 开始签到") - # 判断今日是否已签到 - html_text = self.get_page_source(url='https://pt.btschool.club', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 已签到 - if self._sign_text not in html_text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - html_text = self.get_page_source(url='https://pt.btschool.club/index.php?action=addbonus', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - if not html_text: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # 签到成功 - if self._sign_text not in html_text: - logger.info(f"{site} 签到成功") - return True, '签到成功' diff --git a/app/plugins/autosignin/sites/chdbits.py b/app/plugins/autosignin/sites/chdbits.py deleted file mode 100644 index ed2cf675..00000000 --- a/app/plugins/autosignin/sites/chdbits.py +++ /dev/null @@ -1,148 +0,0 @@ -import random -import re -from typing import Tuple - -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class CHDBits(_ISiteSigninHandler): - """ - 彩虹岛签到 - 如果填写openai key则调用chatgpt获取答案 - 否则随机 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "ptchdbits.co" - - # 已签到 - _sign_regex = ['今天已经签过到了'] - - # 签到成功,待补充 - _success_regex = ['\\d+点魔力值'] - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 判断今日是否已签到 - html_text = self.get_page_source(url='https://ptchdbits.co/bakatest.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - sign_status = 
self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 没有签到则解析html - html = etree.HTML(html_text) - - if not html: - return False, '签到失败' - - # 获取页面问题、答案 - questionid = html.xpath("//input[@name='questionid']/@value")[0] - option_ids = html.xpath("//input[@name='choice[]']/@value") - question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0] - - # 正则获取问题 - match = re.search(r'请问:(.+)', question_str) - if match: - question_str = match.group(1) - logger.debug(f"获取到签到问题 {question_str}") - else: - logger.error(f"未获取到签到问题") - return False, f"【{site}】签到失败,未获取到签到问题" - - # 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交 - choice = [option_ids[random.randint(0, len(option_ids) - 1)]] - - # 签到 - return self.__signin(questionid=questionid, - choice=choice, - site_cookie=site_cookie, - ua=ua, - proxy=proxy, - site=site) - - def __signin(self, questionid: str, - choice: list, - site: str, - site_cookie: str, - ua: str, - proxy: bool) -> Tuple[bool, str]: - """ - 签到请求 - questionid: 450 - choice[]: 8 - choice[]: 4 - usercomment: 此刻心情:无 - submit: 提交 - 多选会有多个choice[].... - """ - data = { - 'questionid': questionid, - 'choice[]': choice[0] if len(choice) == 1 else choice, - 'usercomment': '太难了!', - 'wantskip': '不会' - } - logger.debug(f"签到请求参数 {data}") - - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url='https://ptchdbits.co/bakatest.php', data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # 判断是否签到成功 - sign_status = self.sign_in_result(html_res=sign_res.text, - regexs=self._success_regex) - if sign_status: - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - sign_status = self.sign_in_result(html_res=sign_res.text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - logger.error(f"{site} 签到失败,请到页面查看") - return False, '签到失败,请到页面查看' diff --git a/app/plugins/autosignin/sites/haidan.py b/app/plugins/autosignin/sites/haidan.py deleted file mode 100644 index 38a4af3c..00000000 --- a/app/plugins/autosignin/sites/haidan.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.string import StringUtils - - -class HaiDan(_ISiteSigninHandler): - """ - 海胆签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "haidan.video" - - # 签到成功 - _succeed_regex = ['(?<=value=")已经打卡(?=")'] - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 签到 - html_text = self.get_page_source(url='https://www.haidan.video/signin.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - 
sign_status = self.sign_in_result(html_res=html_text, - regexs=self._succeed_regex) - if sign_status: - logger.info(f"{site} 签到成功") - return True, '签到成功' - - logger.error(f"{site} 签到失败,签到接口返回 {html_text}") - return False, '签到失败' diff --git a/app/plugins/autosignin/sites/hares.py b/app/plugins/autosignin/sites/hares.py deleted file mode 100644 index 5aea8f10..00000000 --- a/app/plugins/autosignin/sites/hares.py +++ /dev/null @@ -1,83 +0,0 @@ -import json -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class Hares(_ISiteSigninHandler): - """ - 白兔签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "club.hares.top" - - # 已签到 - _sign_text = '已签到' - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url='https://club.hares.top', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - if not html_text: - logger.error(f"{site} 模拟访问失败,请检查站点连通性") - return False, '模拟访问失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 模拟访问失败,Cookie已失效") - return False, '模拟访问失败,Cookie已失效' - - # if self._sign_text in html_res.text: - # logger.info(f"今日已签到") - # return True, '今日已签到' - - headers = { - 'Accept': 'application/json', - "User-Agent": ua - } - sign_res = RequestUtils(cookies=site_cookie, - headers=headers, - proxies=settings.PROXY if proxy else None - ).get_res(url="https://club.hares.top/attendance.php?action=sign") - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # {"code":1,"msg":"您今天已经签到过了"} - # {"code":0,"msg":"签到成功"} - sign_dict = json.loads(sign_res.text) - if sign_dict['code'] == 0: - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' diff --git a/app/plugins/autosignin/sites/hd4fans.py b/app/plugins/autosignin/sites/hd4fans.py deleted file mode 100644 index 9dabbaf6..00000000 --- a/app/plugins/autosignin/sites/hd4fans.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class HD4fans(_ISiteSigninHandler): - """ - 兽签到 - """ - - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "pt.hd4fans.org" - - # 签到成功 - _repeat_text = '[签到成功]' - _success_text = "签到成功" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 
签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url='https://pt.hd4fans.org/index.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 判断是否已签到 - if self._repeat_text in html_text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 签到 - data = { - 'action': 'checkin' - } - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url="https://pt.hd4fans.org/checkin.php", data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - # sign_res.text=本次签到魔力 - if sign_res.text and sign_res.text.isdigit(): - logger.info(f"{site} 签到成功") - return True, '签到成功' - - logger.error(f"{site} 签到失败,签到接口返回 {sign_res.text}") - return False, '签到失败' diff --git a/app/plugins/autosignin/sites/hdarea.py b/app/plugins/autosignin/sites/hdarea.py deleted file mode 100644 index bc345e7c..00000000 --- a/app/plugins/autosignin/sites/hdarea.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class HDArea(_ISiteSigninHandler): - """ - 好大签到 - """ - - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "hdarea.club" - - # 签到成功 - _success_text = "此次签到您获得" - _repeat_text = "请不要重复签到哦" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxies = settings.PROXY if site_info.get("proxy") else None - - # 获取页面html - data = { - 'action': 'sign_in' - } - html_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).post_res(url="https://www.hdarea.club/sign_in.php", data=data) - if not html_res or html_res.status_code != 200: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_res.text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 判断是否已签到 - # '已连续签到278天,此次签到您获得了100魔力值奖励!' 
- if self._success_text in html_res.text: - logger.info(f"{site} 签到成功") - return True, '签到成功' - if self._repeat_text in html_res.text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - logger.error(f"{site} 签到失败,签到接口返回 {html_res.text}") - return False, '签到失败' diff --git a/app/plugins/autosignin/sites/hdchina.py b/app/plugins/autosignin/sites/hdchina.py deleted file mode 100644 index 1d14982b..00000000 --- a/app/plugins/autosignin/sites/hdchina.py +++ /dev/null @@ -1,117 +0,0 @@ -import json -from typing import Tuple - -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class HDChina(_ISiteSigninHandler): - """ - 瓷器签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "hdchina.org" - - # 已签到 - _sign_regex = ['已签到'] - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxies = settings.PROXY if site_info.get("proxy") else None - - # 尝试解决瓷器cookie每天签到后过期,只保留hdchina=部分 - cookie = "" - # 按照分号进行字符串拆分 - sub_strs = site_cookie.split(";") - # 遍历每个子字符串 - for sub_str in sub_strs: - if "hdchina=" in sub_str: - # 如果子字符串包含"hdchina=",则保留该子字符串 - cookie += sub_str + ";" - - if "hdchina=" not in cookie: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - site_cookie = cookie - # 获取页面html - html_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).get_res(url="https://hdchina.org/index.php") - if not html_res or html_res.status_code != 200: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_res.text or "阻断页面" in html_res.text: - logger.error(f"{site} 签到失败,Cookie失效") - return False, '签到失败,Cookie失效' - - # 获取新返回的cookie进行签到 - site_cookie = ';'.join(['{}={}'.format(k, v) for k, v in html_res.cookies.get_dict().items()]) - - # 判断是否已签到 - html_res.encoding = "utf-8" - sign_status = self.sign_in_result(html_res=html_res.text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 没有签到则解析html - html = etree.HTML(html_res.text) - - if not html: - return False, '签到失败' - - # x_csrf - x_csrf = html.xpath("//meta[@name='x-csrf']/@content")[0] - if not x_csrf: - logger.error("{site} 签到失败,获取x-csrf失败") - return False, '签到失败' - logger.debug(f"获取到x-csrf {x_csrf}") - - # 签到 - data = { - 'csrf': x_csrf - } - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=proxies - ).post_res(url="https://hdchina.org/plugin_sign-in.php?cmd=signin", data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - sign_dict = json.loads(sign_res.text) - logger.debug(f"签到返回结果 {sign_dict}") - if sign_dict['state']: - # {'state': 'success', 'signindays': 10, 'integral': 20} - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - # {'state': False, 'msg': '不正确的CSRF / Incorrect CSRF token'} - logger.error(f"{site} 签到失败,不正确的CSRF / Incorrect CSRF token") - return False, 
'签到失败' diff --git a/app/plugins/autosignin/sites/hdcity.py b/app/plugins/autosignin/sites/hdcity.py deleted file mode 100644 index 229a523a..00000000 --- a/app/plugins/autosignin/sites/hdcity.py +++ /dev/null @@ -1,66 +0,0 @@ -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.string import StringUtils - - -class HDCity(_ISiteSigninHandler): - """ - 城市签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "hdcity.city" - - # 签到成功 - _success_text = '本次签到获得魅力' - # 重复签到 - _repeat_text = '已签到' - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url='https://hdcity.city/sign', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 判断是否已签到 - # '已连续签到278天,此次签到您获得了100魔力值奖励!' - if self._success_text in html_text: - logger.info(f"{site} 签到成功") - return True, '签到成功' - if self._repeat_text in html_text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - logger.error(f"{site} 签到失败,签到接口返回 {html_text}") - return False, '签到失败' diff --git a/app/plugins/autosignin/sites/hdsky.py b/app/plugins/autosignin/sites/hdsky.py deleted file mode 100644 index 8e2ded5b..00000000 --- a/app/plugins/autosignin/sites/hdsky.py +++ /dev/null @@ -1,133 +0,0 @@ -import json -import time -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.helper.ocr import OcrHelper -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class HDSky(_ISiteSigninHandler): - """ - 天空ocr签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "hdsky.me" - - # 已签到 - _sign_regex = ['已签到'] - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 判断今日是否已签到 - html_text = self.get_page_source(url='https://hdsky.me', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, 
'今日已签到' - - # 获取验证码请求,考虑到网络问题获取失败,多获取几次试试 - res_times = 0 - img_hash = None - while not img_hash and res_times <= 3: - image_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url='https://hdsky.me/image_code_ajax.php', - data={'action': 'new'}) - if image_res and image_res.status_code == 200: - image_json = json.loads(image_res.text) - if image_json["success"]: - img_hash = image_json["code"] - break - res_times += 1 - logger.debug(f"获取{site}验证码失败,正在进行重试,目前重试次数 {res_times}") - time.sleep(1) - - # 获取到二维码hash - if img_hash: - # 完整验证码url - img_get_url = 'https://hdsky.me/image.php?action=regimage&imagehash=%s' % img_hash - logger.debug(f"获取到{site}验证码链接 {img_get_url}") - # ocr识别多次,获取6位验证码 - times = 0 - ocr_result = None - # 识别几次 - while times <= 3: - # ocr二维码识别 - ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url, - cookie=site_cookie, - ua=ua) - logger.debug(f"ocr识别{site}验证码 {ocr_result}") - if ocr_result: - if len(ocr_result) == 6: - logger.info(f"ocr识别{site}验证码成功 {ocr_result}") - break - times += 1 - logger.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}") - time.sleep(1) - - if ocr_result: - # 组装请求参数 - data = { - 'action': 'showup', - 'imagehash': img_hash, - 'imagestring': ocr_result - } - # 访问签到链接 - res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url='https://hdsky.me/showup.php', data=data) - if res and res.status_code == 200: - if json.loads(res.text)["success"]: - logger.info(f"{site} 签到成功") - return True, '签到成功' - elif str(json.loads(res.text)["message"]) == "date_unmatch": - # 重复签到 - logger.warn(f"{site} 重复成功") - return True, '今日已签到' - elif str(json.loads(res.text)["message"]) == "invalid_imagehash": - # 验证码错误 - logger.warn(f"{site} 签到失败:验证码错误") - return False, '签到失败:验证码错误' - - logger.error(f'{site} 签到失败:未获取到验证码') - return False, '签到失败:未获取到验证码' diff --git a/app/plugins/autosignin/sites/hdupt.py b/app/plugins/autosignin/sites/hdupt.py deleted file mode 100644 index 470981df..00000000 --- a/app/plugins/autosignin/sites/hdupt.py +++ /dev/null @@ -1,82 +0,0 @@ -import re -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.string import StringUtils - - -class HDUpt(_ISiteSigninHandler): - """ - hdu签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "pt.hdupt.com" - - # 已签到 - _sign_regex = [''] - - # 签到成功 - _success_text = '本次签到获得魅力' - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url='https://pt.hdupt.com', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - 
return True, '今日已签到' - - # 签到 - html_text = self.get_page_source(url='https://pt.hdupt.com/added.php?action=qiandao', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - logger.debug(f"{site} 签到接口返回 {html_text}") - # 判断是否已签到 sign_res.text = ".23" - if len(list(map(int, re.findall(r"\d+", html_text)))) > 0: - logger.info(f"{site} 签到成功") - return True, '签到成功' - - logger.error(f"{site} 签到失败,签到接口返回 {html_text}") - return False, '签到失败' diff --git a/app/plugins/autosignin/sites/opencd.py b/app/plugins/autosignin/sites/opencd.py deleted file mode 100644 index 1f8d0c14..00000000 --- a/app/plugins/autosignin/sites/opencd.py +++ /dev/null @@ -1,132 +0,0 @@ -import json -import time -from typing import Tuple - -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.helper.ocr import OcrHelper -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class Opencd(_ISiteSigninHandler): - """ - 皇后ocr签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "open.cd" - - # 已签到 - _repeat_text = "/plugin_sign-in.php?cmd=show-log" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 判断今日是否已签到 - html_text = self.get_page_source(url='https://www.open.cd', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - if self._repeat_text in html_text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 获取签到参数 - html_text = self.get_page_source(url='https://www.open.cd/plugin_sign-in.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - # 没有签到则解析html - html = etree.HTML(html_text) - if not html: - return False, '签到失败' - - # 签到参数 - img_url = html.xpath('//form[@id="frmSignin"]//img/@src')[0] - img_hash = html.xpath('//form[@id="frmSignin"]//input[@name="imagehash"]/@value')[0] - if not img_url or not img_hash: - logger.error(f"{site} 签到失败,获取签到参数失败") - return False, '签到失败,获取签到参数失败' - - # 完整验证码url - img_get_url = 'https://www.open.cd/%s' % img_url - logger.debug(f"{site} 获取到{site}验证码链接 {img_get_url}") - - # ocr识别多次,获取6位验证码 - times = 0 - ocr_result = None - # 识别几次 - while times <= 3: - # ocr二维码识别 - ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url, - cookie=site_cookie, - ua=ua) - logger.debug(f"ocr识别{site}验证码 {ocr_result}") - if ocr_result: - if len(ocr_result) == 6: - logger.info(f"ocr识别{site}验证码成功 {ocr_result}") - break - times += 1 - logger.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}") - time.sleep(1) - - if ocr_result: - # 组装请求参数 - data = { - 'imagehash': img_hash, - 'imagestring': ocr_result - } - # 访问签到链接 - 
sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url='https://www.open.cd/plugin_sign-in.php?cmd=signin', data=data) - if sign_res and sign_res.status_code == 200: - logger.debug(f"sign_res返回 {sign_res.text}") - # sign_res.text = '{"state":"success","signindays":"0","integral":"10"}' - sign_dict = json.loads(sign_res.text) - if sign_dict['state']: - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - logger.error(f"{site} 签到失败,签到接口返回 {sign_dict}") - return False, '签到失败' - - logger.error(f'{site} 签到失败:未获取到验证码') - return False, '签到失败:未获取到验证码' diff --git a/app/plugins/autosignin/sites/pterclub.py b/app/plugins/autosignin/sites/pterclub.py deleted file mode 100644 index 4047272a..00000000 --- a/app/plugins/autosignin/sites/pterclub.py +++ /dev/null @@ -1,65 +0,0 @@ -import json -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.string import StringUtils - - -class PTerClub(_ISiteSigninHandler): - """ - 猫签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "pterclub.com" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 签到 - html_text = self.get_page_source(url='https://pterclub.com/attendance-ajax.php', - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - try: - sign_dict = json.loads(html_text) - except Exception as e: - logger.error(f"{site} 签到失败,签到接口返回数据异常,错误信息:{str(e)}") - return False, '签到失败,签到接口返回数据异常' - if sign_dict['status'] == '1': - # {"status":"1","data":" (签到已成功300)","message":"
这是您的第237次签到, - # 已连续签到237天。本次签到获得300克猫粮。
"} - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - # {"status":"0","data":"抱歉","message":"您今天已经签到过了,请勿重复刷新。"} - logger.info(f"{site} 今日已签到") - return True, '今日已签到' diff --git a/app/plugins/autosignin/sites/tjupt.py b/app/plugins/autosignin/sites/tjupt.py deleted file mode 100644 index 4a20f84c..00000000 --- a/app/plugins/autosignin/sites/tjupt.py +++ /dev/null @@ -1,274 +0,0 @@ -import json -import os -import time -from io import BytesIO -from typing import Tuple - -from PIL import Image -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class Tjupt(_ISiteSigninHandler): - """ - 北洋签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "tjupt.org" - - # 签到地址 - _sign_in_url = 'https://www.tjupt.org/attendance.php' - - # 已签到 - _sign_regex = ['今日已签到'] - - # 签到成功 - _succeed_regex = ['这是您的首次签到,本次签到获得\\d+个魔力值。', - '签到成功,这是您的第\\d+次签到,已连续签到\\d+天,本次签到获得\\d+个魔力值。', - '重新签到成功,本次签到获得\\d+个魔力值'] - - # 存储正确的答案,后续可直接查 - _answer_path = settings.TEMP_PATH / "signin/" - _answer_file = _answer_path / "tjupt.json" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 创建正确答案存储目录 - if not os.path.exists(os.path.dirname(self._answer_file)): - os.makedirs(os.path.dirname(self._answer_file)) - - # 获取北洋签到页面html - html_text = self.get_page_source(url=self._sign_in_url, - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - - # 获取签到后返回html,判断是否签到成功 - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 没有签到则解析html - html = etree.HTML(html_text) - if not html: - return False, '签到失败' - img_url = html.xpath('//table[@class="captcha"]//img/@src')[0] - - if not img_url: - logger.error(f"{site} 签到失败,未获取到签到图片") - return False, '签到失败,未获取到签到图片' - - # 签到图片 - img_url = "https://www.tjupt.org" + img_url - logger.info(f"获取到签到图片 {img_url}") - # 获取签到图片hash - captcha_img_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).get_res(url=img_url) - if not captcha_img_res or captcha_img_res.status_code != 200: - logger.error(f"{site} 签到图片 {img_url} 请求失败") - return False, '签到失败,未获取到签到图片' - captcha_img = Image.open(BytesIO(captcha_img_res.content)) - captcha_img_hash = self._tohash(captcha_img) - logger.debug(f"签到图片hash {captcha_img_hash}") - - # 签到答案选项 - values = html.xpath("//input[@name='answer']/@value") - options = html.xpath("//input[@name='answer']/following-sibling::text()") - - if not values or not options: - logger.error(f"{site} 签到失败,未获取到答案选项") - return False, '签到失败,未获取到答案选项' - - # value+选项 - answers = list(zip(values, options)) - 
logger.debug(f"获取到所有签到选项 {answers}") - - # 查询已有答案 - exits_answers = {} - try: - with open(self._answer_file, 'r') as f: - json_str = f.read() - exits_answers = json.loads(json_str) - # 查询本地本次验证码hash答案 - captcha_answer = exits_answers[captcha_img_hash] - - # 本地存在本次hash对应的正确答案再遍历查询 - if captcha_answer: - for value, answer in answers: - if str(captcha_answer) == str(answer): - # 确实是答案 - return self.__signin(answer=value, - site_cookie=site_cookie, - ua=ua, - proxy=proxy, - site=site) - except (FileNotFoundError, IOError, OSError) as e: - logger.debug(f"查询本地已知答案失败:{str(e)},继续请求豆瓣查询") - - # 本地不存在正确答案则请求豆瓣查询匹配 - for value, answer in answers: - if answer: - # 豆瓣检索 - db_res = RequestUtils().get_res(url=f'https://movie.douban.com/j/subject_suggest?q={answer}') - if not db_res or db_res.status_code != 200: - logger.debug(f"签到选项 {answer} 未查询到豆瓣数据") - continue - - # 豆瓣返回结果 - db_answers = json.loads(db_res.text) - if not isinstance(db_answers, list): - db_answers = [db_answers] - - if len(db_answers) == 0: - logger.debug(f"签到选项 {answer} 查询到豆瓣数据为空") - - for db_answer in db_answers: - answer_img_url = db_answer['img'] - - # 获取答案hash - answer_img_res = RequestUtils(referer="https://movie.douban.com").get_res(url=answer_img_url) - if not answer_img_res or answer_img_res.status_code != 200: - logger.debug(f"签到答案 {answer} {answer_img_url} 请求失败") - continue - - answer_img = Image.open(BytesIO(answer_img_res.content)) - answer_img_hash = self._tohash(answer_img) - logger.debug(f"签到答案图片hash {answer} {answer_img_hash}") - - # 获取选项图片与签到图片相似度,大于0.9默认是正确答案 - score = self._comparehash(captcha_img_hash, answer_img_hash) - logger.info(f"签到图片与选项 {answer} 豆瓣图片相似度 {score}") - if score > 0.9: - # 确实是答案 - return self.__signin(answer=value, - site_cookie=site_cookie, - ua=ua, - proxy=proxy, - site=site, - exits_answers=exits_answers, - captcha_img_hash=captcha_img_hash) - - # 间隔5s,防止请求太频繁被豆瓣屏蔽ip - time.sleep(5) - logger.error(f"豆瓣图片匹配,未获取到匹配答案") - - # 没有匹配签到成功,则签到失败 - return False, '签到失败,未获取到匹配答案' - - def __signin(self, answer, site_cookie, ua, proxy, site, exits_answers=None, captcha_img_hash=None): - """ - 签到请求 - """ - data = { - 'answer': answer, - 'submit': '提交' - } - logger.debug(f"提交data {data}") - sign_in_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url=self._sign_in_url, data=data) - if not sign_in_res or sign_in_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # 获取签到后返回html,判断是否签到成功 - sign_status = self.sign_in_result(html_res=sign_in_res.text, - regexs=self._succeed_regex) - if sign_status: - logger.info(f"签到成功") - if exits_answers and captcha_img_hash: - # 签到成功写入本地文件 - self.__write_local_answer(exits_answers=exits_answers or {}, - captcha_img_hash=captcha_img_hash, - answer=answer) - return True, '签到成功' - else: - logger.error(f"{site} 签到失败,请到页面查看") - return False, '签到失败,请到页面查看' - - def __write_local_answer(self, exits_answers, captcha_img_hash, answer): - """ - 签到成功写入本地文件 - """ - try: - exits_answers[captcha_img_hash] = answer - # 序列化数据 - formatted_data = json.dumps(exits_answers, indent=4) - with open(self._answer_file, 'w') as f: - f.write(formatted_data) - except (FileNotFoundError, IOError, OSError) as e: - logger.debug(f"签到成功写入本地文件失败:{str(e)}") - - @staticmethod - def _tohash(img, shape=(10, 10)): - """ - 获取图片hash - """ - img = img.resize(shape) - gray = img.convert('L') - s = 0 - hash_str = '' - for i in range(shape[1]): - for j in range(shape[0]): - s = s + gray.getpixel((j, i)) - avg = s / 
(shape[0] * shape[1]) - for i in range(shape[1]): - for j in range(shape[0]): - if gray.getpixel((j, i)) > avg: - hash_str = hash_str + '1' - else: - hash_str = hash_str + '0' - return hash_str - - @staticmethod - def _comparehash(hash1, hash2, shape=(10, 10)): - """ - 比较图片hash - 返回相似度 - """ - n = 0 - if len(hash1) != len(hash2): - return -1 - for i in range(len(hash1)): - if hash1[i] == hash2[i]: - n = n + 1 - return n / (shape[0] * shape[1]) diff --git a/app/plugins/autosignin/sites/ttg.py b/app/plugins/autosignin/sites/ttg.py deleted file mode 100644 index d3470a6c..00000000 --- a/app/plugins/autosignin/sites/ttg.py +++ /dev/null @@ -1,97 +0,0 @@ -import re -from typing import Tuple - -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class TTG(_ISiteSigninHandler): - """ - TTG签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "totheglory.im" - - # 已签到 - _sign_regex = ['已签到'] - _sign_text = '亲,您今天已签到过,不要太贪哦' - - # 签到成功 - _success_text = '您已连续签到' - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url="https://totheglory.im", - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 判断是否已签到 - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 获取签到参数 - signed_timestamp = re.search('(?<=signed_timestamp: ")\\d{10}', html_text).group() - signed_token = re.search('(?<=signed_token: ").*(?=")', html_text).group() - logger.debug(f"signed_timestamp={signed_timestamp} signed_token={signed_token}") - - data = { - 'signed_timestamp': signed_timestamp, - 'signed_token': signed_token - } - # 签到 - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url="https://totheglory.im/signed.php", - data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - sign_res.encoding = "utf-8" - if self._success_text in sign_res.text: - logger.info(f"{site} 签到成功") - return True, '签到成功' - if self._sign_text in sign_res.text: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - logger.error(f"{site} 签到失败,未知原因") - return False, '签到失败,未知原因' diff --git a/app/plugins/autosignin/sites/u2.py b/app/plugins/autosignin/sites/u2.py deleted file mode 100644 index 2c45c2c9..00000000 --- a/app/plugins/autosignin/sites/u2.py +++ /dev/null @@ -1,123 +0,0 @@ -import datetime -import random -import re -from typing import Tuple - -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log 
import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class U2(_ISiteSigninHandler): - """ - U2签到 随机 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = "u2.dmhy.org" - - # 已签到 - _sign_regex = ['已签到', - 'Show Up', - 'Показать', - '已簽到', - '已簽到'] - - # 签到成功 - _success_text = "window.location.href = 'showup.php';" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - now = datetime.datetime.now() - # 判断当前时间是否小于9点 - if now.hour < 9: - logger.error(f"{site} 签到失败,9点前不签到") - return False, '签到失败,9点前不签到' - - # 获取页面html - html_text = self.get_page_source(url="https://u2.dmhy.org/showup.php", - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 签到失败,请检查站点连通性") - return False, '签到失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 签到失败,Cookie已失效") - return False, '签到失败,Cookie已失效' - - # 判断是否已签到 - sign_status = self.sign_in_result(html_res=html_text, - regexs=self._sign_regex) - if sign_status: - logger.info(f"{site} 今日已签到") - return True, '今日已签到' - - # 没有签到则解析html - html = etree.HTML(html_text) - - if not html: - return False, '签到失败' - - # 获取签到参数 - req = html.xpath("//form//td/input[@name='req']/@value")[0] - hash_str = html.xpath("//form//td/input[@name='hash']/@value")[0] - form = html.xpath("//form//td/input[@name='form']/@value")[0] - submit_name = html.xpath("//form//td/input[@type='submit']/@name") - submit_value = html.xpath("//form//td/input[@type='submit']/@value") - if not re or not hash_str or not form or not submit_name or not submit_value: - logger.error("{site} 签到失败,未获取到相关签到参数") - return False, '签到失败' - - # 随机一个答案 - answer_num = random.randint(0, 3) - data = { - 'req': req, - 'hash': hash_str, - 'form': form, - 'message': '一切随缘~', - submit_name[answer_num]: submit_value[answer_num] - } - # 签到 - sign_res = RequestUtils(cookies=site_cookie, - ua=ua, - proxies=settings.PROXY if proxy else None - ).post_res(url="https://u2.dmhy.org/showup.php?action=show", - data=data) - if not sign_res or sign_res.status_code != 200: - logger.error(f"{site} 签到失败,签到接口请求失败") - return False, '签到失败,签到接口请求失败' - - # 判断是否签到成功 - # sign_res.text = "" - if self._success_text in sign_res.text: - logger.info(f"{site} 签到成功") - return True, '签到成功' - else: - logger.error(f"{site} 签到失败,未知原因") - return False, '签到失败,未知原因' diff --git a/app/plugins/autosignin/sites/zhuque.py b/app/plugins/autosignin/sites/zhuque.py deleted file mode 100644 index f3375f5f..00000000 --- a/app/plugins/autosignin/sites/zhuque.py +++ /dev/null @@ -1,88 +0,0 @@ -import json -from typing import Tuple - -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.log import logger -from app.plugins.autosignin.sites import _ISiteSigninHandler -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class ZhuQue(_ISiteSigninHandler): - """ - ZHUQUE签到 - """ - # 匹配的站点Url,每一个实现类都需要设置为自己的站点Url - site_url = 
"zhuque.in" - - @classmethod - def match(cls, url: str) -> bool: - """ - 根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可 - :param url: 站点Url - :return: 是否匹配,如匹配则会调用该类的signin方法 - """ - return True if StringUtils.url_equal(url, cls.site_url) else False - - def signin(self, site_info: CommentedMap) -> Tuple[bool, str]: - """ - 执行签到操作 - :param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息 - :return: 签到结果信息 - """ - site = site_info.get("name") - site_cookie = site_info.get("cookie") - ua = site_info.get("ua") - proxy = site_info.get("proxy") - render = site_info.get("render") - - # 获取页面html - html_text = self.get_page_source(url="https://zhuque.in", - cookie=site_cookie, - ua=ua, - proxy=proxy, - render=render) - if not html_text: - logger.error(f"{site} 模拟登录失败,请检查站点连通性") - return False, '模拟登录失败,请检查站点连通性' - - if "login.php" in html_text: - logger.error(f"{site} 模拟登录失败,Cookie已失效") - return False, '模拟登录失败,Cookie已失效' - - html = etree.HTML(html_text) - - if not html: - return False, '模拟登录失败' - - # 释放技能 - msg = '失败' - x_csrf_token = html.xpath("//meta[@name='x-csrf-token']/@content")[0] - if x_csrf_token: - data = { - "all": 1, - "resetModal": "true" - } - headers = { - "x-csrf-token": str(x_csrf_token), - "Content-Type": "application/json; charset=utf-8", - "User-Agent": ua - } - skill_res = RequestUtils(cookies=site_cookie, - headers=headers, - proxies=settings.PROXY if proxy else None - ).post_res(url="https://zhuque.in/api/gaming/fireGenshinCharacterMagic", json=data) - if not skill_res or skill_res.status_code != 200: - logger.error(f"模拟登录失败,释放技能失败") - - # '{"status":200,"data":{"code":"FIRE_GENSHIN_CHARACTER_MAGIC_SUCCESS","bonus":0}}' - skill_dict = json.loads(skill_res.text) - if skill_dict['status'] == 200: - bonus = int(skill_dict['data']['bonus']) - msg = f'成功,获得{bonus}魔力' - - logger.info(f'【{site}】模拟登录成功,技能释放{msg}') - return True, f'模拟登录成功,技能释放{msg}' diff --git a/app/plugins/bestfilmversion/__init__.py b/app/plugins/bestfilmversion/__init__.py deleted file mode 100644 index 8f44a55f..00000000 --- a/app/plugins/bestfilmversion/__init__.py +++ /dev/null @@ -1,694 +0,0 @@ -from datetime import datetime, timedelta -from functools import reduce -from pathlib import Path -from threading import RLock -from typing import Optional, Any, List, Dict, Tuple -from xml.dom.minidom import parseString - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from requests import Response - -from app.chain.subscribe import SubscribeChain -from app.core.config import settings -from app.core.context import MediaInfo -from app.core.event import eventmanager -from app.log import logger -from app.modules.emby import Emby -from app.modules.jellyfin import Jellyfin -from app.modules.plex import Plex -from app.plugins import _PluginBase -from app.schemas import WebhookEventInfo -from app.schemas.types import MediaType, EventType -from app.utils.http import RequestUtils - -lock = RLock() - - -class BestFilmVersion(_PluginBase): - # 插件名称 - plugin_name = "收藏洗版" - # 插件描述 - plugin_desc = "Jellyfin/Emby/Plex点击收藏电影后,自动订阅洗版。" - # 插件图标 - plugin_icon = "like.jpg" - # 主题色 - plugin_color = "#E4003F" - # 插件版本 - plugin_version = "2.0" - # 插件作者 - plugin_author = "wlj" - # 作者主页 - author_url = "https://github.com/developer-wlj" - # 插件配置项ID前缀 - plugin_config_prefix = "bestfilmversion_" - # 加载顺序 - plugin_order = 13 - # 可使用的用户级别 - auth_level = 2 - - # 私有变量 - _scheduler: Optional[BackgroundScheduler] = None - _cache_path: Optional[Path] = None - subscribechain = None - - # 配置属性 
- _enabled: bool = False - _cron: str = "" - _notify: bool = False - _webhook_enabled: bool = False - _only_once: bool = False - - def init_plugin(self, config: dict = None): - self._cache_path = settings.TEMP_PATH / "__best_film_version_cache__" - self.subscribechain = SubscribeChain() - - # 停止现有任务 - self.stop_service() - - # 配置 - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._webhook_enabled = config.get("webhook_enabled") - self._only_once = config.get("only_once") - - if self._enabled: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if not self._webhook_enabled: - if self._cron: - try: - self._scheduler.add_job(func=self.sync, - trigger=CronTrigger.from_crontab(self._cron), - name="收藏洗版") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - else: - self._scheduler.add_job(self.sync, "interval", minutes=30, name="收藏洗版") - - if self._only_once: - self._only_once = False - self.update_config({ - "enabled": self._enabled, - "cron": self._cron, - "notify": self._notify, - "webhook_enabled": self._webhook_enabled, - "only_once": self._only_once - }) - self._scheduler.add_job(self.sync, 'date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="立即运行收藏洗版") - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - """ - 获取插件API - [{ - "path": "/xx", - "endpoint": self.xxx, - "methods": ["GET", "POST"], - "summary": "API说明" - }] - """ - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'only_once', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'webhook_enabled', - 'label': 'Webhook', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '支持主动定时获取媒体库数据和Webhook实时触发两种方式,两者只能选其一,' - 'Webhook需要在媒体服务器设置发送Webhook报文。' - 'Plex使用主动获取时,建议执行周期设置大于1小时,' - '收藏Api调用Plex官网接口,有频率限制。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": False, - "cron": "*/30 * * * *", - "webhook_enabled": False, - "only_once": False - } - - def get_page(self) -> List[dict]: - """ - 
拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - title = history.get("title") - poster = history.get("poster") - mtype = history.get("type") - time_str = history.get("time") - tmdbid = history.get("tmdbid") - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': poster, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'VCardSubtitle', - 'props': { - 'class': 'pa-2 font-bold break-words whitespace-break-spaces' - }, - 'content': [ - { - 'component': 'a', - 'props': { - 'href': f"https://www.themoviedb.org/movie/{tmdbid}", - 'target': '_blank' - }, - 'text': title - } - ] - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{mtype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{time_str}' - } - ] - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - def sync(self): - """ - 通过流媒体管理工具收藏,自动洗版 - """ - # 获取锁 - _is_lock: bool = lock.acquire(timeout=60) - if not _is_lock: - return - try: - # 读取缓存 - caches = self._cache_path.read_text().split("\n") if self._cache_path.exists() else [] - # 读取历史记录 - history = self.get_data('history') or [] - - # 媒体服务器类型,多个以,分隔 - if not settings.MEDIASERVER: - return - media_servers = settings.MEDIASERVER.split(',') - - # 读取收藏 - all_items = {} - for media_server in media_servers: - if media_server == 'jellyfin': - all_items['jellyfin'] = self.jellyfin_get_items() - elif media_server == 'emby': - all_items['emby'] = self.emby_get_items() - else: - all_items['plex'] = self.plex_get_watchlist() - - def function(y, x): - return y if (x['Name'] in [i['Name'] for i in y]) else (lambda z, u: (z.append(u), z))(y, x)[1] - - # 处理所有结果 - for server, all_item in all_items.items(): - # all_item 根据电影名去重 - result = reduce(function, all_item, []) - for data in result: - # 检查缓存 - if data.get('Name') in caches: - continue - - # 获取详情 - if server == 'jellyfin': - item_info_resp = Jellyfin().get_iteminfo(itemid=data.get('Id')) - elif server == 'emby': - item_info_resp = Emby().get_iteminfo(itemid=data.get('Id')) - else: - item_info_resp = self.plex_get_iteminfo(itemid=data.get('Id')) - logger.debug(f'BestFilmVersion插件 item打印 {item_info_resp}') - if not item_info_resp: - continue - - # 只接受Movie类型 - if data.get('Type') != 'Movie': - continue - - # 获取tmdb_id - tmdb_id = item_info_resp.tmdbid - if not tmdb_id: - continue - # 识别媒体信息 - mediainfo: MediaInfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE) - if not mediainfo: - logger.warn(f'未识别到媒体信息,标题:{data.get("Name")},tmdbid:{tmdb_id}') 
- continue - # 添加订阅 - self.subscribechain.add(mtype=MediaType.MOVIE, - title=mediainfo.title, - year=mediainfo.year, - tmdbid=mediainfo.tmdb_id, - best_version=True, - username="收藏洗版", - exist_ok=True) - # 加入缓存 - caches.append(data.get('Name')) - # 存储历史记录 - if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]: - history.append({ - "title": mediainfo.title, - "type": mediainfo.type.value, - "year": mediainfo.year, - "poster": mediainfo.get_poster_image(), - "overview": mediainfo.overview, - "tmdbid": mediainfo.tmdb_id, - "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S") - }) - # 保存历史记录 - self.save_data('history', history) - # 保存缓存 - self._cache_path.write_text("\n".join(caches)) - finally: - lock.release() - - def jellyfin_get_items(self) -> List[dict]: - # 获取所有user - users_url = "[HOST]Users?&apikey=[APIKEY]" - users = self.get_users(Jellyfin().get_data(users_url)) - if not users: - logger.info(f"bestfilmversion/users_url: {users_url}") - return [] - all_items = [] - for user in users: - # 根据加入日期 降序排序 - url = "[HOST]Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \ - "&SortOrder=Descending" \ - "&Filters=IsFavorite" \ - "&Recursive=true" \ - "&Fields=PrimaryImageAspectRatio%2CBasicSyncInfo" \ - "&CollapseBoxSetItems=false" \ - "&ExcludeLocationTypes=Virtual" \ - "&EnableTotalRecordCount=false" \ - "&Limit=20" \ - "&apikey=[APIKEY]" - resp = self.get_items(Jellyfin().get_data(url)) - if not resp: - continue - all_items.extend(resp) - return all_items - - def emby_get_items(self) -> List[dict]: - # 获取所有user - get_users_url = "[HOST]Users?&api_key=[APIKEY]" - users = self.get_users(Emby().get_data(get_users_url)) - if not users: - return [] - all_items = [] - for user in users: - # 根据加入日期 降序排序 - url = "[HOST]emby/Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \ - "&SortOrder=Descending" \ - "&Filters=IsFavorite" \ - "&Recursive=true" \ - "&Fields=PrimaryImageAspectRatio%2CBasicSyncInfo" \ - "&CollapseBoxSetItems=false" \ - "&ExcludeLocationTypes=Virtual" \ - "&EnableTotalRecordCount=false" \ - "&Limit=20&api_key=[APIKEY]" - resp = self.get_items(Emby().get_data(url)) - if not resp: - continue - all_items.extend(resp) - return all_items - - @staticmethod - def get_items(resp: Response): - try: - if resp: - return resp.json().get("Items") or [] - else: - return [] - except Exception as e: - print(str(e)) - return [] - - @staticmethod - def get_users(resp: Response): - try: - if resp: - return [data['Id'] for data in resp.json()] - else: - logger.error(f"BestFilmVersion/Users 未获取到返回数据") - return [] - except Exception as e: - logger.error(f"连接BestFilmVersion/Users 出错:" + str(e)) - return [] - - @staticmethod - def plex_get_watchlist() -> List[dict]: - # 根据加入日期 降序排序 - url = f"https://metadata.provider.plex.tv/library/sections/watchlist/all?type=1&sort=addedAt%3Adesc" \ - f"&X-Plex-Container-Start=0&X-Plex-Container-Size=50" \ - f"&X-Plex-Token={settings.PLEX_TOKEN}" - res = [] - try: - resp = RequestUtils().get_res(url=url) - if resp: - dom = parseString(resp.text) - # 获取文档元素对象 - elem = dom.documentElement - # 获取 指定元素 - eles = elem.getElementsByTagName('Video') - if not eles: - return [] - for ele in eles: - data = {} - # 获取标签中内容 - ele_id = ele.attributes['ratingKey'].nodeValue - ele_title = ele.attributes['title'].nodeValue - ele_type = ele.attributes['type'].nodeValue - _type = "Movie" if ele_type == "movie" else "" - data['Id'] = ele_id - data['Name'] = ele_title - data['Type'] = _type - res.append(data) - return res - else: - logger.error(f"Plex/Watchlist 
未获取到返回数据") - return [] - except Exception as e: - logger.error(f"连接Plex/Watchlist 出错:" + str(e)) - return [] - - @staticmethod - def plex_get_iteminfo(itemid): - url = f"https://metadata.provider.plex.tv/library/metadata/{itemid}" \ - f"?X-Plex-Token={settings.PLEX_TOKEN}" - ids = [] - try: - resp = RequestUtils(accept_type="application/json, text/plain, */*").get_res(url=url) - if resp: - metadata = resp.json().get('MediaContainer').get('Metadata') - for item in metadata: - _guid = item.get('Guid') - if not _guid: - continue - - id_list = [h.get('id') for h in _guid if h.get('id').__contains__("tmdb")] - if not id_list: - continue - - ids.append({'Name': 'TheMovieDb', 'Url': id_list[0]}) - - if not ids: - return [] - return {'ExternalUrls': ids} - else: - logger.error(f"Plex/Items 未获取到返回数据") - return [] - except Exception as e: - logger.error(f"连接Plex/Items 出错:" + str(e)) - return [] - - @eventmanager.register(EventType.WebhookMessage) - def webhook_message_action(self, event): - - if not self._enabled: - return - if not self._webhook_enabled: - return - - data: WebhookEventInfo = event.event_data - # 排除不是收藏调用 - if data.channel not in ['jellyfin', 'emby', 'plex']: - return - if data.channel in ['emby', 'plex'] and data.event != 'item.rate': - return - if data.channel == 'jellyfin' and data.save_reason != 'UpdateUserRating': - return - logger.info(f'BestFilmVersion/webhook_message_action WebhookEventInfo打印:{data}') - - # 获取锁 - _is_lock: bool = lock.acquire(timeout=60) - if not _is_lock: - return - try: - if not data.tmdb_id: - info = None - if (data.channel == 'jellyfin' - and data.save_reason == 'UpdateUserRating' - and data.item_favorite): - info = Jellyfin().get_iteminfo(itemid=data.item_id) - elif data.channel == 'emby' and data.event == 'item.rate': - info = Emby().get_iteminfo(itemid=data.item_id) - elif data.channel == 'plex' and data.event == 'item.rate': - info = Plex().get_iteminfo(itemid=data.item_id) - logger.debug(f'BestFilmVersion/webhook_message_action item打印:{info}') - if not info: - return - if info.item_type not in ['Movie', 'MOV', 'movie']: - return - # 获取tmdb_id - tmdb_id = info.tmdbid - else: - tmdb_id = data.tmdb_id - if (data.channel == 'jellyfin' - and (data.save_reason != 'UpdateUserRating' or not data.item_favorite)): - return - if data.item_type not in ['Movie', 'MOV', 'movie']: - return - # 识别媒体信息 - mediainfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE) - if not mediainfo: - logger.warn(f'未识别到媒体信息,标题:{data.item_name},tmdbID:{tmdb_id}') - return - # 读取缓存 - caches = self._cache_path.read_text().split("\n") if self._cache_path.exists() else [] - # 检查缓存 - if data.item_name in caches: - return - # 读取历史记录 - history = self.get_data('history') or [] - # 添加订阅 - self.subscribechain.add(mtype=MediaType.MOVIE, - title=mediainfo.title, - year=mediainfo.year, - tmdbid=mediainfo.tmdb_id, - best_version=True, - username="收藏洗版", - exist_ok=True) - # 加入缓存 - caches.append(data.item_name) - # 存储历史记录 - if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]: - history.append({ - "title": mediainfo.title, - "type": mediainfo.type.value, - "year": mediainfo.year, - "poster": mediainfo.get_poster_image(), - "overview": mediainfo.overview, - "tmdbid": mediainfo.tmdb_id, - "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S") - }) - # 保存历史记录 - self.save_data('history', history) - # 保存缓存 - self._cache_path.write_text("\n".join(caches)) - finally: - lock.release() diff --git a/app/plugins/brushflow/__init__.py b/app/plugins/brushflow/__init__.py deleted file 
mode 100644 index 5763c547..00000000 --- a/app/plugins/brushflow/__init__.py +++ /dev/null @@ -1,1891 +0,0 @@ -import re -import threading -import time -from datetime import datetime, timedelta -from threading import Event -from typing import Any, List, Dict, Tuple, Optional, Union - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler - -from app import schemas -from app.chain.torrents import TorrentsChain -from app.core.config import settings -from app.db.site_oper import SiteOper -from app.helper.sites import SitesHelper -from app.log import logger -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.schemas import Notification, NotificationType, TorrentInfo -from app.utils.string import StringUtils - -lock = threading.Lock() - - -class BrushFlow(_PluginBase): - # 插件名称 - plugin_name = "站点刷流" - # 插件描述 - plugin_desc = "自动托管刷流,将会提高对应站点的访问频率。" - # 插件图标 - plugin_icon = "brush.jpg" - # 主题色 - plugin_color = "#FFD54E" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "brushflow_" - # 加载顺序 - plugin_order = 21 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - siteshelper = None - siteoper = None - torrents = None - sites = None - qb = None - tr = None - # 添加种子定时 - _cron = 10 - # 检查种子定时 - _check_interval = 5 - # 退出事件 - _event = Event() - _scheduler = None - _enabled = False - _notify = True - _onlyonce = False - _brushsites = [] - _downloader = "qbittorrent" - _disksize = 0 - _freeleech = "free" - _maxupspeed = 0 - _maxdlspeed = 0 - _maxdlcount = 0 - _include = "" - _exclude = "" - _size = 0 - _seeder = 0 - _pubtime = 0 - _seed_time = 0 - _seed_ratio = 0 - _seed_size = 0 - _download_time = 0 - _seed_avgspeed = 0 - _seed_inactivetime = 0 - _up_speed = 0 - _dl_speed = 0 - _save_path = "" - _clear_task = False - - def init_plugin(self, config: dict = None): - self.siteshelper = SitesHelper() - self.siteoper = SiteOper() - self.torrents = TorrentsChain() - self.sites = SitesHelper() - if config: - self._enabled = config.get("enabled") - self._notify = config.get("notify") - self._onlyonce = config.get("onlyonce") - self._brushsites = config.get("brushsites") - self._downloader = config.get("downloader") - self._disksize = config.get("disksize") - self._freeleech = config.get("freeleech") - self._maxupspeed = config.get("maxupspeed") - self._maxdlspeed = config.get("maxdlspeed") - self._maxdlcount = config.get("maxdlcount") - self._include = config.get("include") - self._exclude = config.get("exclude") - self._size = config.get("size") - self._seeder = config.get("seeder") - self._pubtime = config.get("pubtime") - self._seed_time = config.get("seed_time") - self._seed_ratio = config.get("seed_ratio") - self._seed_size = config.get("seed_size") - self._download_time = config.get("download_time") - self._seed_avgspeed = config.get("seed_avgspeed") - self._seed_inactivetime = config.get("seed_inactivetime") - self._up_speed = config.get("up_speed") - self._dl_speed = config.get("dl_speed") - self._save_path = config.get("save_path") - self._clear_task = config.get("clear_task") - - # 过滤掉已删除的站点 - self._brushsites = [site.get("id") for site in self.sites.get_indexers() if - not site.get("public") and site.get("id") in self._brushsites] - - # 保存配置 - self.__update_config() - - if self._clear_task: - # 清除统计数据 - self.save_data("statistic", {}) - # 清除种子记录 - self.save_data("torrents", {}) - # 
关闭一次性开关 - self._clear_task = False - self.__update_config() - - # 停止现有任务 - self.stop_service() - - # 启动定时任务 & 立即运行一次 - if self.get_state() or self._onlyonce: - self.qb = Qbittorrent() - self.tr = Transmission() - # 检查配置 - if self._downloader == "qbittorrent": - if self.qb.is_inactive(): - logger.error("站点刷流任务出错:Qbittorrent未连接") - self.systemmessage.put("站点刷流任务出错:Qbittorrent未连接") - return - elif self._downloader == "transmission": - if self.tr.is_inactive(): - logger.error("站点刷流任务出错:Transmission未连接") - self.systemmessage.put("站点刷流任务出错:Transmission未连接") - return - if self._disksize and not StringUtils.is_number(self._disksize): - logger.error(f"站点刷流任务出错,保种体积设置错误:{self._disksize}") - self.systemmessage.put(f"站点刷流任务出错,保种体积设置错误:{self._disksize}") - self._disksize = 0 - return - if self._maxupspeed and not StringUtils.is_number(self._maxupspeed): - logger.error(f"站点刷流任务出错,总上传带宽设置错误:{self._maxupspeed}") - self.systemmessage.put(f"站点刷流任务出错,总上传带宽设置错误:{self._maxupspeed}") - self._maxupspeed = 0 - return - if self._maxdlspeed and not StringUtils.is_number(self._maxdlspeed): - logger.error(f"站点刷流任务出错,总下载带宽设置错误:{self._maxdlspeed}") - self.systemmessage.put(f"站点刷流任务出错,总下载带宽设置错误:{self._maxdlspeed}") - self._maxdlspeed = 0 - return - if self._maxdlcount and not StringUtils.is_number(self._maxdlcount): - logger.error(f"站点刷流任务出错,同时下载任务数设置错误:{self._maxdlcount}") - self.systemmessage.put(f"站点刷流任务出错,同时下载任务数设置错误:{self._maxdlcount}") - self._maxdlcount = 0 - return - if self._size: - size = str(self._size).split("-")[0] - if not StringUtils.is_number(size): - logger.error(f"站点刷流任务出错,种子大小设置错误:{self._size}") - self.systemmessage.put(f"站点刷流任务出错,种子大小设置错误:{self._size}") - self._size = 0 - return - if self._seeder: - seeder = str(self._seeder).split("-")[0] - if not StringUtils.is_number(seeder): - logger.error(f"站点刷流任务出错,做种人数设置错误:{self._seeder}") - self.systemmessage.put(f"站点刷流任务出错,做种人数设置错误:{self._seeder}") - self._seeder = 0 - return - if self._seed_time and not StringUtils.is_number(self._seed_time): - logger.error(f"站点刷流任务出错,做种时间设置错误:{self._seed_time}") - self.systemmessage.put(f"站点刷流任务出错,做种时间设置错误:{self._seed_time}") - self._seed_time = 0 - return - if self._seed_ratio and not StringUtils.is_number(self._seed_ratio): - logger.error(f"站点刷流任务出错,分享率设置错误:{self._seed_ratio}") - self.systemmessage.put(f"站点刷流任务出错,分享率设置错误:{self._seed_ratio}") - self._seed_ratio = 0 - return - if self._seed_size and not StringUtils.is_number(self._seed_size): - logger.error(f"站点刷流任务出错,上传量设置错误:{self._seed_size}") - self.systemmessage.put(f"站点刷流任务出错,上传量设置错误:{self._seed_size}") - self._seed_size = 0 - return - if self._download_time and not StringUtils.is_number(self._download_time): - logger.error(f"站点刷流任务出错,下载超时时间设置错误:{self._download_time}") - self.systemmessage.put(f"站点刷流任务出错,下载超时时间设置错误:{self._download_time}") - self._download_time = 0 - return - if self._seed_avgspeed and not StringUtils.is_number(self._seed_avgspeed): - logger.error(f"站点刷流任务出错,平均上传速度设置错误:{self._seed_avgspeed}") - self.systemmessage.put(f"站点刷流任务出错,平均上传速度设置错误:{self._seed_avgspeed}") - self._seed_avgspeed = 0 - return - if self._seed_inactivetime and not StringUtils.is_number(self._seed_inactivetime): - logger.error(f"站点刷流任务出错,未活动时间设置错误:{self._seed_inactivetime}") - self.systemmessage.put(f"站点刷流任务出错,未活动时间设置错误:{self._seed_inactivetime}") - self._seed_inactivetime = 0 - return - if self._up_speed and not StringUtils.is_number(self._up_speed): - logger.error(f"站点刷流任务出错,单任务上传限速设置错误:{self._up_speed}") - self.systemmessage.put(f"站点刷流任务出错,单任务上传限速设置错误:{self._up_speed}") - 
self._up_speed = 0 - return - if self._dl_speed and not StringUtils.is_number(self._dl_speed): - logger.error(f"站点刷流任务出错,单任务下载限速设置错误:{self._dl_speed}") - self.systemmessage.put(f"站点刷流任务出错,单任务下载限速设置错误:{self._dl_speed}") - self._dl_speed = 0 - return - - # 检查必要条件 - if not self._brushsites or not self._downloader: - return - - # 启动任务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - logger.info(f"站点刷流服务启动,周期:{self._cron}分钟") - try: - self._scheduler.add_job(self.brush, 'interval', minutes=self._cron) - except Exception as e: - logger.error(f"站点刷流服务启动失败:{str(e)}") - self.systemmessage.put(f"站点刷流服务启动失败:{str(e)}") - return - if self._onlyonce: - logger.info(f"站点刷流服务启动,立即运行一次") - self._scheduler.add_job(self.brush, 'date', - run_date=datetime.now( - tz=pytz.timezone(settings.TZ) - ) + timedelta(seconds=3), - name="站点刷流服务") - # 关闭一次性开关 - self._onlyonce = False - self.__update_config() - if self._scheduler.get_jobs(): - # 增加检查任务 - self._scheduler.add_job(self.check, 'interval', - minutes=self._check_interval, - name="站点刷流检查服务") - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return True if self._enabled and self._brushsites and self._downloader else False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - # 站点的可选项 - site_options = [{"title": site.get("name"), "value": site.get("id")} - for site in self.siteshelper.get_indexers()] - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'brushsites', - 'label': '刷流站点', - 'items': site_options - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'downloader', - 'label': '下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'disksize', - 'label': '保种体积(GB)', - 'placeholder': '达到后停止新增任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'freeleech', - 'label': '促销', - 'items': [ - {'title': '全部(包括普通)', 'value': ''}, - {'title': '免费', 'value': 'free'}, - {'title': '2X免费', 'value': '2xfree'}, - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'maxupspeed', 
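
The scheduling just above follows a pattern shared by most of these plugins: a BackgroundScheduler runs the main job on a fixed interval, an optional "run once now" job fires a few seconds after startup, and a companion interval job does the periodic check. A minimal standalone sketch of that pattern (the job bodies and the timezone string are placeholders, not the plugin's own):

```python
import time
from datetime import datetime, timedelta

import pytz
from apscheduler.schedulers.background import BackgroundScheduler

TZ = "Asia/Shanghai"  # assumption: settings.TZ resolves to a name like this


def brush():
    print("brush job")


def check():
    print("check job")


scheduler = BackgroundScheduler(timezone=TZ)
# main job every 10 minutes, mirroring _cron = 10
scheduler.add_job(brush, "interval", minutes=10)
# optional immediate run, scheduled a few seconds in the future
scheduler.add_job(brush, "date",
                  run_date=datetime.now(tz=pytz.timezone(TZ)) + timedelta(seconds=3))
# companion check job every 5 minutes, mirroring _check_interval = 5
scheduler.add_job(check, "interval", minutes=5)
scheduler.print_jobs()
scheduler.start()

time.sleep(10)  # keep the demo process alive long enough for the one-shot job
```
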
- 'label': '总上传带宽(KB/s)', - 'placeholder': '达到后停止新增任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'maxdlspeed', - 'label': '总下载带宽(KB/s)', - 'placeholder': '达到后停止新增任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'maxdlcount', - 'label': '同时下载任务数', - 'placeholder': '达到后停止新增任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'include', - 'label': '包含规则', - 'placeholder': '支持正式表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'exclude', - 'label': '排除规则', - 'placeholder': '支持正式表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'size', - 'label': '种子大小(GB)', - 'placeholder': '如:5 或 5-10' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seeder', - 'label': '做种人数', - 'placeholder': '如:5 或 5-10' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'pubtime', - 'label': '发布时间(分钟)', - 'placeholder': '如:5 或 5-10' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seed_time', - 'label': '做种时间(小时)', - 'placeholder': '达到后删除任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seed_ratio', - 'label': '分享率', - 'placeholder': '达到后删除任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seed_size', - 'label': '上传量(GB)', - 'placeholder': '达到后删除任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'download_time', - 'label': '下载超时时间(小时)', - 'placeholder': '达到后删除任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seed_avgspeed', - 'label': '平均上传速度(KB/s)', - 'placeholder': '低于时删除任务' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seed_inactivetime', - 'label': '未活动时间(分钟) ', - 'placeholder': '超过时删除任务' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'up_speed', - 'label': '单任务上传限速(KB/s)', - 'placeholder': '种子上传限速' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'dl_speed', - 'label': '单任务下载限速(KB/s)', - 'placeholder': '种子下载限速' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - "cols": 
12, - "md": 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'save_path', - 'label': '保存目录', - 'placeholder': '留空自动' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear_task', - 'label': '清除统计数据', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "onlyonce": False, - "clear_task": False, - "freeleech": "free" - } - - def get_page(self) -> List[dict]: - # 种子明细 - torrents = self.get_data("torrents") or {} - # 统计数据 - stattistic_data: Dict[str, dict] = self.get_data("statistic") or { - "count": 0, - "deleted": 0, - "uploaded": 0, - "downloaded": 0, - } - if not torrents: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - else: - data_list = torrents.values() - # 按time倒序排序 - data_list = sorted(data_list, key=lambda x: x.get("time") or 0, reverse=True) - # 总上传量格式化 - total_upload = StringUtils.str_filesize(stattistic_data.get("uploaded") or 0) - # 总下载量格式化 - total_download = StringUtils.str_filesize(stattistic_data.get("downloaded") or 0) - # 下载种子数 - total_count = stattistic_data.get("count") or 0 - # 删除种子数 - total_deleted = stattistic_data.get("deleted") or 0 - # 种子数据明细 - torrent_trs = [ - { - 'component': 'tr', - 'props': { - 'class': 'text-sm' - }, - 'content': [ - { - 'component': 'td', - 'props': { - 'class': 'whitespace-nowrap break-keep text-high-emphasis' - }, - 'text': data.get("site_name") - }, - { - 'component': 'td', - 'text': data.get("title") - }, - { - 'component': 'td', - 'text': StringUtils.str_filesize(data.get("size")) - }, - { - 'component': 'td', - 'text': StringUtils.str_filesize(data.get("uploaded") or 0) - }, - { - 'component': 'td', - 'text': StringUtils.str_filesize(data.get("downloaded") or 0) - }, - { - 'component': 'td', - 'text': round(data.get('ratio') or 0, 2) - }, - { - 'component': 'td', - 'props': { - 'class': 'text-no-wrap' - }, - 'text': "已删除" if data.get("deleted") else "正常" - } - ] - } for data in data_list - ] - - # 拼装页面 - return [ - { - 'component': 'VRow', - 'content': [ - # 总上传量 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/upload.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总上传量' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': total_upload - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 总下载量 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': 
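
For orientation, each of these plugins declares its settings page the same way: get_form returns a two-element tuple of a Vuetify component tree (nested dicts) and a dict of default values keyed by the widgets' model names. A stripped-down sketch of that contract with a toy single-switch form, not any plugin's real configuration:

```python
from typing import Any, Dict, List, Tuple


def get_form() -> Tuple[List[dict], Dict[str, Any]]:
    form = [{
        "component": "VForm",
        "content": [{
            "component": "VRow",
            "content": [{
                "component": "VCol",
                "props": {"cols": 12, "md": 4},
                "content": [{
                    # the 'model' key ties the widget to a config field
                    "component": "VSwitch",
                    "props": {"model": "enabled", "label": "Enable plugin"},
                }],
            }],
        }],
    }]
    defaults = {"enabled": False}
    return form, defaults
```
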
'/plugin_icon/download.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总下载量' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': total_download - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 下载种子数 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/seed.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '下载种子数' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': total_count - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 删除种子数 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/delete.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '删除种子数' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': total_deleted - } - ] - } - ] - } - ] - } - ] - } - ] - }, - # 种子明细 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTable', - 'props': { - 'hover': True - }, - 'content': [ - { - 'component': 'thead', - 'props': { - 'class': 'text-no-wrap' - }, - 'content': [ - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '站点' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '标题' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '大小' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '上传量' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '下载量' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '分享率' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '状态' - } - ] - }, - { - 'component': 'tbody', - 'content': torrent_trs - } - ] - } - ] - } - ] - } - ] - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) - - def __update_config(self): - """ - 更新配置 - """ - self.update_config({ - "onlyonce": self._onlyonce, - "enabled": 
self._enabled, - "notify": self._notify, - "brushsites": self._brushsites, - "downloader": self._downloader, - "disksize": self._disksize, - "freeleech": self._freeleech, - "maxupspeed": self._maxupspeed, - "maxdlspeed": self._maxdlspeed, - "maxdlcount": self._maxdlcount, - "include": self._include, - "exclude": self._exclude, - "size": self._size, - "seeder": self._seeder, - "pubtime": self._pubtime, - "seed_time": self._seed_time, - "seed_ratio": self._seed_ratio, - "seed_size": self._seed_size, - "download_time": self._download_time, - "seed_avgspeed": self._seed_avgspeed, - "seed_inactivetime": self._seed_inactivetime, - "up_speed": self._up_speed, - "dl_speed": self._dl_speed, - "save_path": self._save_path, - "clear_task": self._clear_task - }) - - def brush(self): - """ - 执行刷流动作,添加下载任务 - """ - if not self._brushsites or not self._downloader: - return - - with lock: - logger.info(f"开始执行刷流任务 ...") - # 读取种子记录 - task_info: Dict[str, dict] = self.get_data("torrents") or {} - if task_info: - # 当前保种大小 - torrents_size = sum([ - task.get("size") or 0 - for task in task_info.values() if not task.get("deleted") - ]) - else: - torrents_size = 0 - # 读取统计数据 - statistic_info = self.get_data("statistic") or { - "count": 0, - "deleted": 0, - } - # 处理所有站点 - for siteid in self._brushsites: - siteinfo = self.siteoper.get(siteid) - if not siteinfo: - logger.warn(f"站点不存在:{siteid}") - continue - logger.info(f"开始获取站点 {siteinfo.name} 的新种子 ...") - torrents = self.torrents.browse(domain=siteinfo.domain) - if not torrents: - logger.info(f"站点 {siteinfo.name} 没有获取到种子") - continue - # 按pubdate降序排列 - torrents.sort(key=lambda x: x.pubdate or '', reverse=True) - # 过滤种子 - for torrent in torrents: - # 控重 - if f"{torrent.site_name}{torrent.title}" in [ - f"{task.get('site_name')}{task.get('title')}" for task in task_info.values() - ]: - continue - # 促销 - if self._freeleech and torrent.downloadvolumefactor != 0: - continue - if self._freeleech == "2xfree" and torrent.uploadvolumefactor != 2: - continue - # 包含规则 - if self._include and not re.search(r"%s" % self._include, torrent.title, re.I): - continue - # 排除规则 - if self._exclude and re.search(r"%s" % self._exclude, torrent.title, re.I): - continue - # 种子大小(GB) - if self._size: - sizes = str(self._size).split("-") - begin_size = sizes[0] - if len(sizes) > 1: - end_size = sizes[-1] - else: - end_size = 0 - if begin_size and not end_size \ - and torrent.size > float(begin_size) * 1024 ** 3: - continue - elif begin_size and end_size \ - and not float(begin_size) * 1024 ** 3 <= torrent.size <= float(end_size) * 1024 ** 3: - continue - # 做种人数 - if self._seeder: - seeders = str(self._seeder).split("-") - begin_seeder = seeders[0] - if len(seeders) > 1: - end_seeder = seeders[-1] - else: - end_seeder = 0 - if begin_seeder and not end_seeder \ - and torrent.seeders > int(begin_seeder): - continue - elif begin_seeder and end_seeder \ - and not int(begin_seeder) <= torrent.seeders <= int(end_seeder): - continue - # 计算发布时间,将字符串转换为时间 - pubdate_minutes = self.__get_pubminutes(torrent.pubdate) - # 发布时间(分钟) - if self._pubtime: - pubtimes = str(self._pubtime).split("-") - begin_pubtime = pubtimes[0] - if len(pubtimes) > 1: - end_pubtime = pubtimes[-1] - else: - end_pubtime = 0 - # 将种子发布日志转换为与当前时间的差 - if begin_pubtime and not end_pubtime \ - and pubdate_minutes > int(begin_pubtime): - continue - elif begin_pubtime and end_pubtime \ - and not int(begin_pubtime) <= pubdate_minutes <= int(end_pubtime): - continue - # 同时下载任务数 - downloads = self.__get_downloading_count() - if 
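
The size, seeder and publish-time filters above all accept either a single number ("5") or a range ("5-10") and repeat the same split/compare logic inline. A hypothetical helper expressing that rule once, with the same semantics as the inline checks (a lone number acts as an upper bound, a pair keeps values inside the closed interval):

```python
from typing import Optional, Tuple


def parse_range(value: str) -> Tuple[float, Optional[float]]:
    """Parse '5' or '5-10' into (low, high); high is None for a single value."""
    parts = str(value).split("-")
    low = float(parts[0])
    high = float(parts[-1]) if len(parts) > 1 else None
    return low, high


def in_range(x: float, value: str) -> bool:
    """Single number: keep x up to that bound. 'low-high': keep low <= x <= high."""
    low, high = parse_range(value)
    if high is None:
        return x <= low
    return low <= x <= high


# e.g. a size filter of "5" keeps torrents up to 5 GB; "5-10" keeps 5..10 GB
assert in_range(4.2, "5") is True
assert in_range(7, "5-10") is True
assert in_range(12, "5-10") is False
```
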
self._maxdlcount and downloads >= int(self._maxdlcount): - logger.warn(f"当前同时下载任务数 {downloads} 已达到最大值 {self._maxdlcount},停止新增任务") - break - # 获取下载器的下载信息 - downloader_info = self.__get_downloader_info() - if downloader_info: - current_upload_speed = downloader_info.upload_speed or 0 - current_download_speed = downloader_info.download_speed or 0 - # 总上传带宽(KB/s) - if self._maxupspeed \ - and current_upload_speed >= float(self._maxupspeed) * 1024: - logger.warn(f"当前总上传带宽 {StringUtils.str_filesize(current_upload_speed)} " - f"已达到最大值 {self._maxupspeed} KB/s,暂时停止新增任务") - break - # 总下载带宽(KB/s) - if self._maxdlspeed \ - and current_download_speed >= float(self._maxdlspeed) * 1024: - logger.warn(f"当前总下载带宽 {StringUtils.str_filesize(current_download_speed)} " - f"已达到最大值 {self._maxdlspeed} KB/s,暂时停止新增任务") - break - # 保种体积(GB) - if self._disksize \ - and (torrents_size + torrent.size) > float(self._disksize) * 1024 ** 3: - logger.warn(f"当前做种体积 {StringUtils.str_filesize(torrents_size)} " - f"已超过保种体积 {self._disksize},停止新增任务") - break - # 添加下载任务 - hash_string = self.__download(torrent=torrent) - if not hash_string: - logger.warn(f"{torrent.title} 添加刷流任务失败!") - continue - # 保存任务信息 - task_info[hash_string] = { - "site": siteinfo.id, - "site_name": siteinfo.name, - "title": torrent.title, - "size": torrent.size, - "pubdate": torrent.pubdate, - "ratio": 0, - "downloaded": 0, - "uploaded": 0, - "deleted": False, - "time": time.time() - } - # 统计数据 - torrents_size += torrent.size - statistic_info["count"] += 1 - # 发送消息 - self.__send_add_message(torrent) - - # 保存数据 - self.save_data("torrents", task_info) - # 保存统计数据 - self.save_data("statistic", statistic_info) - logger.info(f"刷流任务执行完成") - - def check(self): - """ - 定时检查,删除下载任务 - { - hash: { - site_name: - size: - } - } - """ - if not self._downloader: - return - - with lock: - logger.info(f"开始检查刷流下载任务 ...") - # 读取种子记录 - task_info: Dict[str, dict] = self.get_data("torrents") or {} - # 种子Hash - check_hashs = list(task_info.keys()) - if not task_info or not check_hashs: - logger.info(f"没有需要检查的刷流下载任务") - return - logger.info(f"共有 {len(check_hashs)} 个任务正在刷流,开始检查任务状态") - # 获取下载器实例 - downloader = self.__get_downloader(self._downloader) - if not downloader: - return - # 读取统计数据 - statistic_info = self.get_data("statistic") or { - "count": 0, - "deleted": 0, - "uploaded": 0, - "downloaded": 0 - } - # 获取下载器中的种子 - torrents, error = downloader.get_torrents(ids=check_hashs) - if error: - logger.warn("连接下载器出错,将在下个时间周期重试") - return - if not torrents: - logger.warn(f"刷流任务在下载器中不存在,清除记录") - self.save_data("torrents", {}) - return - # 检查种子状态,判断是否要删种 - remove_torrents = [] - for torrent in torrents: - torrent_hash = self.__get_hash(torrent) - site_name = task_info.get(torrent_hash).get("site_name") - torrent_info = self.__get_torrent_info(torrent) - # 更新上传量、下载量 - if not task_info.get(torrent_info.get("hash")): - task_info[torrent_hash] = { - "downloaded": torrent_info.get("downloaded"), - "uploaded": torrent_info.get("uploaded"), - "ratio": torrent_info.get("ratio"), - } - else: - task_info[torrent_hash]["downloaded"] = torrent_info.get("downloaded") - task_info[torrent_hash]["uploaded"] = torrent_info.get("uploaded") - task_info[torrent_hash]["ratio"] = torrent_info.get("ratio") - # 做种时间(小时) - if self._seed_time: - if torrent_info.get("seeding_time") >= float(self._seed_time) * 3600: - logger.info(f"做种时间达到 {self._seed_time} 小时,删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - 
self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"做种时间达到 {self._seed_time} 小时") - continue - # 分享率 - if self._seed_ratio: - if torrent_info.get("ratio") >= float(self._seed_ratio): - logger.info(f"分享率达到 {self._seed_ratio},删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"分享率达到 {self._seed_ratio}") - continue - # 上传量(GB) - if self._seed_size: - if torrent_info.get("uploaded") >= float(self._seed_size) * 1024 * 1024 * 1024: - logger.info(f"上传量达到 {self._seed_size} GB,删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"上传量达到 {self._seed_size} GB") - continue - # 下载耗时(小时) - if self._download_time \ - and torrent_info.get("downloaded") < torrent_info.get("total_size"): - if torrent_info.get("dltime") >= float(self._download_time) * 3600: - logger.info(f"下载耗时达到 {self._download_time} 小时,删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"下载耗时达到 {self._download_time} 小时") - continue - # 平均上传速度(KB / s),大于30分钟才有效 - if self._seed_avgspeed: - if torrent_info.get("avg_upspeed") <= float(self._seed_avgspeed) * 1024 and \ - torrent_info.get("seeding_time") >= 30 * 60: - logger.info(f"平均上传速度低于 {self._seed_avgspeed} KB/s,删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"平均上传速度低于 {self._seed_avgspeed} KB/s") - continue - # 未活动时间(分钟) - if self._seed_inactivetime: - if torrent_info.get("iatime") >= float(self._seed_inactivetime) * 60: - logger.info( - f"未活动时间达到 {self._seed_inactivetime} 分钟,删除种子:{torrent_info.get('title')}") - downloader.delete_torrents(ids=torrent_hash, delete_file=True) - remove_torrents.append(torrent_info) - self.__send_delete_message(site_name=site_name, - torrent_title=torrent_info.get("title"), - reason=f"未活动时间达到 {self._seed_inactivetime} 分钟") - continue - # 统计删除状态 - if remove_torrents: - if not statistic_info.get("deleted"): - statistic_info["deleted"] = 0 - statistic_info["deleted"] += len(remove_torrents) - # 删除任务记录 - for torrent in remove_torrents: - task_info[torrent.get("hash")].update({ - "deleted": True, - }) - # 统计总上传量、下载量 - total_uploaded = 0 - total_downloaded = 0 - for hash_str, task in task_info.items(): - total_downloaded += task.get("downloaded") or 0 - total_uploaded += task.get("uploaded") or 0 - # 更新统计数据 - statistic_info["uploaded"] = total_uploaded - statistic_info["downloaded"] = total_downloaded - # 打印统计数据 - logger.info(f"刷流任务统计数据:" - f"总任务数:{len(task_info)}," - f"已删除:{statistic_info.get('deleted')}," - f"总上传量:{StringUtils.str_filesize(statistic_info.get('uploaded'))}," - f"总下载量:{StringUtils.str_filesize(statistic_info.get('downloaded'))}") - # 保存统计数据 - self.save_data("statistic", statistic_info) - # 保存任务记录 - self.save_data("torrents", task_info) - logger.info(f"刷流下载任务检查完成") - - def __get_downloader(self, dtype: str) -> Optional[Union[Transmission, Qbittorrent]]: 
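
Each removal rule in check() compares a value reported by the downloader (seconds, bytes, bytes per second) against a user setting in friendlier units (hours, GB, KB/s), so every branch carries its own conversion. A condensed, hypothetical restatement of those thresholds; the field names follow the normalized dict built by __get_torrent_info below:

```python
GiB = 1024 ** 3
KiB = 1024


def should_delete(info: dict, cfg: dict) -> str:
    """Return a removal reason, or '' to keep the torrent.

    info: normalized torrent dict (seeding_time/dltime/iatime in seconds,
          uploaded/downloaded/total_size in bytes, avg_upspeed in bytes/s).
    cfg:  user settings (hours, GB, KB/s, minutes); zero or None disables a rule.
    """
    if cfg.get("seed_time") and info["seeding_time"] >= cfg["seed_time"] * 3600:
        return f"seeding time reached {cfg['seed_time']} h"
    if cfg.get("seed_ratio") and info["ratio"] >= cfg["seed_ratio"]:
        return f"ratio reached {cfg['seed_ratio']}"
    if cfg.get("seed_size") and info["uploaded"] >= cfg["seed_size"] * GiB:
        return f"uploaded {cfg['seed_size']} GB"
    if cfg.get("download_time") and info["downloaded"] < info["total_size"] \
            and info["dltime"] >= cfg["download_time"] * 3600:
        return f"download timed out after {cfg['download_time']} h"
    if cfg.get("seed_avgspeed") and info["seeding_time"] >= 30 * 60 \
            and info["avg_upspeed"] <= cfg["seed_avgspeed"] * KiB:
        return f"average upload speed below {cfg['seed_avgspeed']} KB/s"
    if cfg.get("seed_inactivetime") and info["iatime"] >= cfg["seed_inactivetime"] * 60:
        return f"inactive for {cfg['seed_inactivetime']} min"
    return ""
```
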
- """ - 根据类型返回下载器实例 - """ - if dtype == "qbittorrent": - return self.qb - elif dtype == "transmission": - return self.tr - else: - return None - - def __download(self, torrent: TorrentInfo) -> Optional[str]: - """ - 添加下载任务 - """ - # 上传限速 - up_speed = int(self._up_speed) if self._up_speed else None - # 下载限速 - down_speed = int(self._dl_speed) if self._dl_speed else None - if self._downloader == "qbittorrent": - if not self.qb: - return None - # 限速值转为bytes - up_speed = up_speed * 1024 if up_speed else None - down_speed = down_speed * 1024 if down_speed else None - # 生成随机Tag - tag = StringUtils.generate_random_str(10) - state = self.qb.add_torrent(content=torrent.enclosure, - download_dir=self._save_path or None, - cookie=torrent.site_cookie, - tag=["已整理", "刷流", tag], - upload_limit=up_speed, - download_limit=down_speed) - if not state: - return None - else: - # 获取种子Hash - torrent_hash = self.qb.get_torrent_id_by_tag(tags=tag) - if not torrent_hash: - logger.error(f"{self._downloader} 获取种子Hash失败") - return None - return torrent_hash - elif self._downloader == "transmission": - if not self.tr: - return None - # 添加任务 - torrent = self.tr.add_torrent(content=torrent.enclosure, - download_dir=self._save_path or None, - cookie=torrent.site_cookie, - labels=["已整理", "刷流"]) - if not torrent: - return None - else: - if self._up_speed or self._dl_speed: - self.tr.change_torrent(hash_string=torrent.hashString, - upload_limit=up_speed, - download_limit=down_speed) - return torrent.hashString - return None - - def __get_hash(self, torrent: Any): - """ - 获取种子hash - """ - try: - return torrent.get("hash") if self._downloader == "qbittorrent" else torrent.hashString - except Exception as e: - print(str(e)) - return "" - - def __get_label(self, torrent: Any): - """ - 获取种子标签 - """ - try: - return [str(tag).strip() for tag in torrent.get("tags").split(',')] \ - if self._downloader == "qbittorrent" else torrent.labels or [] - except Exception as e: - print(str(e)) - return [] - - def __get_torrent_info(self, torrent: Any) -> dict: - - # 当前时间戳 - date_now = int(time.time()) - # QB - if self._downloader == "qbittorrent": - """ - { - "added_on": 1693359031, - "amount_left": 0, - "auto_tmm": false, - "availability": -1, - "category": "tJU", - "completed": 67759229411, - "completion_on": 1693609350, - "content_path": "/mnt/sdb/qb/downloads/Steel.Division.2.Men.of.Steel-RUNE", - "dl_limit": -1, - "dlspeed": 0, - "download_path": "", - "downloaded": 67767365851, - "downloaded_session": 0, - "eta": 8640000, - "f_l_piece_prio": false, - "force_start": false, - "hash": "116bc6f3efa6f3b21a06ce8f1cc71875", - "infohash_v1": "116bc6f306c40e072bde8f1cc71875", - "infohash_v2": "", - "last_activity": 1693609350, - "magnet_uri": "magnet:?xt=", - "max_ratio": -1, - "max_seeding_time": -1, - "name": "Steel.Division.2.Men.of.Steel-RUNE", - "num_complete": 1, - "num_incomplete": 0, - "num_leechs": 0, - "num_seeds": 0, - "priority": 0, - "progress": 1, - "ratio": 0, - "ratio_limit": -2, - "save_path": "/mnt/sdb/qb/downloads", - "seeding_time": 615035, - "seeding_time_limit": -2, - "seen_complete": 1693609350, - "seq_dl": false, - "size": 67759229411, - "state": "stalledUP", - "super_seeding": false, - "tags": "", - "time_active": 865354, - "total_size": 67759229411, - "tracker": "https://tracker", - "trackers_count": 2, - "up_limit": -1, - "uploaded": 0, - "uploaded_session": 0, - "upspeed": 0 - } - """ - # ID - torrent_id = torrent.get("hash") - # 标题 - torrent_title = torrent.get("name") - # 下载时间 - if (not torrent.get("added_on") - or 
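
A note on the qBittorrent branch of __download above: adding a torrent does not return its hash directly, so the plugin attaches a randomly generated tag to the upload and then resolves the hash through get_torrent_id_by_tag. Roughly, assuming the same Qbittorrent helper methods shown above:

```python
import secrets


def add_and_resolve_hash(qb, enclosure: str, cookie: str, save_path: str = None):
    """Add a torrent via the Qbittorrent helper and return its hash, using the
    same random-tag lookup as __download (sketch; qb is the plugin's helper)."""
    tag = secrets.token_hex(5)  # the plugin uses StringUtils.generate_random_str(10)
    ok = qb.add_torrent(content=enclosure,
                        download_dir=save_path,
                        cookie=cookie,
                        tag=["刷流", tag])
    if not ok:
        return None
    # the freshly added torrent is the only one carrying this tag
    return qb.get_torrent_id_by_tag(tags=tag)
```

Transmission does not need the trick, since its add_torrent call returns an object carrying hashString.
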
torrent.get("added_on") < 0): - dltime = 0 - else: - dltime = date_now - torrent.get("added_on") - # 做种时间 - if (not torrent.get("completion_on") - or torrent.get("completion_on") < 0): - seeding_time = 0 - else: - seeding_time = date_now - torrent.get("completion_on") - # 分享率 - ratio = torrent.get("ratio") or 0 - # 上传量 - uploaded = torrent.get("uploaded") or 0 - # 平均上传速度 Byte/s - if dltime: - avg_upspeed = int(uploaded / dltime) - else: - avg_upspeed = uploaded - # 已未活动 秒 - if (not torrent.get("last_activity") - or torrent.get("last_activity") < 0): - iatime = 0 - else: - iatime = date_now - torrent.get("last_activity") - # 下载量 - downloaded = torrent.get("downloaded") - # 种子大小 - total_size = torrent.get("total_size") - # 添加时间 - add_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(torrent.get("added_on") or 0)) - # TR - else: - # ID - torrent_id = torrent.hashString - # 标题 - torrent_title = torrent.name - # 做种时间 - if (not torrent.date_done - or torrent.date_done.timestamp() < 1): - seeding_time = 0 - else: - seeding_time = date_now - int(torrent.date_done.timestamp()) - # 下载耗时 - if (not torrent.date_added - or torrent.date_added.timestamp() < 1): - dltime = 0 - else: - dltime = date_now - int(torrent.date_added.timestamp()) - # 下载量 - downloaded = int(torrent.total_size * torrent.progress / 100) - # 分享率 - ratio = torrent.ratio or 0 - # 上传量 - uploaded = int(downloaded * torrent.ratio) - # 平均上传速度 - if dltime: - avg_upspeed = int(uploaded / dltime) - else: - avg_upspeed = uploaded - # 未活动时间 - if (not torrent.date_active - or torrent.date_active.timestamp() < 1): - iatime = 0 - else: - iatime = date_now - int(torrent.date_active.timestamp()) - # 种子大小 - total_size = torrent.total_size - # 添加时间 - add_time = time.strftime('%Y-%m-%d %H:%M:%S', - time.localtime(torrent.date_added.timestamp() if torrent.date_added else 0)) - - return { - "hash": torrent_id, - "title": torrent_title, - "seeding_time": seeding_time, - "ratio": ratio, - "uploaded": uploaded, - "downloaded": downloaded, - "avg_upspeed": avg_upspeed, - "iatime": iatime, - "dltime": dltime, - "total_size": total_size, - "add_time": add_time - } - - def __send_delete_message(self, site_name: str, torrent_title: str, reason: str): - """ - 发送删除种子的消息 - """ - if not self._notify: - return - self.chain.post_message(Notification( - mtype=NotificationType.SiteMessage, - title=f"【刷流任务删种】", - text=f"站点:{site_name}\n" - f"标题:{torrent_title}\n" - f"原因:{reason}" - )) - - def __send_add_message(self, torrent: TorrentInfo): - """ - 发送添加下载的消息 - """ - if not self._notify: - return - msg_text = "" - if torrent.site_name: - msg_text = f"站点:{torrent.site_name}" - if torrent.title: - msg_text = f"{msg_text}\n标题:{torrent.title}" - if torrent.size: - if str(torrent.size).replace(".", "").isdigit(): - size = StringUtils.str_filesize(torrent.size) - else: - size = torrent.size - msg_text = f"{msg_text}\n大小:{size}" - if torrent.pubdate: - msg_text = f"{msg_text}\n发布时间:{torrent.pubdate}" - if torrent.seeders: - msg_text = f"{msg_text}\n做种数:{torrent.seeders}" - if torrent.volume_factor: - msg_text = f"{msg_text}\n促销:{torrent.volume_factor}" - if torrent.hit_and_run: - msg_text = f"{msg_text}\nHit&Run:是" - - self.chain.post_message(Notification( - mtype=NotificationType.SiteMessage, - title="【刷流任务种子下载】", - text=msg_text - )) - - def __get_torrents_size(self) -> int: - """ - 获取任务中的种子总大小 - """ - # 读取种子记录 - task_info = self.get_data("torrents") or {} - if not task_info: - return 0 - total_size = sum([task.get("size") or 0 for task in task_info.values()]) - return 
total_size - - def __get_downloader_info(self) -> schemas.DownloaderInfo: - """ - 获取下载器实时信息(所有下载器) - """ - ret_info = schemas.DownloaderInfo() - - # Qbittorrent - if self.qb: - info = self.qb.transfer_info() - if info: - ret_info.download_speed += info.get("dl_info_speed") - ret_info.upload_speed += info.get("up_info_speed") - ret_info.download_size += info.get("dl_info_data") - ret_info.upload_size += info.get("up_info_data") - - # Transmission - if self.tr: - info = self.tr.transfer_info() - if info: - ret_info.download_speed += info.download_speed - ret_info.upload_speed += info.upload_speed - ret_info.download_size += info.current_stats.downloaded_bytes - ret_info.upload_size += info.current_stats.uploaded_bytes - - return ret_info - - def __get_downloading_count(self) -> int: - """ - 获取正在下载的任务数量 - """ - downlader = self.__get_downloader(self._downloader) - if not downlader: - return 0 - torrents = downlader.get_downloading_torrents() - return len(torrents) or 0 - - @staticmethod - def __get_pubminutes(pubdate: str) -> int: - """ - 将字符串转换为时间,并计算与当前时间差)(分钟) - """ - try: - if not pubdate: - return 0 - pubdate = pubdate.replace("T", " ").replace("Z", "") - pubdate = datetime.strptime(pubdate, "%Y-%m-%d %H:%M:%S") - now = datetime.now() - return (now - pubdate).total_seconds() // 60 - except Exception as e: - print(str(e)) - return 0 diff --git a/app/plugins/chatgpt/__init__.py b/app/plugins/chatgpt/__init__.py deleted file mode 100644 index f094c911..00000000 --- a/app/plugins/chatgpt/__init__.py +++ /dev/null @@ -1,231 +0,0 @@ -from typing import Any, List, Dict, Tuple - -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.log import logger -from app.plugins import _PluginBase -from app.plugins.chatgpt.openai import OpenAi -from app.schemas.types import EventType - - -class ChatGPT(_PluginBase): - # 插件名称 - plugin_name = "ChatGPT" - # 插件描述 - plugin_desc = "消息交互支持与ChatGPT对话。" - # 插件图标 - plugin_icon = "chatgpt.png" - # 主题色 - plugin_color = "#74AA9C" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "chatgpt_" - # 加载顺序 - plugin_order = 15 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - openai = None - _enabled = False - _proxy = False - _recognize = False - _openai_url = None - _openai_key = None - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._proxy = config.get("proxy") - self._recognize = config.get("recognize") - self._openai_url = config.get("openai_url") - self._openai_key = config.get("openai_key") - self.openai = OpenAi(api_key=self._openai_key, api_url=self._openai_url, - proxy=settings.PROXY if self._proxy else None) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'proxy', - 'label': '使用代理', - } - } - ] - }, - { - 'component': 
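
__get_pubminutes above turns a site's pubdate string into "minutes since publication" by stripping the ISO "T"/"Z" markers and diffing against the current time. A small worked example of the same conversion, using naive local time as the plugin does (timezone-aware feeds would need extra handling):

```python
from datetime import datetime


def pub_minutes(pubdate: str, now: datetime) -> int:
    """Minutes elapsed since pubdate, e.g. '2023-08-30T12:00:00Z'."""
    cleaned = pubdate.replace("T", " ").replace("Z", "")
    published = datetime.strptime(cleaned, "%Y-%m-%d %H:%M:%S")
    return int((now - published).total_seconds() // 60)


# a torrent published 90 minutes before "now"
print(pub_minutes("2023-08-30 12:00:00", datetime(2023, 8, 30, 13, 30)))  # -> 90
```
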
'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'recognize', - 'label': '辅助识别', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'openai_url', - 'label': 'OpenAI API Url', - 'placeholder': 'https://api.openai.com', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'openai_key', - 'label': 'sk-xxx' - } - } - ] - } - ] - }, - ] - } - ], { - "enabled": False, - "proxy": False, - "recognize": False, - "openai_url": "https://api.openai.com", - "openai_key": "" - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.UserMessage) - def talk(self, event: Event): - """ - 监听用户消息,获取ChatGPT回复 - """ - if not self._enabled: - return - if not self.openai: - return - text = event.event_data.get("text") - userid = event.event_data.get("userid") - channel = event.event_data.get("channel") - if not text: - return - response = self.openai.get_response(text=text, userid=userid) - if response: - self.post_message(channel=channel, title=response, userid=userid) - - @eventmanager.register(EventType.NameRecognize) - def recognize(self, event: Event): - """ - 监听识别事件,使用ChatGPT辅助识别名称 - """ - if not event.event_data: - return - title = event.event_data.get("title") - if not title: - return - # 收到事件后需要立码返回,避免主程序等待 - if not self._enabled \ - or not self.openai \ - or not self._recognize: - eventmanager.send_event( - EventType.NameRecognizeResult, - { - 'title': title - } - ) - return - # 调用ChatGPT - response = self.openai.get_media_name(filename=title) - logger.info(f"ChatGPT辅助识别结果:{response}") - if response: - eventmanager.send_event( - EventType.NameRecognizeResult, - { - 'title': title, - 'name': response.get("title"), - 'year': response.get("year"), - 'season': response.get("season"), - 'episode': response.get("episode") - } - ) - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/chatgpt/openai.py b/app/plugins/chatgpt/openai.py deleted file mode 100644 index 3613926a..00000000 --- a/app/plugins/chatgpt/openai.py +++ /dev/null @@ -1,204 +0,0 @@ -import json -import time -from typing import List, Union - -import openai -from cacheout import Cache - -OpenAISessionCache = Cache(maxsize=100, ttl=3600, timer=time.time, default=None) - - -class OpenAi: - _api_key: str = None - _api_url: str = None - - def __init__(self, api_key: str = None, api_url: str = None, proxy: dict = None): - self._api_key = api_key - self._api_url = api_url - openai.api_base = self._api_url + "/v1" - openai.api_key = self._api_key - if proxy and proxy.get("https"): - openai.proxy = proxy.get("https") - - def get_state(self) -> bool: - return True if self._api_key else False - - @staticmethod - def __save_session(session_id: str, message: str): - """ - 保存会话 - :param session_id: 会话ID - :param message: 消息 - :return: - """ - seasion = OpenAISessionCache.get(session_id) - if seasion: - seasion.append({ - "role": "assistant", - "content": message - }) - OpenAISessionCache.set(session_id, seasion) - - @staticmethod - def __get_session(session_id: str, message: str) -> List[dict]: - """ - 获取会话 - :param session_id: 会话ID - :return: 会话上下文 - """ - seasion = OpenAISessionCache.get(session_id) - if seasion: - seasion.append({ - "role": "user", - "content": message - }) - 
else: - seasion = [ - { - "role": "system", - "content": "请在接下来的对话中请使用中文回复,并且内容尽可能详细。" - }, - { - "role": "user", - "content": message - }] - OpenAISessionCache.set(session_id, seasion) - return seasion - - @staticmethod - def __get_model(message: Union[str, List[dict]], - prompt: str = None, - user: str = "MoviePilot", - **kwargs): - """ - 获取模型 - """ - if not isinstance(message, list): - if prompt: - message = [ - { - "role": "system", - "content": prompt - }, - { - "role": "user", - "content": message - } - ] - else: - message = [ - { - "role": "user", - "content": message - } - ] - return openai.ChatCompletion.create( - model="gpt-3.5-turbo", - user=user, - messages=message, - **kwargs - ) - - @staticmethod - def __clear_session(session_id: str): - """ - 清除会话 - :param session_id: 会话ID - :return: - """ - if OpenAISessionCache.get(session_id): - OpenAISessionCache.delete(session_id) - - def get_media_name(self, filename: str): - """ - 从文件名中提取媒体名称等要素 - :param filename: 文件名 - :return: Json - """ - if not self.get_state(): - return None - result = "" - try: - _filename_prompt = "I will give you a movie/tvshow file name.You need to return a Json." \ - "\nPay attention to the correct identification of the film name." \ - "\n{\"title\":string,\"version\":string,\"part\":string,\"year\":string,\"resolution\":string,\"season\":number|null,\"episode\":number|null}" - completion = self.__get_model(prompt=_filename_prompt, message=filename) - result = completion.choices[0].message.content - return json.loads(result) - except Exception as e: - print(f"{str(e)}:{result}") - return {} - - def get_response(self, text: str, userid: str): - """ - 聊天对话,获取答案 - :param text: 输入文本 - :param userid: 用户ID - :return: - """ - if not self.get_state(): - return "" - try: - if not userid: - return "用户信息错误" - else: - userid = str(userid) - if text == "#清除": - self.__clear_session(userid) - return "会话已清除" - # 获取历史上下文 - messages = self.__get_session(userid, text) - completion = self.__get_model(message=messages, user=userid) - result = completion.choices[0].message.content - if result: - self.__save_session(userid, text) - return result - except openai.error.RateLimitError as e: - return f"请求被ChatGPT拒绝了,{str(e)}" - except openai.error.APIConnectionError as e: - return f"ChatGPT网络连接失败:{str(e)}" - except openai.error.Timeout as e: - return f"没有接收到ChatGPT的返回消息:{str(e)}" - except Exception as e: - return f"请求ChatGPT出现错误:{str(e)}" - - def translate_to_zh(self, text: str): - """ - 翻译为中文 - :param text: 输入文本 - """ - if not self.get_state(): - return False, None - system_prompt = "You are a translation engine that can only translate text and cannot interpret it." 
- user_prompt = f"translate to zh-CN:\n\n{text}" - result = "" - try: - completion = self.__get_model(prompt=system_prompt, - message=user_prompt, - temperature=0, - top_p=1, - frequency_penalty=0, - presence_penalty=0) - result = completion.choices[0].message.content.strip() - return True, result - except Exception as e: - print(f"{str(e)}:{result}") - return False, str(e) - - def get_question_answer(self, question: str): - """ - 从给定问题和选项中获取正确答案 - :param question: 问题及选项 - :return: Json - """ - if not self.get_state(): - return None - result = "" - try: - _question_prompt = "下面我们来玩一个游戏,你是老师,我是学生,你需要回答我的问题,我会给你一个题目和几个选项,你的回复必须是给定选项中正确答案对应的序号,请直接回复数字" - completion = self.__get_model(prompt=_question_prompt, message=question) - result = completion.choices[0].message.content - return result - except Exception as e: - print(f"{str(e)}:{result}") - return {} diff --git a/app/plugins/chinesesubfinder/__init__.py b/app/plugins/chinesesubfinder/__init__.py deleted file mode 100644 index 7550e440..00000000 --- a/app/plugins/chinesesubfinder/__init__.py +++ /dev/null @@ -1,256 +0,0 @@ -from functools import lru_cache -from pathlib import Path -from typing import List, Tuple, Dict, Any - -from app.core.config import settings -from app.core.context import MediaInfo -from app.core.event import eventmanager, Event -from app.log import logger -from app.plugins import _PluginBase -from app.schemas import TransferInfo -from app.schemas.types import EventType, MediaType -from app.utils.http import RequestUtils - - -class ChineseSubFinder(_PluginBase): - # 插件名称 - plugin_name = "ChineseSubFinder" - # 插件描述 - plugin_desc = "整理入库时通知ChineseSubFinder下载字幕。" - # 插件图标 - plugin_icon = "chinesesubfinder.png" - # 主题色 - plugin_color = "#83BE39" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "chinesesubfinder_" - # 加载顺序 - plugin_order = 5 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _save_tmp_path = None - _enabled = False - _host = None - _api_key = None - _remote_path = None - _local_path = None - - def init_plugin(self, config: dict = None): - self._save_tmp_path = settings.TEMP_PATH - if config: - self._enabled = config.get("enabled") - self._api_key = config.get("api_key") - self._host = config.get('host') - if self._host: - if not self._host.startswith('http'): - self._host = "http://" + self._host - if not self._host.endswith('/'): - self._host = self._host + "/" - self._local_path = config.get("local_path") - self._remote_path = config.get("remote_path") - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'host', - 'label': '服务器' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'api_key', - 'label': 'API密钥' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 
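
The OpenAi wrapper above targets the pre-1.0 openai SDK (module-level api_base/api_key, ChatCompletion.create, openai.error exceptions) and keeps per-user chat history in a cacheout Cache so follow-up questions carry context. A minimal sketch of that session pattern under the same old-SDK assumption; the key and prompt text are placeholders:

```python
import time

import openai  # openai<1.0 style API, as used by the plugin
from cacheout import Cache

openai.api_key = "sk-..."                       # placeholder key
openai.api_base = "https://api.openai.com/v1"   # or a self-hosted compatible endpoint

sessions = Cache(maxsize=100, ttl=3600, timer=time.time, default=None)


def chat(user_id: str, text: str) -> str:
    # pull the cached history for this user, or start a fresh conversation
    history = sessions.get(user_id) or [
        {"role": "system", "content": "Reply in Chinese and in as much detail as possible."}
    ]
    history.append({"role": "user", "content": text})
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", user=user_id, messages=history)
    answer = completion.choices[0].message.content
    history.append({"role": "assistant", "content": answer})
    sessions.set(user_id, history)
    return answer
```
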
'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'local_path', - 'label': '本地路径' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'remote_path', - 'label': '远端路径' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "host": "", - "api_key": "", - "local_path": "", - "remote_path": "" - } - - def get_state(self) -> bool: - return self._enabled - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - pass - - @eventmanager.register(EventType.TransferComplete) - def download(self, event: Event): - """ - 调用ChineseSubFinder下载字幕 - """ - if not self._enabled or not self._host or not self._api_key: - return - item = event.event_data - if not item: - return - # 请求地址 - req_url = "%sapi/v1/add-job" % self._host - - # 媒体信息 - item_media: MediaInfo = item.get("mediainfo") - # 转移信息 - item_transfer: TransferInfo = item.get("transferinfo") - # 类型 - item_type = item_media.type - # 目的路径 - item_dest: Path = item_transfer.target_path - # 是否蓝光原盘 - item_bluray = item_transfer.is_bluray - # 文件清单 - item_file_list = item_transfer.file_list_new - - if item_bluray: - # 蓝光原盘虚拟个文件 - item_file_list = ["%s.mp4" % item_dest / item_dest.name] - - for file_path in item_file_list: - # 路径替换 - if self._local_path and self._remote_path and file_path.startswith(self._local_path): - file_path = file_path.replace(self._local_path, self._remote_path).replace('\\', '/') - - # 调用CSF下载字幕 - self.__request_csf(req_url=req_url, - file_path=file_path, - item_type=0 if item_type == MediaType.MOVIE.value else 1, - item_bluray=item_bluray) - - @lru_cache(maxsize=128) - def __request_csf(self, req_url, file_path, item_type, item_bluray): - # 一个名称只建一个任务 - logger.info("通知ChineseSubFinder下载字幕: %s" % file_path) - params = { - "video_type": item_type, - "physical_video_file_full_path": file_path, - "task_priority_level": 3, - "media_server_inside_video_id": "", - "is_bluray": item_bluray - } - try: - res = RequestUtils(headers={ - "Authorization": "Bearer %s" % self._api_key - }).post(req_url, json=params) - if not res or res.status_code != 200: - logger.error("调用ChineseSubFinder API失败!") - else: - # 如果文件目录没有识别的nfo元数据, 此接口会返回控制符,推测是ChineseSubFinder的原因 - # emby refresh元数据时异步的 - if res.text: - job_id = res.json().get("job_id") - message = res.json().get("message") - if not job_id: - logger.warn("ChineseSubFinder下载字幕出错:%s" % message) - else: - logger.info("ChineseSubFinder任务添加成功:%s" % job_id) - elif res.status_code != 200: - logger.warn(f"ChineseSubFinder调用出错:{res.status_code} - {res.reason}") - except Exception as e: - logger.error("连接ChineseSubFinder出错:" + str(e)) diff --git a/app/plugins/clouddiskdel/__init__.py b/app/plugins/clouddiskdel/__init__.py deleted file mode 100644 index bcc0e7cd..00000000 --- a/app/plugins/clouddiskdel/__init__.py +++ /dev/null @@ -1,436 +0,0 @@ -import os -import shutil -import time -from pathlib import Path - -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.log import logger -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple - -from app.schemas.types import EventType, MediaImageType, NotificationType, MediaType -from app.utils.system import SystemUtils - - -class CloudDiskDel(_PluginBase): - # 插件名称 - plugin_name = "云盘文件删除" - # 插件描述 - plugin_desc = "媒体库删除strm文件后同步删除云盘资源。" - # 插件图标 - plugin_icon = "clouddisk.png" - # 主题色 - plugin_color = "#ff9933" - # 
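
__request_csf above boils down to one authenticated POST against ChineseSubFinder's add-job endpoint. Roughly equivalent with plain requests; the payload fields mirror the ones the plugin sends, while the host and API key here are placeholders:

```python
import requests

CSF_HOST = "http://chinesesubfinder:19035/"   # placeholder, must end with '/'
CSF_API_KEY = "your-api-key"                  # placeholder


def request_subtitle(video_path: str, is_tv: bool, is_bluray: bool = False) -> None:
    resp = requests.post(
        CSF_HOST + "api/v1/add-job",
        headers={"Authorization": f"Bearer {CSF_API_KEY}"},
        json={
            "video_type": 1 if is_tv else 0,   # 0 = movie, 1 = TV, as in the plugin
            "physical_video_file_full_path": video_path,
            "task_priority_level": 3,
            "media_server_inside_video_id": "",
            "is_bluray": is_bluray,
        },
        timeout=30,
    )
    resp.raise_for_status()
    if resp.text:
        print(resp.json().get("job_id"), resp.json().get("message"))
```
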
插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "clouddiskdel_" - # 加载顺序 - plugin_order = 26 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _paths = {} - _notify = False - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._notify = config.get("notify") - for path in str(config.get("path")).split("\n"): - paths = path.split(":") - self._paths[paths[0]] = paths[1] - - @eventmanager.register(EventType.NetworkDiskDel) - def clouddisk_del(self, event: Event): - if not self._enabled: - return - - event_data = event.event_data - logger.info(f"获取到云盘删除请求 {event_data}") - - media_path = event_data.get("media_path") - if not media_path: - logger.error("未获取到删除路径") - return - - media_name = event_data.get("media_name") - tmdb_id = event_data.get("tmdb_id") - media_type = event_data.get("media_type") - season_num = event_data.get("season_num") - episode_num = event_data.get("episode_num") - - # 判断删除媒体路径是否与配置的媒体库路径相符,相符则继续删除,不符则跳过 - for library_path in list(self._paths.keys()): - if str(media_path).startswith(library_path): - # 替换网盘路径 - media_path = str(media_path).replace(library_path, self._paths.get(library_path)) - logger.info(f"获取到moviepilot本地云盘挂载路径 {media_path}") - path = Path(media_path) - if path.is_file() or media_path.endswith(".strm"): - # 删除文件、nfo、jpg等同名文件 - pattern = path.stem.replace('[', '?').replace(']', '?') - logger.info(f"开始筛选同名文件 {pattern}") - files = path.parent.glob(f"{pattern}.*") - for file in files: - Path(file).unlink() - logger.info(f"云盘文件 {file} 已删除") - else: - # 非根目录,才删除目录 - shutil.rmtree(path) - # 删除目录 - logger.warn(f"云盘目录 {path} 已删除") - - # 判断当前媒体父路径下是否有媒体文件,如有则无需遍历父级 - if not SystemUtils.exits_files(path.parent, settings.RMT_MEDIAEXT): - # 判断父目录是否为空, 为空则删除 - for parent_path in path.parents: - if str(parent_path.parent) != str(path.root): - # 父目录非根目录,才删除父目录 - if not SystemUtils.exits_files(parent_path, settings.RMT_MEDIAEXT): - # 当前路径下没有媒体文件则删除 - shutil.rmtree(parent_path) - logger.warn(f"云盘目录 {parent_path} 已删除") - - break - - # 发送消息 - image = 'https://emby.media/notificationicon.png' - media_type = MediaType.MOVIE if media_type in ["Movie", "MOV"] else MediaType.TV - if self._notify: - backrop_image = self.chain.obtain_specific_image( - mediaid=tmdb_id, - mtype=media_type, - image_type=MediaImageType.Backdrop, - season=season_num, - episode=episode_num - ) or image - - # 类型 - if media_type == MediaType.MOVIE: - msg = f'电影 {media_name} {tmdb_id}' - # 删除电视剧 - elif media_type == MediaType.TV and not season_num and not episode_num: - msg = f'剧集 {media_name} {tmdb_id}' - # 删除季 S02 - elif media_type == MediaType.TV and season_num and not episode_num: - msg = f'剧集 {media_name} S{season_num} {tmdb_id}' - # 删除剧集S02E02 - elif media_type == MediaType.TV and season_num and episode_num: - msg = f'剧集 {media_name} S{season_num}E{episode_num} {tmdb_id}' - else: - msg = media_name - - # 发送通知 - self.post_message( - mtype=NotificationType.MediaServer, - title="云盘同步删除任务完成", - image=backrop_image, - text=f"{msg}\n" - f"时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}" - ) - - # 读取历史记录 - history = self.get_data('history') or [] - - # 获取poster - poster_image = self.chain.obtain_specific_image( - mediaid=tmdb_id, - mtype=media_type, - image_type=MediaImageType.Poster, - ) or image - history.append({ - "type": media_type.value, - "title": media_name, - "path": media_path, - "season": 
season_num, - "episode": episode_num, - "image": poster_image, - "del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) - }) - - # 保存历史 - self.save_data("history", history) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '开启通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'path', - 'rows': '2', - 'label': '媒体库路径映射', - 'placeholder': '媒体服务器路径:moviepilot内云盘挂载路径(一行一个)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '需要开启媒体库删除插件且正确配置排除路径。' - '主要针对于strm文件删除后同步删除云盘资源。' - '如遇删除失败,请检查文件权限问题。' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '关于路径映射:' - 'emby:/data/series/A.mp4,' - 'moviepilot内云盘挂载路径:/mnt/link/series/A.mp4。' - '路径映射填/data:/mnt/link' - } - } - ] - } - ] - }, - ] - } - ], { - "enabled": False, - "path": "", - "notify": False - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('del_time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - htype = history.get("type") - title = history.get("title") - season = history.get("season") - episode = history.get("episode") - image = history.get("image") - del_time = history.get("del_time") - - if season: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'季:{season}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'集:{episode}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - else: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 
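
The path-mapping help text above ("/data:/mnt/link") is applied in init_plugin with a simple prefix swap: each configured line is split on ":" into a media-server prefix and the matching cloud-mount prefix, and incoming deletion paths are rewritten before anything is removed. A small sketch of that translation, using the example path from the alert text:

```python
from typing import Dict, Optional


def parse_mappings(config_text: str) -> Dict[str, str]:
    """One 'media-server-path:cloud-mount-path' pair per line, e.g. '/data:/mnt/link'."""
    mappings = {}
    for line in config_text.splitlines():
        if ":" in line:
            src, dst = line.split(":", 1)
            mappings[src] = dst
    return mappings


def to_cloud_path(media_path: str, mappings: Dict[str, str]) -> Optional[str]:
    """Rewrite a media-server path to its cloud-mount location, or None if unmapped."""
    for prefix, mount in mappings.items():
        if media_path.startswith(prefix):
            return media_path.replace(prefix, mount, 1)
    return None


print(to_cloud_path("/data/series/A.mp4", parse_mappings("/data:/mnt/link")))
# -> /mnt/link/series/A.mp4
```
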
'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': image, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': sub_contents - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/cloudflarespeedtest/__init__.py b/app/plugins/cloudflarespeedtest/__init__.py deleted file mode 100644 index 1bd6d3bb..00000000 --- a/app/plugins/cloudflarespeedtest/__init__.py +++ /dev/null @@ -1,812 +0,0 @@ -import os -import subprocess -import time -import zipfile -from datetime import datetime, timedelta -from pathlib import Path -from typing import List, Tuple, Dict, Any - -import pytz -import requests -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from python_hosts import Hosts, HostsEntry -from requests import Response - -from app import schemas -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType, NotificationType -from app.utils.http import RequestUtils -from app.utils.ip import IpUtils -from app.utils.system import SystemUtils - - -class CloudflareSpeedTest(_PluginBase): - # 插件名称 - plugin_name = "Cloudflare IP优选" - # 插件描述 - plugin_desc = "🌩 测试 Cloudflare CDN 延迟和速度,自动优选IP。" - # 插件图标 - plugin_icon = "cloudflare.jpg" - # 主题色 - plugin_color = "#F6821F" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "cloudflarespeedtest_" - # 加载顺序 - plugin_order = 12 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _customhosts = False - _cf_ip = None - _scheduler = None - _cron = None - _onlyonce = False - _ipv4 = False - _ipv6 = False - _version = None - _additional_args = None - _re_install = False - _notify = False - _check = False - _cf_path = None - _cf_ipv4 = None - _cf_ipv6 = None - _result_file = None - _release_prefix = 'https://github.com/XIU2/CloudflareSpeedTest/releases/download' - _binary_name = 'CloudflareST' - - def init_plugin(self, config: dict = None): - # 停止现有任务 - self.stop_service() - - # 读取配置 - if config: - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._cf_ip = config.get("cf_ip") - self._version = config.get("version") - self._ipv4 = config.get("ipv4") - self._ipv6 = config.get("ipv6") - self._re_install = config.get("re_install") - self._additional_args = config.get("additional_args") - self._notify = config.get("notify") - self._check = config.get("check") - - if self.get_state() or self._onlyonce: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - try: - if self.get_state() and self._cron: - logger.info(f"Cloudflare CDN优选服务启动,周期:{self._cron}") - self._scheduler.add_job(func=self.__cloudflareSpeedTest, - trigger=CronTrigger.from_crontab(self._cron), - name="Cloudflare优选") - - if self._onlyonce: - logger.info(f"Cloudflare CDN优选服务启动,立即运行一次") - self._scheduler.add_job(func=self.__cloudflareSpeedTest, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="Cloudflare优选") - # 关闭一次性开关 - 
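The lines around this point show the scheduling pattern that nearly all of the removed plugins share: a cron job built with `CronTrigger.from_crontab` for the regular cycle, plus a one-shot `date` job a few seconds in the future when "run once now" is ticked, after which the flag is written back as `False` so it does not fire again on the next reload. A condensed standalone sketch of that pattern; the `task` function and timezone are placeholders (the plugins read `settings.TZ`):

```python
from datetime import datetime, timedelta

import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger

TZ = "Asia/Shanghai"  # placeholder timezone


def task():
    print("running the plugin job ...")


scheduler = BackgroundScheduler(timezone=TZ)
# Regular cycle from a 5-field crontab expression
scheduler.add_job(func=task, trigger=CronTrigger.from_crontab("0 4 * * *"), name="cron cycle")
# "Run once now": a date trigger a few seconds in the future
scheduler.add_job(func=task, trigger="date",
                  run_date=datetime.now(tz=pytz.timezone(TZ)) + timedelta(seconds=3),
                  name="one-shot run")
if scheduler.get_jobs():
    scheduler.print_jobs()
    scheduler.start()
```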
self._onlyonce = False - self.__update_config() - except Exception as err: - logger.error(f"Cloudflare CDN优选服务出错:{str(err)}") - self.systemmessage.put(f"Cloudflare CDN优选服务出错:{str(err)}") - return - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - @eventmanager.register(EventType.CloudFlareSpeedTest) - def __cloudflareSpeedTest(self, event: Event = None): - """ - CloudflareSpeedTest优选 - """ - self._cf_path = self.get_data_path() - self._cf_ipv4 = os.path.join(self._cf_path, "ip.txt") - self._cf_ipv6 = os.path.join(self._cf_path, "ipv6.txt") - self._result_file = os.path.join(self._cf_path, "result_hosts.txt") - - # 获取自定义Hosts插件,若无设置则停止 - customHosts = self.get_config("CustomHosts") - self._customhosts = customHosts and customHosts.get("enabled") - if self._cf_ip and not customHosts or not customHosts.get("hosts"): - logger.error(f"Cloudflare CDN优选依赖于自定义Hosts,请先维护hosts") - return - - if not self._cf_ip: - logger.error("CloudflareSpeedTest加载成功,首次运行,需要配置优选ip") - return - - if event and event.event_data: - logger.info("收到命令,开始Cloudflare IP优选 ...") - self.post_message(channel=event.event_data.get("channel"), - title="开始Cloudflare IP优选 ...", - userid=event.event_data.get("user")) - - # ipv4和ipv6必须其一 - if not self._ipv4 and not self._ipv6: - self._ipv4 = True - self.__update_config() - logger.warn(f"Cloudflare CDN优选未指定ip类型,默认ipv4") - - err_flag, release_version = self.__check_environment() - if err_flag and release_version: - # 更新版本 - self._version = release_version - self.__update_config() - - hosts = customHosts.get("hosts") - if isinstance(hosts, str): - hosts = str(hosts).split('\n') - # 校正优选ip - if self._check: - self.__check_cf_ip(hosts=hosts) - - # 开始优选 - if err_flag: - logger.info("正在进行CLoudflare CDN优选,请耐心等待") - # 执行优选命令,-dd不测速 - if SystemUtils.is_windows(): - cf_command = f'cd \"{self._cf_path}\" && CloudflareST {self._additional_args} -o \"{self._result_file}\"' + ( - f' -f \"{self._cf_ipv4}\"' if self._ipv4 else '') + ( - f' -f \"{self._cf_ipv6}\"' if self._ipv6 else '') - else: - cf_command = f'cd {self._cf_path} && chmod a+x {self._binary_name} && ./{self._binary_name} {self._additional_args} -o {self._result_file}' + ( - f' -f {self._cf_ipv4}' if self._ipv4 else '') + (f' -f {self._cf_ipv6}' if self._ipv6 else '') - logger.info(f'正在执行优选命令 {cf_command}') - if SystemUtils.is_windows(): - process = subprocess.Popen(cf_command, shell=True) - # 执行命令后无法退出 采用异步和设置超时方案 - # 设置超时时间为120秒 - if cf_command.__contains__("-dd"): - time.sleep(120) - else: - time.sleep(600) - # 如果没有在120秒内完成任务,那么杀死该进程 - if process.poll() is None: - os.system('taskkill /F /IM CloudflareST.exe') - else: - os.system(cf_command) - - # 获取优选后最优ip - if SystemUtils.is_windows(): - powershell_command = f"powershell.exe -Command \"Get-Content \'{self._result_file}\' | Select-Object -Skip 1 -First 1 | Write-Output\"" - logger.info(f'正在执行powershell命令 {powershell_command}') - best_ip = SystemUtils.execute(powershell_command) - best_ip = best_ip.split(',')[0] - else: - best_ip = SystemUtils.execute("sed -n '2,1p' " + self._result_file + " | awk -F, '{print $1}'") - logger.info(f"\n获取到最优ip==>[{best_ip}]") - - # 替换自定义Hosts插件数据库hosts - if IpUtils.is_ipv4(best_ip) or IpUtils.is_ipv6(best_ip): - if best_ip == self._cf_ip: - logger.info(f"CloudflareSpeedTest CDN优选ip未变,不做处理") - else: - # 替换优选ip - err_hosts = customHosts.get("err_hosts") - - # 处理ip - new_hosts = [] - for host in hosts: - if host and host != '\n': - host_arr = str(host).split() - if host_arr[0] == self._cf_ip: - 
new_hosts.append(host.replace(self._cf_ip, best_ip).replace("\n", "") + "\n") - else: - new_hosts.append(host.replace("\n", "") + "\n") - - # 更新自定义Hosts - self.update_config( - { - "hosts": ''.join(new_hosts), - "err_hosts": err_hosts, - "enabled": True - }, "CustomHosts" - ) - - # 更新优选ip - old_ip = self._cf_ip - self._cf_ip = best_ip - self.__update_config() - logger.info(f"Cloudflare CDN优选ip [{best_ip}] 已替换自定义Hosts插件") - - # 解发自定义hosts插件重载 - logger.info("通知CustomHosts插件重载 ...") - self.eventmanager.send_event(EventType.PluginReload, - { - "plugin_id": "CustomHosts" - }) - if self._notify: - self.post_message( - mtype=NotificationType.SiteMessage, - title="【Cloudflare优选任务完成】", - text=f"原ip:{old_ip}\n" - f"新ip:{best_ip}" - ) - else: - logger.error("获取到最优ip格式错误,请重试") - self._onlyonce = False - self.__update_config() - self.stop_service() - - def __check_cf_ip(self, hosts): - """ - 校正cf优选ip - 防止特殊情况下cf优选ip和自定义hosts插件中ip不一致 - """ - # 统计每个IP地址出现的次数 - ip_count = {} - for host in hosts: - if host: - ip = host.split()[0] - if ip in ip_count: - ip_count[ip] += 1 - else: - ip_count[ip] = 1 - - # 找出出现次数最多的IP地址 - max_ips = [] # 保存最多出现的IP地址 - max_count = 0 - for ip, count in ip_count.items(): - if count > max_count: - max_ips = [ip] # 更新最多的IP地址 - max_count = count - elif count == max_count: - max_ips.append(ip) - - # 如果出现次数最多的ip不止一个,则不做兼容处理 - if len(max_ips) != 1: - return - - if max_ips[0] != self._cf_ip: - self._cf_ip = max_ips[0] - logger.info(f"获取到自定义hosts插件中ip {max_ips[0]} 出现次数最多,已自动校正优选ip") - - def __check_environment(self): - """ - 环境检查 - """ - # 是否安装标识 - install_flag = False - - # 是否重新安装 - if self._re_install: - install_flag = True - if SystemUtils.is_windows(): - os.system(f'rd /s /q \"{self._cf_path}\"') - else: - os.system(f'rm -rf {self._cf_path}') - logger.info(f'删除CloudflareSpeedTest目录 {self._cf_path},开始重新安装') - - # 判断目录是否存在 - cf_path = Path(self._cf_path) - if not cf_path.exists(): - os.mkdir(self._cf_path) - - # 获取CloudflareSpeedTest最新版本 - release_version = self.__get_release_version() - if not release_version: - # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止 - if Path(f'{self._cf_path}/{self._binary_name}').exists(): - logger.warn(f"获取CloudflareSpeedTest版本失败,存在可执行版本,继续运行") - return True, None - elif self._version: - logger.error(f"获取CloudflareSpeedTest版本失败,获取上次运行版本{self._version},开始安装") - install_flag = True - else: - release_version = "v2.2.2" - self._version = release_version - logger.error(f"获取CloudflareSpeedTest版本失败,获取默认版本{release_version},开始安装") - install_flag = True - - # 有更新 - if not install_flag and release_version != self._version: - logger.info(f"检测到CloudflareSpeedTest有版本[{release_version}]更新,开始安装") - install_flag = True - - # 重装后数据库有版本数据,但是本地没有则重装 - if not install_flag and release_version == self._version and not Path( - f'{self._cf_path}/{self._binary_name}').exists() and not Path( - f'{self._cf_path}/CloudflareST.exe').exists(): - logger.warn(f"未检测到CloudflareSpeedTest本地版本,重新安装") - install_flag = True - - if not install_flag: - logger.info(f"CloudflareSpeedTest无新版本,存在可执行版本,继续运行") - return True, None - - # 检查环境、安装 - if SystemUtils.is_windows(): - # windows - cf_file_name = 'CloudflareST_windows_amd64.zip' - download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}' - return self.__os_install(download_url, cf_file_name, release_version, - f"ditto -V -x -k --sequesterRsrc {self._cf_path}/{cf_file_name} {self._cf_path}") - elif SystemUtils.is_macos(): - # mac - uname = SystemUtils.execute('uname -m') - arch = 'amd64' if uname == 'x86_64' else 'arm64' - cf_file_name = 
f'CloudflareST_darwin_{arch}.zip' - download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}' - return self.__os_install(download_url, cf_file_name, release_version, - f"ditto -V -x -k --sequesterRsrc {self._cf_path}/{cf_file_name} {self._cf_path}") - else: - # docker - uname = SystemUtils.execute('uname -m') - arch = 'amd64' if uname == 'x86_64' else 'arm64' - cf_file_name = f'CloudflareST_linux_{arch}.tar.gz' - download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}' - return self.__os_install(download_url, cf_file_name, release_version, - f"tar -zxf {self._cf_path}/{cf_file_name} -C {self._cf_path}") - - def __os_install(self, download_url, cf_file_name, release_version, unzip_command): - """ - macos docker安装cloudflare - """ - # 手动下载安装包后,无需在此下载 - if not Path(f'{self._cf_path}/{cf_file_name}').exists(): - # 首次下载或下载新版压缩包 - proxies = settings.PROXY - https_proxy = proxies.get("https") if proxies and proxies.get("https") else None - if https_proxy: - if SystemUtils.is_windows(): - self.__get_windows_cloudflarest(download_url, proxies) - else: - os.system( - f'wget -P {self._cf_path} --no-check-certificate -e use_proxy=yes -e https_proxy={https_proxy} {download_url}') - else: - if SystemUtils.is_windows(): - self.__get_windows_cloudflarest(download_url, proxies) - else: - os.system(f'wget -P {self._cf_path} https://ghproxy.com/{download_url}') - - # 判断是否下载好安装包 - if Path(f'{self._cf_path}/{cf_file_name}').exists(): - try: - if SystemUtils.is_windows(): - with zipfile.ZipFile(f'{self._cf_path}/{cf_file_name}', 'r') as zip_ref: - # 解压ZIP文件中的所有文件到指定目录 - zip_ref.extractall(self._cf_path) - if Path(f'{self._cf_path}\\CloudflareST.exe').exists(): - logger.info(f"CloudflareSpeedTest安装成功,当前版本:{release_version}") - return True, release_version - else: - logger.error(f"CloudflareSpeedTest安装失败,请检查") - os.system(f'rd /s /q \"{self._cf_path}\"') - return False, None - # 解压 - os.system(f'{unzip_command}') - # 删除压缩包 - os.system(f'rm -rf {self._cf_path}/{cf_file_name}') - if Path(f'{self._cf_path}/{self._binary_name}').exists(): - logger.info(f"CloudflareSpeedTest安装成功,当前版本:{release_version}") - return True, release_version - else: - logger.error(f"CloudflareSpeedTest安装失败,请检查") - os.removedirs(self._cf_path) - return False, None - except Exception as err: - # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止 - if Path(f'{self._cf_path}/{self._binary_name}').exists() or \ - Path(f'{self._cf_path}\\CloudflareST.exe').exists(): - logger.error(f"CloudflareSpeedTest安装失败:{str(err)},继续使用现版本运行") - return True, None - else: - logger.error(f"CloudflareSpeedTest安装失败:{str(err)},无可用版本,停止运行") - if SystemUtils.is_windows(): - os.system(f'rd /s /q \"{self._cf_path}\"') - else: - os.removedirs(self._cf_path) - return False, None - else: - # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止 - if Path(f'{self._cf_path}/{self._binary_name}').exists() or \ - Path(f'{self._cf_path}\\CloudflareST.exe').exists(): - logger.warn(f"CloudflareSpeedTest安装失败,存在可执行版本,继续运行") - return True, None - else: - logger.error(f"CloudflareSpeedTest安装失败,无可用版本,停止运行") - if SystemUtils.is_windows(): - os.system(f'rd /s /q \"{self._cf_path}\"') - else: - os.removedirs(self._cf_path) - return False, None - - def __get_windows_cloudflarest(self, download_url, proxies): - response = Response() - try: - response = requests.get(download_url, stream=True, proxies=proxies if proxies else None) - except requests.exceptions.RequestException as e: - logger.error(f"CloudflareSpeedTest下载失败:{str(e)}") - if response.status_code == 200: - with 
open(f'{self._cf_path}\\CloudflareST_windows_amd64.zip', 'wb') as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - - @staticmethod - def __get_release_version(): - """ - 获取CloudflareSpeedTest最新版本 - """ - version_res = RequestUtils().get_res( - "https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest") - if not version_res: - version_res = RequestUtils(proxies=settings.PROXY).get_res( - "https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest") - if version_res: - ver_json = version_res.json() - version = f"{ver_json['tag_name']}" - return version - else: - return None - - def __update_config(self): - """ - 更新优选插件配置 - """ - self.update_config({ - "onlyonce": False, - "cron": self._cron, - "cf_ip": self._cf_ip, - "version": self._version, - "ipv4": self._ipv4, - "ipv6": self._ipv6, - "re_install": self._re_install, - "additional_args": self._additional_args, - "notify": self._notify, - "check": self._check - }) - - def get_state(self) -> bool: - return True if self._cf_ip and self._cron else False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - return [{ - "cmd": "/cloudflare_speedtest", - "event": EventType.CloudFlareSpeedTest, - "desc": "Cloudflare IP优选", - "data": {} - }] - - def get_api(self) -> List[Dict[str, Any]]: - return [{ - "path": "/cloudflare_speedtest", - "endpoint": self.cloudflare_speedtest, - "methods": ["GET"], - "summary": "Cloudflare IP优选", - "description": "Cloudflare IP优选", - }] - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cf_ip', - 'label': '优选IP', - 'placeholder': '121.121.121.121' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '优选周期', - 'placeholder': '0 0 0 ? 
*' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'version', - 'readonly': True, - 'label': 'CloudflareSpeedTest版本', - 'placeholder': '暂未安装' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'ipv4', - 'label': 'IPv4', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'ipv6', - 'label': 'IPv6', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'check', - 'label': '自动校准', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 're_install', - 'label': '重装后运行', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '运行时通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'additional_args', - 'label': '高级参数', - 'placeholder': '-dd' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': 'F12看请求的Server属性,如果是cloudflare说明该站点支持Cloudflare IP优选。' - } - } - ] - } - ] - } - ] - } - ], { - "cf_ip": "", - "cron": "", - "version": "", - "ipv4": True, - "ipv6": False, - "check": False, - "onlyonce": False, - "re_install": False, - "notify": True, - "additional_args": "" - } - - def get_page(self) -> List[dict]: - pass - - def cloudflare_speedtest(self) -> schemas.Response: - """ - API调用CloudflareSpeedTest IP优选 - """ - self.__cloudflareSpeedTest() - return schemas.Response(success=True) - - @staticmethod - def __read_system_hosts(): - """ - 读取系统hosts对象 - """ - # 获取本机hosts路径 - if SystemUtils.is_windows(): - hosts_path = r"c:\windows\system32\drivers\etc\hosts" - else: - hosts_path = '/etc/hosts' - # 读取系统hosts - return Hosts(path=hosts_path) - - def __add_hosts_to_system(self, hosts): - """ - 添加hosts到系统 - """ - # 系统hosts对象 - system_hosts = self.__read_system_hosts() - # 过滤掉插件添加的hosts - orgin_entries = [] - for entry in system_hosts.entries: - if entry.entry_type == "comment" and entry.comment == "# CustomHostsPlugin": - break - orgin_entries.append(entry) - system_hosts.entries = orgin_entries - # 新的有效hosts - new_entrys = [] - # 新的错误的hosts - err_hosts = [] - err_flag = False - for host in hosts: - if not host: - continue - host_arr = str(host).split() - try: - host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6', - address=host_arr[0], - names=host_arr[1:]) - new_entrys.append(host_entry) - except Exception as err: - err_hosts.append(host + "\n") - logger.error(f"[HOST] 格式转换错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"[HOST] 
格式转换错误:{str(err)}") - - # 写入系统hosts - if new_entrys: - try: - # 添加分隔标识 - system_hosts.add([HostsEntry(entry_type='comment', comment="# CustomHostsPlugin")]) - # 添加新的Hosts - system_hosts.add(new_entrys) - system_hosts.write() - logger.info("更新系统hosts文件成功") - except Exception as err: - err_flag = True - logger.error(f"更新系统hosts文件失败:{str(err) or '请检查权限'}") - # 推送实时消息 - self.systemmessage.put(f"更新系统hosts文件失败:{str(err) or '请检查权限'}") - return err_flag, err_hosts - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/customhosts/__init__.py b/app/plugins/customhosts/__init__.py deleted file mode 100644 index c9629483..00000000 --- a/app/plugins/customhosts/__init__.py +++ /dev/null @@ -1,260 +0,0 @@ -from typing import List, Tuple, Dict, Any - -from python_hosts import Hosts, HostsEntry - -from app.core.event import eventmanager -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType -from app.utils.ip import IpUtils -from app.utils.system import SystemUtils - - -class CustomHosts(_PluginBase): - # 插件名称 - plugin_name = "自定义Hosts" - # 插件描述 - plugin_desc = "修改系统hosts文件,加速网络访问。" - # 插件图标 - plugin_icon = "hosts.png" - # 主题色 - plugin_color = "#02C4E0" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "customhosts_" - # 加载顺序 - plugin_order = 10 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _hosts = [] - _enabled = False - - def init_plugin(self, config: dict = None): - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._hosts = config.get("hosts") - if isinstance(self._hosts, str): - self._hosts = str(self._hosts).split('\n') - if self._enabled and self._hosts: - # 排除空的host - new_hosts = [] - for host in self._hosts: - if host and host != '\n': - new_hosts.append(host.replace("\n", "") + "\n") - self._hosts = new_hosts - - # 添加到系统 - error_flag, error_hosts = self.__add_hosts_to_system(self._hosts) - self._enabled = self._enabled and not error_flag - - # 更新错误Hosts - self.update_config({ - "hosts": ''.join(self._hosts), - "err_hosts": error_hosts, - "enabled": self._enabled - }) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'hosts', - 'label': '自定义hosts', - 'rows': 10, - 'placeholder': '每行一个配置,格式为:ip host1 host2 ...' 
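Each line of this textarea is later converted into a `python_hosts` entry in `__add_hosts_to_system` below. A standalone sketch of that conversion; the IPv4 check is deliberately crude here (the plugin calls its own `IpUtils.is_ipv4`), and the scratch file path is only for illustration:

```python
from python_hosts import Hosts, HostsEntry

raw = "1.2.3.4 example.org www.example.org"   # "ip host1 host2 ..." as the placeholder describes

parts = raw.split()
entry_type = "ipv4" if parts[0].count(".") == 3 else "ipv6"   # crude heuristic for the sketch
entry = HostsEntry(entry_type=entry_type, address=parts[0], names=parts[1:])

open("/tmp/hosts.sample", "a").close()         # scratch file; the plugin targets the system hosts file
hosts = Hosts(path="/tmp/hosts.sample")
hosts.add([HostsEntry(entry_type="comment", comment="# CustomHostsPlugin")])  # marker the plugin uses
hosts.add([entry])
hosts.write()
```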
- } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'err_hosts', - 'readonly': True, - 'label': '错误hosts', - 'rows': 2, - 'placeholder': '错误的hosts配置会展示在此处,请修改上方hosts重新提交(错误的hosts不会写入系统hosts文件)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': 'host格式ip host,中间有空格!!!' - '(注:容器运行则更新容器hosts!非宿主机!)' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "hosts": "", - "err_hosts": "" - } - - def get_page(self) -> List[dict]: - pass - - @staticmethod - def __read_system_hosts(): - """ - 读取系统hosts对象 - """ - # 获取本机hosts路径 - if SystemUtils.is_windows(): - hosts_path = r"c:\windows\system32\drivers\etc\hosts" - else: - hosts_path = '/etc/hosts' - # 读取系统hosts - return Hosts(path=hosts_path) - - def __add_hosts_to_system(self, hosts): - """ - 添加hosts到系统 - """ - # 系统hosts对象 - system_hosts = self.__read_system_hosts() - # 过滤掉插件添加的hosts - orgin_entries = [] - for entry in system_hosts.entries: - if entry.entry_type == "comment" and entry.comment == "# CustomHostsPlugin": - break - orgin_entries.append(entry) - system_hosts.entries = orgin_entries - # 新的有效hosts - new_entrys = [] - # 新的错误的hosts - err_hosts = [] - err_flag = False - for host in hosts: - if not host: - continue - host_arr = str(host).split() - try: - host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6', - address=host_arr[0], - names=host_arr[1:]) - new_entrys.append(host_entry) - except Exception as err: - err_hosts.append(host + "\n") - logger.error(f"[HOST] 格式转换错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"[HOST] 格式转换错误:{str(err)}") - - # 写入系统hosts - if new_entrys: - try: - # 添加分隔标识 - system_hosts.add([HostsEntry(entry_type='comment', comment="# CustomHostsPlugin")]) - # 添加新的Hosts - system_hosts.add(new_entrys) - system_hosts.write() - logger.info("更新系统hosts文件成功") - except Exception as err: - err_flag = True - logger.error(f"更新系统hosts文件失败:{str(err) or '请检查权限'}") - # 推送实时消息 - self.systemmessage.put(f"更新系统hosts文件失败:{str(err) or '请检查权限'}") - return err_flag, err_hosts - - def stop_service(self): - """ - 退出插件 - """ - pass - - @eventmanager.register(EventType.PluginReload) - def reload(self, event): - """ - 响应插件重载事件 - """ - plugin_id = event.event_data.get("plugin_id") - if not plugin_id: - return - if plugin_id != self.__class__.__name__: - return - return self.init_plugin(self.get_config()) diff --git a/app/plugins/customsites/__init__.py b/app/plugins/customsites/__init__.py deleted file mode 100644 index 6e792b14..00000000 --- a/app/plugins/customsites/__init__.py +++ /dev/null @@ -1,250 +0,0 @@ -from typing import Any, List, Dict, Tuple -from urllib.parse import urlparse - -from app.core.config import settings -from app.core.event import EventManager -from app.helper.cookiecloud import CookieCloudHelper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType - - -class CustomSites(_PluginBase): - # 插件名称 - plugin_name = "自定义站点" - # 插件描述 - plugin_desc = "增加自定义站点为签到和统计使用。" - # 插件图标 - plugin_icon = "world.png" - # 主题色 - plugin_color = "#9AC16C" - # 插件版本 - plugin_version = "0.1" - # 插件作者 - plugin_author = "lightolly" - # 作者主页 - author_url = "https://github.com/lightolly" - # 插件配置项ID前缀 - plugin_config_prefix = 
"customsites_" - # 加载顺序 - plugin_order = 0 - # 可使用的用户级别 - auth_level = 2 - - # 自定义站点起始 id - site_id_base = 60000 - site_id_alloc = site_id_base - - # 私有属性 - cookie_cloud: CookieCloudHelper = None - - # 配置属性 - _enabled: bool = False - """ - { - "id": "站点ID", - "name": "站点名称", - "url": "站点地址", - "cookie": "站点Cookie", - "ua": "User-Agent", - "proxy": "是否使用代理", - "render": "是否仿真", - } - """ - _sites: list[Dict] = [] - """ - 格式 - 站点名称|url|是否仿真 - """ - _site_urls: str = "" - - def init_plugin(self, config: dict = None): - self.cookie_cloud = CookieCloudHelper( - server=settings.COOKIECLOUD_HOST, - key=settings.COOKIECLOUD_KEY, - password=settings.COOKIECLOUD_PASSWORD - ) - - del_sites = [] - sites = [] - new_site_urls = [] - # 配置 - if config: - self._enabled = config.get("enabled", False) - self._sites = config.get("sites", []) - self._site_urls = config.get("site_urls", "") - - if not self._enabled: - return - - site_urls = self._site_urls.splitlines() - # 只保留 匹配site_urls的 sites - urls = [site_url.split('|')[1] for site_url in site_urls] - for site in self._sites: - if site.get("url") not in urls: - del_sites.append(site) - else: - sites.append(site) - - for item in site_urls: - _, url, _ = item.split("|") - if url in [site.get("url") for site in self._sites]: - continue - else: - new_site_urls.append(item) - - # 获取待分配的最大ID - alloc_ids = [site.get("id") for site in self._sites if site.get("id")] + [self.site_id_base] - self.site_id_alloc = max(alloc_ids) + 1 - - # 补全 site_id - for item in new_site_urls: - site_name, item, site_render = item.split("|") - sites.append({ - "id": self.site_id_alloc, - "name": site_name, - "url": item, - "render": True if site_render.upper() == 'Y' else False, - "cookie": "", - }) - self.site_id_alloc += 1 - self._sites = sites - # 保存配置 - self.sync_cookie() - self.__update_config() - - # 通知站点删除 - for site in del_sites: - self.delete_site(site.get("id")) - logger.info(f"删除站点 {site.get('name')}") - - def get_state(self) -> bool: - return self._enabled - - def __update_config(self): - # 保存配置 - self.update_config( - { - "enabled": self._enabled, - "sites": self._sites, - "site_urls": self._site_urls - } - ) - - def __get_site_by_domain(self, domain): - for site in self._sites: - site_domain = urlparse(site.get("url")).netloc - if site_domain.endswith(domain): - return site - return None - - def sync_cookie(self): - """ - 通过CookieCloud同步站点Cookie - """ - logger.info("开始同步CookieCloud站点 ...") - cookies, msg = self.cookie_cloud.download() - if not cookies: - logger.error(f"CookieCloud同步失败:{msg}") - return - # 保存Cookie或新增站点 - _update_count = 0 - for domain, cookie in cookies.items(): - # 获取站点信息 - site_info = self.__get_site_by_domain(domain) - if site_info: - # 更新站点Cookie - logger.info(f"更新站点 {domain} Cookie ...") - site_info.update({"cookie": cookie}) - _update_count += 1 - - # 处理完成 - ret_msg = f"更新了{_update_count}个站点,总{len(self._sites)}个站点" - logger.info(f"自定义站点 Cookie同步成功:{ret_msg}") - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': 
{ - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'site_urls', - 'label': '站点列表', - 'rows': 5, - 'placeholder': '每一行一个站点,配置方式:\n' - '站点名称|站点地址|是否仿真(Y/N)\n' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "site_urls": [], - "sites": self._sites - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - pass - - @staticmethod - def delete_site(site_id): - """ - 删除站点通知 - """ - # 插件站点删除 - EventManager().send_event(EventType.SiteDeleted, - { - "site_id": site_id - }) diff --git a/app/plugins/dirmonitor/__init__.py b/app/plugins/dirmonitor/__init__.py deleted file mode 100644 index 6c068b56..00000000 --- a/app/plugins/dirmonitor/__init__.py +++ /dev/null @@ -1,847 +0,0 @@ -import datetime -import re -import shutil -import threading -import traceback -from pathlib import Path -from typing import List, Tuple, Dict, Any, Optional - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from watchdog.events import FileSystemEventHandler -from watchdog.observers import Observer -from watchdog.observers.polling import PollingObserver - -from app import schemas -from app.chain.tmdb import TmdbChain -from app.chain.transfer import TransferChain -from app.core.config import settings -from app.core.context import MediaInfo -from app.core.event import eventmanager, Event -from app.core.metainfo import MetaInfoPath -from app.db.downloadhistory_oper import DownloadHistoryOper -from app.db.transferhistory_oper import TransferHistoryOper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas import Notification, NotificationType, TransferInfo -from app.schemas.types import EventType, MediaType, SystemConfigKey -from app.utils.string import StringUtils -from app.utils.system import SystemUtils - -lock = threading.Lock() - - -class FileMonitorHandler(FileSystemEventHandler): - """ - 目录监控响应类 - """ - - def __init__(self, monpath: str, sync: Any, **kwargs): - super(FileMonitorHandler, self).__init__(**kwargs) - self._watch_path = monpath - self.sync = sync - - def on_created(self, event): - self.sync.event_handler(event=event, text="创建", - mon_path=self._watch_path, event_path=event.src_path) - - def on_moved(self, event): - self.sync.event_handler(event=event, text="移动", - mon_path=self._watch_path, event_path=event.dest_path) - - -class DirMonitor(_PluginBase): - # 插件名称 - plugin_name = "目录监控" - # 插件描述 - plugin_desc = "监控目录文件发生变化时实时整理到媒体库。" - # 插件图标 - plugin_icon = "directory.png" - # 主题色 - plugin_color = "#E0995E" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "dirmonitor_" - # 加载顺序 - plugin_order = 4 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _scheduler = None - transferhis = None - downloadhis = None - transferchian = None - tmdbchain = None - _observer = [] - _enabled = False - _notify = False - _onlyonce = False - # 模式 compatibility/fast - _mode = "fast" - # 转移方式 - _transfer_type = settings.TRANSFER_TYPE - _monitor_dirs = "" - _exclude_keywords = "" - _interval: int = 10 - # 存储源目录与目的目录关系 - _dirconf: Dict[str, Optional[Path]] = {} - # 存储源目录转移方式 - _transferconf: Dict[str, Optional[str]] = {} - _medias = {} - # 退出事件 - _event = threading.Event() - - def init_plugin(self, config: dict = None): - self.transferhis = TransferHistoryOper() - self.downloadhis = DownloadHistoryOper() - self.transferchian = TransferChain() - self.tmdbchain = TmdbChain() - # 清空配置 - 
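The configuration read just below accepts one directory per line in the form `source[:destination][#transfer_type]`, with additional handling for Windows drive letters. A simplified standalone parse of one such line, ignoring that Windows special case; the default transfer type is a placeholder (the plugin falls back to `settings.TRANSFER_TYPE`):

```python
from pathlib import Path


def parse_monitor_line(line: str, default_type: str = "link"):
    """Split 'src[:dest][#mode]' into (source, destination, transfer_type)."""
    transfer_type = default_type
    if line.count("#") == 1:
        line, transfer_type = line.split("#")
    parts = line.split(":")        # POSIX paths only; Windows drive letters need extra care
    source = parts[0]
    destination = Path(parts[1]) if len(parts) > 1 else None
    return source, destination, transfer_type


print(parse_monitor_line("/downloads:/media#move"))
# -> ('/downloads', PosixPath('/media'), 'move')
```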
self._dirconf = {} - self._transferconf = {} - - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._notify = config.get("notify") - self._onlyonce = config.get("onlyonce") - self._mode = config.get("mode") - self._transfer_type = config.get("transfer_type") - self._monitor_dirs = config.get("monitor_dirs") or "" - self._exclude_keywords = config.get("exclude_keywords") or "" - self._interval = config.get("interval") or 10 - - # 停止现有任务 - self.stop_service() - - if self._enabled or self._onlyonce: - # 定时服务管理器 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - # 追加入库消息统一发送服务 - self._scheduler.add_job(self.send_msg, trigger='interval', seconds=15) - - # 读取目录配置 - monitor_dirs = self._monitor_dirs.split("\n") - if not monitor_dirs: - return - for mon_path in monitor_dirs: - # 格式源目录:目的目录 - if not mon_path: - continue - - # 自定义转移方式 - _transfer_type = self._transfer_type - if mon_path.count("#") == 1: - _transfer_type = mon_path.split("#")[1] - mon_path = mon_path.split("#")[0] - - # 存储目的目录 - if SystemUtils.is_windows(): - if mon_path.count(":") > 1: - paths = [mon_path.split(":")[0] + ":" + mon_path.split(":")[1], - mon_path.split(":")[2] + ":" + mon_path.split(":")[3]] - else: - paths = [mon_path] - else: - paths = mon_path.split(":") - - # 目的目录 - target_path = None - if len(paths) > 1: - mon_path = paths[0] - target_path = Path(paths[1]) - self._dirconf[mon_path] = target_path - else: - self._dirconf[mon_path] = None - - # 转移方式 - self._transferconf[mon_path] = _transfer_type - - # 启用目录监控 - if self._enabled: - # 检查媒体库目录是不是下载目录的子目录 - try: - if target_path and target_path.is_relative_to(Path(mon_path)): - logger.warn(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控") - self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控") - continue - except Exception as e: - logger.debug(str(e)) - pass - - try: - if self._mode == "compatibility": - # 兼容模式,目录同步性能降低且NAS不能休眠,但可以兼容挂载的远程共享目录如SMB - observer = PollingObserver(timeout=10) - else: - # 内部处理系统操作类型选择最优解 - observer = Observer(timeout=10) - self._observer.append(observer) - observer.schedule(FileMonitorHandler(mon_path, self), path=mon_path, recursive=True) - observer.daemon = True - observer.start() - logger.info(f"{mon_path} 的目录监控服务启动") - except Exception as e: - err_msg = str(e) - if "inotify" in err_msg and "reached" in err_msg: - logger.warn( - f"目录监控服务启动出现异常:{err_msg},请在宿主机上(不是docker容器内)执行以下命令并重启:" - + """ - echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf - echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf - sudo sysctl -p - """) - else: - logger.error(f"{mon_path} 启动目录监控失败:{err_msg}") - self.systemmessage.put(f"{mon_path} 启动目录监控失败:{err_msg}") - - # 运行一次定时服务 - if self._onlyonce: - logger.info("目录监控服务启动,立即运行一次") - self._scheduler.add_job(func=self.sync_all, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - # 关闭一次性开关 - self._onlyonce = False - # 保存配置 - self.__update_config() - - # 启动定时服务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __update_config(self): - """ - 更新配置 - """ - self.update_config({ - "enabled": self._enabled, - "notify": self._notify, - "onlyonce": self._onlyonce, - "mode": self._mode, - "transfer_type": self._transfer_type, - "monitor_dirs": self._monitor_dirs, - "exclude_keywords": self._exclude_keywords, - "interval": self._interval - }) - - @eventmanager.register(EventType.DirectorySync) - def remote_sync(self, event: Event): - """ - 
远程全量同步 - """ - if event: - self.post_message(channel=event.event_data.get("channel"), - title="开始同步监控目录 ...", - userid=event.event_data.get("user")) - self.sync_all() - if event: - self.post_message(channel=event.event_data.get("channel"), - title="监控目录同步完成!", userid=event.event_data.get("user")) - - def sync_all(self): - """ - 立即运行一次,全量同步目录中所有文件 - """ - logger.info("开始全量同步监控目录 ...") - # 遍历所有监控目录 - for mon_path in self._dirconf.keys(): - # 遍历目录下所有文件 - for file_path in SystemUtils.list_files(Path(mon_path), settings.RMT_MEDIAEXT): - self.__handle_file(event_path=str(file_path), mon_path=mon_path) - logger.info("全量同步监控目录完成!") - - def event_handler(self, event, mon_path: str, text: str, event_path: str): - """ - 处理文件变化 - :param event: 事件 - :param mon_path: 监控目录 - :param text: 事件描述 - :param event_path: 事件文件路径 - """ - if not event.is_directory: - # 文件发生变化 - logger.debug("文件%s:%s" % (text, event_path)) - self.__handle_file(event_path=event_path, mon_path=mon_path) - - def __handle_file(self, event_path: str, mon_path: str): - """ - 同步一个文件 - :param event_path: 事件文件路径 - :param mon_path: 监控目录 - """ - file_path = Path(event_path) - try: - if not file_path.exists(): - return - # 全程加锁 - with lock: - transfer_history = self.transferhis.get_by_src(event_path) - if transfer_history: - logger.debug("文件已处理过:%s" % event_path) - return - - # 回收站及隐藏的文件不处理 - if event_path.find('/@Recycle/') != -1 \ - or event_path.find('/#recycle/') != -1 \ - or event_path.find('/.') != -1 \ - or event_path.find('/@eaDir') != -1: - logger.debug(f"{event_path} 是回收站或隐藏的文件") - return - - # 命中过滤关键字不处理 - if self._exclude_keywords: - for keyword in self._exclude_keywords.split("\n"): - if keyword and re.findall(keyword, event_path): - logger.info(f"{event_path} 命中过滤关键字 {keyword},不处理") - return - - # 整理屏蔽词不处理 - transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords) - if transfer_exclude_words: - for keyword in transfer_exclude_words: - if not keyword: - continue - if keyword and re.search(r"%s" % keyword, event_path, re.IGNORECASE): - logger.info(f"{event_path} 命中整理屏蔽词 {keyword},不处理") - return - - # 不是媒体文件不处理 - if file_path.suffix not in settings.RMT_MEDIAEXT: - logger.debug(f"{event_path} 不是媒体文件") - return - - # 判断是不是蓝光目录 - if re.search(r"BDMV[/\\]STREAM", event_path, re.IGNORECASE): - # 截取BDMV前面的路径 - event_path = event_path[:event_path.find("BDMV")] - file_path = Path(event_path) - - # 查询历史记录,已转移的不处理 - if self.transferhis.get_by_src(event_path): - logger.info(f"{event_path} 已整理过") - return - - # 元数据 - file_meta = MetaInfoPath(file_path) - if not file_meta.name: - logger.error(f"{file_path.name} 无法识别有效信息") - return - - # 查询转移目的目录 - target: Path = self._dirconf.get(mon_path) - # 查询转移方式 - transfer_type = self._transferconf.get(mon_path) - # 根据父路径获取下载历史 - download_history = self.downloadhis.get_by_path(Path(event_path).parent) - - # 识别媒体信息 - mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta, - tmdbid=download_history.tmdbid if download_history else None) - if not mediainfo: - logger.warn(f'未识别到媒体信息,标题:{file_meta.name}') - # 新增转移成功历史记录 - his = self.transferhis.add_fail( - src_path=file_path, - mode=transfer_type, - meta=file_meta - ) - if self._notify: - self.chain.post_message(Notification( - mtype=NotificationType.Manual, - title=f"{file_path.name} 未识别到媒体信息,无法入库!\n" - f"回复:```\n/redo {his.id} [tmdbid]|[类型]\n``` 手动识别转移。" - )) - return - - # 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title - if not settings.SCRAP_FOLLOW_TMDB: - transfer_history = 
self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id, - mtype=mediainfo.type.value) - if transfer_history: - mediainfo.title = transfer_history.title - logger.info(f"{file_path.name} 识别为:{mediainfo.type.value} {mediainfo.title_year}") - - # 更新媒体图片 - self.chain.obtain_images(mediainfo=mediainfo) - - # 获取集数据 - if mediainfo.type == MediaType.TV: - episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=mediainfo.tmdb_id, - season=file_meta.begin_season or 1) - else: - episodes_info = None - - # 获取downloadhash - download_hash = self.get_download_hash(src=str(file_path)) - - # 转移 - transferinfo: TransferInfo = self.chain.transfer(mediainfo=mediainfo, - path=file_path, - transfer_type=transfer_type, - target=target, - meta=file_meta, - episodes_info=episodes_info) - - if not transferinfo: - logger.error("文件转移模块运行失败") - return - if not transferinfo.success: - # 转移失败 - logger.warn(f"{file_path.name} 入库失败:{transferinfo.message}") - # 新增转移失败历史记录 - self.transferhis.add_fail( - src_path=file_path, - mode=transfer_type, - download_hash=download_hash, - meta=file_meta, - mediainfo=mediainfo, - transferinfo=transferinfo - ) - if self._notify: - self.chain.post_message(Notification( - mtype=NotificationType.Manual, - title=f"{mediainfo.title_year}{file_meta.season_episode} 入库失败!", - text=f"原因:{transferinfo.message or '未知'}", - image=mediainfo.get_message_image() - )) - return - - # 新增转移成功历史记录 - self.transferhis.add_success( - src_path=file_path, - mode=transfer_type, - download_hash=download_hash, - meta=file_meta, - mediainfo=mediainfo, - transferinfo=transferinfo - ) - - # 刮削单个文件 - if settings.SCRAP_METADATA: - self.chain.scrape_metadata(path=transferinfo.target_path, - mediainfo=mediainfo, - transfer_type=transfer_type) - - """ - { - "title_year season": { - "files": [ - { - "path":, - "mediainfo":, - "file_meta":, - "transferinfo": - } - ], - "time": "2023-08-24 23:23:23.332" - } - } - """ - # 发送消息汇总 - media_list = self._medias.get(mediainfo.title_year + " " + file_meta.season) or {} - if media_list: - media_files = media_list.get("files") or [] - if media_files: - file_exists = False - for file in media_files: - if str(event_path) == file.get("path"): - file_exists = True - break - if not file_exists: - media_files.append({ - "path": event_path, - "mediainfo": mediainfo, - "file_meta": file_meta, - "transferinfo": transferinfo - }) - else: - media_files = [ - { - "path": event_path, - "mediainfo": mediainfo, - "file_meta": file_meta, - "transferinfo": transferinfo - } - ] - media_list = { - "files": media_files, - "time": datetime.datetime.now() - } - else: - media_list = { - "files": [ - { - "path": event_path, - "mediainfo": mediainfo, - "file_meta": file_meta, - "transferinfo": transferinfo - } - ], - "time": datetime.datetime.now() - } - self._medias[mediainfo.title_year + " " + file_meta.season] = media_list - - # 广播事件 - self.eventmanager.send_event(EventType.TransferComplete, { - 'meta': file_meta, - 'mediainfo': mediainfo, - 'transferinfo': transferinfo - }) - - # 移动模式删除空目录 - if transfer_type == "move": - for file_dir in file_path.parents: - if len(str(file_dir)) <= len(str(Path(mon_path))): - # 重要,删除到监控目录为止 - break - files = SystemUtils.list_files(file_dir, settings.RMT_MEDIAEXT) - if not files: - logger.warn(f"移动模式,删除空目录:{file_dir}") - shutil.rmtree(file_dir, ignore_errors=True) - - except Exception as e: - logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc())) - - def send_msg(self): - """ - 定时检查是否有媒体处理完,发送统一消息 - """ - if not self._medias or not self._medias.keys(): - 
return - - # 遍历检查是否已刮削完,发送消息 - for medis_title_year_season in list(self._medias.keys()): - media_list = self._medias.get(medis_title_year_season) - logger.info(f"开始处理媒体 {medis_title_year_season} 消息") - - if not media_list: - continue - - # 获取最后更新时间 - last_update_time = media_list.get("time") - media_files = media_list.get("files") - if not last_update_time or not media_files: - continue - - transferinfo = media_files[0].get("transferinfo") - file_meta = media_files[0].get("file_meta") - mediainfo = media_files[0].get("mediainfo") - # 判断剧集最后更新时间距现在是已超过10秒或者电影,发送消息 - if (datetime.datetime.now() - last_update_time).total_seconds() > int(self._interval) \ - or mediainfo.type == MediaType.MOVIE: - # 发送通知 - if self._notify: - - # 汇总处理文件总大小 - total_size = 0 - file_count = 0 - - # 剧集汇总 - episodes = [] - for file in media_files: - transferinfo = file.get("transferinfo") - total_size += transferinfo.total_size - file_count += 1 - - file_meta = file.get("file_meta") - if file_meta and file_meta.begin_episode: - episodes.append(file_meta.begin_episode) - - transferinfo.total_size = total_size - # 汇总处理文件数量 - transferinfo.file_count = file_count - - # 剧集季集信息 S01 E01-E04 || S01 E01、E02、E04 - season_episode = None - # 处理文件多,说明是剧集,显示季入库消息 - if mediainfo.type == MediaType.TV: - # 季集文本 - season_episode = f"{file_meta.season} {StringUtils.format_ep(episodes)}" - # 发送消息 - self.transferchian.send_transfer_message(meta=file_meta, - mediainfo=mediainfo, - transferinfo=transferinfo, - season_episode=season_episode) - # 发送完消息,移出key - del self._medias[medis_title_year_season] - continue - - def get_download_hash(self, src: str): - """ - 从表中获取download_hash,避免连接下载器 - """ - download_file = self.downloadhis.get_file_by_fullpath(src) - if download_file: - return download_file.download_hash - return None - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - return [{ - "cmd": "/directory_sync", - "event": EventType.DirectorySync, - "desc": "目录监控同步", - "category": "管理", - "data": {} - }] - - def get_api(self) -> List[Dict[str, Any]]: - return [{ - "path": "/directory_sync", - "endpoint": self.sync, - "methods": ["GET"], - "summary": "目录监控同步", - "description": "目录监控同步", - }] - - def sync(self) -> schemas.Response: - """ - API调用目录同步 - """ - self.sync_all() - return schemas.Response(success=True) - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'mode', - 'label': '监控模式', - 'items': [ - {'title': '兼容模式', 'value': 'compatibility'}, - {'title': '性能模式', 'value': 'fast'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { 
- 'component': 'VSelect', - 'props': { - 'model': 'transfer_type', - 'label': '转移方式', - 'items': [ - {'title': '移动', 'value': 'move'}, - {'title': '复制', 'value': 'copy'}, - {'title': '硬链接', 'value': 'link'}, - {'title': '软链接', 'value': 'softlink'}, - {'title': 'Rclone复制', 'value': 'rclone_copy'}, - {'title': 'Rclone移动', 'value': 'rclone_move'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'interval', - 'label': '入库消息延迟', - 'placeholder': '10' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'monitor_dirs', - 'label': '监控目录', - 'rows': 5, - 'placeholder': '每一行一个目录,支持以下几种配置方式,转移方式支持 move、copy、link、softlink、rclone_copy、rclone_move:\n' - '监控目录\n' - '监控目录#转移方式\n' - '监控目录:转移目的目录\n' - '监控目录:转移目的目录#转移方式' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'exclude_keywords', - 'label': '排除关键词', - 'rows': 2, - 'placeholder': '每一行一个关键词' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '入库消息延迟默认10s,如网络较慢可酌情调大,有助于发送统一入库消息。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": False, - "onlyonce": False, - "mode": "fast", - "transfer_type": settings.TRANSFER_TYPE, - "monitor_dirs": "", - "exclude_keywords": "", - "interval": 10 - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - if self._observer: - for observer in self._observer: - try: - observer.stop() - observer.join() - except Exception as e: - print(str(e)) - self._observer = [] - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None diff --git a/app/plugins/doubanrank/__init__.py b/app/plugins/doubanrank/__init__.py deleted file mode 100644 index 7f4b356f..00000000 --- a/app/plugins/doubanrank/__init__.py +++ /dev/null @@ -1,574 +0,0 @@ -import datetime -import re -import xml.dom.minidom -from threading import Event -from typing import Tuple, List, Dict, Any - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.douban import DoubanChain -from app.chain.download import DownloadChain -from app.chain.subscribe import SubscribeChain -from app.core.config import settings -from app.core.context import MediaInfo -from app.core.metainfo import MetaInfo -from app.log import logger -from app.plugins import _PluginBase -from app.utils.dom import DomUtils -from app.utils.http import RequestUtils - - -class DoubanRank(_PluginBase): - # 插件名称 - plugin_name = "豆瓣榜单订阅" - # 插件描述 - plugin_desc = "监控豆瓣热门榜单,自动添加订阅。" - # 插件图标 - plugin_icon = "movie.jpg" - # 主题色 - plugin_color = "#01B3E3" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "doubanrank_" - # 加载顺序 - plugin_order = 6 - # 可使用的用户级别 - auth_level = 2 - - # 退出事件 - _event = Event() - # 私有属性 - downloadchain: DownloadChain = None - 
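Further down in this plugin, `__get_rss_info` parses these RSSHub feeds with `xml.dom.minidom` and pulls the Douban subject id out of each item link with a regular expression. A condensed standalone sketch of that step; the feed snippet is made up for illustration:

```python
import re
import xml.dom.minidom

SAMPLE_RSS = """<rss><channel>
  <item><title>肖申克的救赎</title><link>https://movie.douban.com/subject/1292052/</link></item>
</channel></rss>"""

dom = xml.dom.minidom.parseString(SAMPLE_RSS)
for item in dom.documentElement.getElementsByTagName("item"):
    title = item.getElementsByTagName("title")[0].firstChild.data
    link = item.getElementsByTagName("link")[0].firstChild.data
    ids = re.findall(r"/(\d+)/", link)       # Douban subject id embedded in the link
    doubanid = ids[0] if ids else None
    print(title, doubanid)                   # -> 肖申克的救赎 1292052
```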
subscribechain: SubscribeChain = None - doubanchain: DoubanChain = None - _scheduler = None - _douban_address = { - 'movie-ustop': 'https://rsshub.app/douban/movie/ustop', - 'movie-weekly': 'https://rsshub.app/douban/movie/weekly', - 'movie-real-time': 'https://rsshub.app/douban/movie/weekly/subject_real_time_hotest', - 'show-domestic': 'https://rsshub.app/douban/movie/weekly/show_domestic', - 'movie-hot-gaia': 'https://rsshub.app/douban/movie/weekly/movie_hot_gaia', - 'tv-hot': 'https://rsshub.app/douban/movie/weekly/tv_hot', - 'movie-top250': 'https://rsshub.app/douban/movie/weekly/movie_top250', - } - _enabled = False - _cron = "" - _onlyonce = False - _rss_addrs = [] - _ranks = [] - _vote = 0 - _clear = False - _clearflag = False - - def init_plugin(self, config: dict = None): - self.downloadchain = DownloadChain() - self.subscribechain = SubscribeChain() - self.doubanchain = DoubanChain() - - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._onlyonce = config.get("onlyonce") - self._vote = float(config.get("vote")) if config.get("vote") else 0 - rss_addrs = config.get("rss_addrs") - if rss_addrs: - if isinstance(rss_addrs, str): - self._rss_addrs = rss_addrs.split('\n') - else: - self._rss_addrs = rss_addrs - else: - self._rss_addrs = [] - self._ranks = config.get("ranks") or [] - self._clear = config.get("clear") - - # 停止现有任务 - self.stop_service() - - # 启动服务 - if self._enabled or self._onlyonce: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - logger.info(f"豆瓣榜单订阅服务启动,周期:{self._cron}") - try: - self._scheduler.add_job(func=self.__refresh_rss, - trigger=CronTrigger.from_crontab(self._cron), - name="豆瓣榜单订阅") - except Exception as e: - logger.error(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}") - self.systemmessage.put(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}") - else: - self._scheduler.add_job(func=self.__refresh_rss, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - logger.info("豆瓣榜单订阅服务启动,周期:每天 08:00") - - if self._onlyonce: - logger.info("豆瓣榜单订阅服务启动,立即运行一次") - self._scheduler.add_job(func=self.__refresh_rss, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - - if self._onlyonce or self._clear: - # 关闭一次性开关 - self._onlyonce = False - # 记录缓存清理标志 - self._clearflag = self._clear - # 关闭清理缓存 - self._clear = False - # 保存配置 - self.__update_config() - - if self._scheduler.get_jobs(): - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, 
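When the refresh below runs, each feed entry is checked against the plugin's saved history by a unique flag before a subscription is added, so repeated refreshes do not re-subscribe the same title. A stripped-down sketch of that bookkeeping; the in-memory `history` list stands in for the plugin's `get_data('history')` / `save_data` storage:

```python
import datetime

history = []   # the plugin loads this via self.get_data('history')


def process_entry(title: str, doubanid: str) -> bool:
    """Record a feed entry once; return False if it was seen before."""
    unique_flag = f"doubanrank: {title} (DB:{doubanid})"
    if unique_flag in [h.get("unique") for h in history]:
        return False
    # ... media recognition and subscription would happen here ...
    history.append({
        "title": title,
        "doubanid": doubanid,
        "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "unique": unique_flag,
    })
    return True


print(process_entry("肖申克的救赎", "1292052"))   # True  (new entry recorded)
print(process_entry("肖申克的救赎", "1292052"))   # False (already in history)
```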
- { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'vote', - 'label': '评分', - 'placeholder': '评分大于等于该值才订阅' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'ranks', - 'label': '热门榜单', - 'items': [ - {'title': '电影北美票房榜', 'value': 'movie-ustop'}, - {'title': '一周口碑电影榜', 'value': 'movie-weekly'}, - {'title': '实时热门电影', 'value': 'movie-real-time'}, - {'title': '热门综艺', 'value': 'show-domestic'}, - {'title': '热门电影', 'value': 'movie-hot-gaia'}, - {'title': '热门电视剧', 'value': 'tv-hot'}, - {'title': '电影TOP10', 'value': 'movie-top250'}, - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'rss_addrs', - 'label': '自定义榜单地址', - 'placeholder': '每行一个地址,如:https://rsshub.app/douban/movie/ustop' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear', - 'label': '清理历史记录', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "cron": "", - "onlyonce": False, - "vote": "", - "ranks": [], - "rss_addrs": "", - "clear": False - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询历史记录 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - title = history.get("title") - poster = history.get("poster") - mtype = history.get("type") - time_str = history.get("time") - doubanid = history.get("doubanid") - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': poster, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'VCardSubtitle', - 'props': { - 'class': 'pa-2 font-bold break-words whitespace-break-spaces' - }, - 'content': [ - { - 'component': 'a', - 'props': { - 'href': f"https://movie.douban.com/subject/{doubanid}", - 'target': '_blank' - }, - 'text': title - } - ] - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{mtype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{time_str}' - } - ] - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def stop_service(self): - """ - 停止服务 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) - - def __update_config(self): - """ - 列新配置 - """ - self.update_config({ - "enabled": self._enabled, - "cron": self._cron, - "onlyonce": self._onlyonce, - "vote": self._vote, - 
"ranks": self._ranks, - "rss_addrs": '\n'.join(map(str, self._rss_addrs)), - "clear": self._clear - }) - - def __refresh_rss(self): - """ - 刷新RSS - """ - logger.info(f"开始刷新豆瓣榜单 ...") - addr_list = self._rss_addrs + [self._douban_address.get(rank) for rank in self._ranks] - if not addr_list: - logger.info(f"未设置榜单RSS地址") - return - else: - logger.info(f"共 {len(addr_list)} 个榜单RSS地址需要刷新") - - # 读取历史记录 - if self._clearflag: - history = [] - else: - history: List[dict] = self.get_data('history') or [] - - for addr in addr_list: - if not addr: - continue - try: - logger.info(f"获取RSS:{addr} ...") - rss_infos = self.__get_rss_info(addr) - if not rss_infos: - logger.error(f"RSS地址:{addr} ,未查询到数据") - continue - else: - logger.info(f"RSS地址:{addr} ,共 {len(rss_infos)} 条数据") - for rss_info in rss_infos: - if self._event.is_set(): - logger.info(f"订阅服务停止") - return - - title = rss_info.get('title') - douban_id = rss_info.get('doubanid') - unique_flag = f"doubanrank: {title} (DB:{douban_id})" - # 检查是否已处理过 - if unique_flag in [h.get("unique") for h in history]: - continue - # 元数据 - meta = MetaInfo(title) - # 识别媒体信息 - if douban_id: - # 识别豆瓣信息 - context = self.doubanchain.recognize_by_doubanid(douban_id) - mediainfo = context.media_info - if not mediainfo or not mediainfo.tmdb_id: - logger.warn(f'未识别到媒体信息,标题:{title},豆瓣ID:{douban_id}') - continue - - else: - # 匹配媒体信息 - mediainfo: MediaInfo = self.chain.recognize_media(meta=meta) - if not mediainfo: - logger.warn(f'未识别到媒体信息,标题:{title},豆瓣ID:{douban_id}') - continue - # 查询缺失的媒体信息 - exist_flag, _ = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo) - if exist_flag: - logger.info(f'{mediainfo.title_year} 媒体库中已存在') - continue - # 判断用户是否已经添加订阅 - if self.subscribechain.exists(mediainfo=mediainfo, meta=meta): - logger.info(f'{mediainfo.title_year} 订阅已存在') - continue - # 添加订阅 - self.subscribechain.add(title=mediainfo.title, - year=mediainfo.year, - mtype=mediainfo.type, - tmdbid=mediainfo.tmdb_id, - season=meta.begin_season, - exist_ok=True, - username="豆瓣榜单") - # 存储历史记录 - history.append({ - "title": title, - "type": mediainfo.type.value, - "year": mediainfo.year, - "poster": mediainfo.get_poster_image(), - "overview": mediainfo.overview, - "tmdbid": mediainfo.tmdb_id, - "doubanid": douban_id, - "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - "unique": unique_flag - }) - except Exception as e: - logger.error(str(e)) - - # 保存历史记录 - self.save_data('history', history) - # 缓存只清理一次 - self._clearflag = False - logger.info(f"所有榜单RSS刷新完成") - - @staticmethod - def __get_rss_info(addr) -> List[dict]: - """ - 获取RSS - """ - try: - ret = RequestUtils().get_res(addr) - if not ret: - return [] - ret_xml = ret.text - ret_array = [] - # 解析XML - dom_tree = xml.dom.minidom.parseString(ret_xml) - rootNode = dom_tree.documentElement - items = rootNode.getElementsByTagName("item") - for item in items: - try: - # 标题 - title = DomUtils.tag_value(item, "title", default="") - # 链接 - link = DomUtils.tag_value(item, "link", default="") - if not title and not link: - logger.warn(f"条目标题和链接均为空,无法处理") - continue - doubanid = re.findall(r"/(\d+)/", link) - if doubanid: - doubanid = doubanid[0] - if doubanid and not str(doubanid).isdigit(): - logger.warn(f"解析的豆瓣ID格式不正确:{doubanid}") - continue - # 返回对象 - ret_array.append({ - 'title': title, - 'link': link, - 'doubanid': doubanid - }) - except Exception as e1: - logger.error("解析RSS条目失败:" + str(e1)) - continue - return ret_array - except Exception as e: - logger.error("获取RSS失败:" + str(e)) - return [] diff --git 
a/app/plugins/doubansync/__init__.py b/app/plugins/doubansync/__init__.py deleted file mode 100644 index fd81174a..00000000 --- a/app/plugins/doubansync/__init__.py +++ /dev/null @@ -1,561 +0,0 @@ -import datetime -from pathlib import Path -from threading import Lock -from typing import Optional, Any, List, Dict, Tuple - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.douban import DoubanChain -from app.chain.download import DownloadChain -from app.chain.search import SearchChain -from app.chain.subscribe import SubscribeChain -from app.core.config import settings -from app.core.event import Event -from app.core.event import eventmanager -from app.core.metainfo import MetaInfo -from app.helper.rss import RssHelper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType - -lock = Lock() - - -class DoubanSync(_PluginBase): - # 插件名称 - plugin_name = "豆瓣想看" - # 插件描述 - plugin_desc = "同步豆瓣想看数据,自动添加订阅。" - # 插件图标 - plugin_icon = "douban.png" - # 主题色 - plugin_color = "#05B711" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "doubansync_" - # 加载顺序 - plugin_order = 3 - # 可使用的用户级别 - auth_level = 2 - - # 私有变量 - _interests_url: str = "https://www.douban.com/feed/people/%s/interests" - _scheduler: Optional[BackgroundScheduler] = None - _cache_path: Optional[Path] = None - rsshelper = None - downloadchain = None - searchchain = None - subscribechain = None - doubanchain = None - - # 配置属性 - _enabled: bool = False - _onlyonce: bool = False - _cron: str = "" - _notify: bool = False - _days: int = 7 - _users: str = "" - _clear: bool = False - _clearflag: bool = False - - def init_plugin(self, config: dict = None): - self.rsshelper = RssHelper() - self.downloadchain = DownloadChain() - self.searchchain = SearchChain() - self.subscribechain = SubscribeChain() - self.doubanchain = DoubanChain() - - # 停止现有任务 - self.stop_service() - - # 配置 - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._days = config.get("days") - self._users = config.get("users") - self._onlyonce = config.get("onlyonce") - self._clear = config.get("clear") - - if self._enabled or self._onlyonce: - - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - try: - self._scheduler.add_job(func=self.sync, - trigger=CronTrigger.from_crontab(self._cron), - name="豆瓣想看") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - else: - self._scheduler.add_job(self.sync, "interval", minutes=30, name="豆瓣想看") - - if self._onlyonce: - logger.info(f"豆瓣想看服务启动,立即运行一次") - self._scheduler.add_job(func=self.sync, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - - if self._onlyonce or self._clear: - # 关闭一次性开关 - self._onlyonce = False - # 记录缓存清理标志 - self._clearflag = self._clear - # 关闭清理缓存 - self._clear = False - # 保存配置 - self.__update_config() - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - return [{ - "cmd": "/douban_sync", - "event": 
EventType.DoubanSync, - "desc": "同步豆瓣想看", - "category": "订阅", - "data": {} - }] - - def get_api(self) -> List[Dict[str, Any]]: - """ - 获取插件API - [{ - "path": "/xx", - "endpoint": self.xxx, - "methods": ["GET", "POST"], - "summary": "API说明" - }] - """ - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'days', - 'label': '同步天数' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'users', - 'label': '用户列表', - 'placeholder': '豆瓣用户ID,多个用英文逗号分隔' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear', - 'label': '清理历史记录', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "onlyonce": False, - "cron": "*/30 * * * *", - "days": 7, - "users": "", - "clear": False - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - title = history.get("title") - poster = history.get("poster") - mtype = history.get("type") - time_str = history.get("time") - doubanid = history.get("doubanid") - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': poster, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'VCardSubtitle', - 'props': { - 'class': 'pa-2 font-bold break-words whitespace-break-spaces' - }, - 'content': [ - { - 'component': 'a', - 'props': { - 'href': f"https://movie.douban.com/subject/{doubanid}", - 'target': '_blank' - }, - 'text': title - } - ] - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{mtype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 
px-2' - }, - 'text': f'时间:{time_str}' - } - ] - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def __update_config(self): - """ - 更新配置 - """ - self.update_config({ - "enabled": self._enabled, - "notify": self._notify, - "onlyonce": self._onlyonce, - "cron": self._cron, - "days": self._days, - "users": self._users, - "clear": self._clear - }) - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - def sync(self): - """ - 通过用户RSS同步豆瓣想看数据 - """ - if not self._users: - return - # 读取历史记录 - if self._clearflag: - history = [] - else: - history: List[dict] = self.get_data('history') or [] - for user_id in self._users.split(","): - # 同步每个用户的豆瓣数据 - if not user_id: - continue - logger.info(f"开始同步用户 {user_id} 的豆瓣想看数据 ...") - url = self._interests_url % user_id - results = self.rsshelper.parse(url) - if not results: - logger.warn(f"未获取到用户 {user_id} 豆瓣RSS数据:{url}") - continue - else: - logger.info(f"获取到用户 {user_id} 豆瓣RSS数据:{len(results)}") - # 解析数据 - for result in results: - try: - dtype = result.get("title", "")[:2] - title = result.get("title", "")[2:] - if dtype not in ["想看", "在看"]: - logger.info(f'标题:{title},非想看/在看数据,跳过') - continue - if not result.get("link"): - logger.warn(f'标题:{title},未获取到链接,跳过') - continue - # 判断是否在天数范围 - pubdate: Optional[datetime.datetime] = result.get("pubdate") - if pubdate: - if (datetime.datetime.now(datetime.timezone.utc) - pubdate).days > float(self._days): - logger.info(f'已超过同步天数,标题:{title},发布时间:{pubdate}') - continue - douban_id = result.get("link", "").split("/")[-2] - # 检查是否处理过 - if not douban_id or douban_id in [h.get("doubanid") for h in history]: - logger.info(f'标题:{title},豆瓣ID:{douban_id} 已处理过') - continue - # 识别媒体信息 - meta = MetaInfo(title=title) - context = self.doubanchain.recognize_by_doubanid(douban_id) - mediainfo = context.media_info - if not mediainfo or not mediainfo.tmdb_id: - logger.warn(f'未识别到媒体信息,标题:{title},豆瓣ID:{douban_id}') - continue - # 查询缺失的媒体信息 - exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo) - if exist_flag: - logger.info(f'{mediainfo.title_year} 媒体库中已存在') - action = "exist" - else: - logger.info(f'{mediainfo.title_year} 媒体库中不存在,开始搜索 ...') - # 搜索 - contexts = self.searchchain.process(mediainfo=mediainfo, - no_exists=no_exists) - if not contexts: - logger.warn(f'{mediainfo.title_year} 未搜索到资源') - # 添加订阅 - self.subscribechain.add(title=mediainfo.title, - year=mediainfo.year, - mtype=mediainfo.type, - tmdbid=mediainfo.tmdb_id, - season=meta.begin_season, - exist_ok=True, - username="豆瓣想看") - action = "subscribe" - else: - # 自动下载 - downloads, lefts = self.downloadchain.batch_download(contexts=contexts, no_exists=no_exists, - username="豆瓣想看") - if downloads and not lefts: - # 全部下载完成 - logger.info(f'{mediainfo.title_year} 下载完成') - action = "download" - else: - # 未完成下载 - logger.info(f'{mediainfo.title_year} 未下载未完整,添加订阅 ...') - # 添加订阅 - self.subscribechain.add(title=mediainfo.title, - year=mediainfo.year, - mtype=mediainfo.type, - tmdbid=mediainfo.tmdb_id, - season=meta.begin_season, - exist_ok=True, - username="豆瓣想看") - action = "subscribe" - # 存储历史记录 - history.append({ - "action": action, - "title": title, - "type": mediainfo.type.value, - "year": mediainfo.year, - "poster": 
mediainfo.get_poster_image(), - "overview": mediainfo.overview, - "tmdbid": mediainfo.tmdb_id, - "doubanid": douban_id, - "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - }) - except Exception as err: - logger.error(f'同步用户 {user_id} 豆瓣想看数据出错:{str(err)}') - logger.info(f"用户 {user_id} 豆瓣想看同步完成") - # 保存历史记录 - self.save_data('history', history) - # 缓存只清理一次 - self._clearflag = False - - @eventmanager.register(EventType.DoubanSync) - def remote_sync(self, event: Event): - """ - 豆瓣想看同步 - """ - if event: - logger.info("收到命令,开始执行豆瓣想看同步 ...") - self.post_message(channel=event.event_data.get("channel"), - title="开始同步豆瓣想看 ...", - userid=event.event_data.get("user")) - self.sync() - - if event: - self.post_message(channel=event.event_data.get("channel"), - title="同步豆瓣想看数据完成!", userid=event.event_data.get("user")) diff --git a/app/plugins/downloadingmsg/__init__.py b/app/plugins/downloadingmsg/__init__.py deleted file mode 100644 index 818d4792..00000000 --- a/app/plugins/downloadingmsg/__init__.py +++ /dev/null @@ -1,324 +0,0 @@ -from apscheduler.schedulers.background import BackgroundScheduler - -from app.chain.download import DownloadChain -from app.chain.media import MediaChain -from app.core.config import settings -from app.db.downloadhistory_oper import DownloadHistoryOper -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple, Optional, Union -from app.log import logger -from app.schemas import NotificationType, TransferTorrent, DownloadingTorrent -from app.schemas.types import TorrentStatus, MessageChannel -from app.utils.string import StringUtils - - -class DownloadingMsg(_PluginBase): - # 插件名称 - plugin_name = "下载进度推送" - # 插件描述 - plugin_desc = "定时推送正在下载进度。" - # 插件图标 - plugin_icon = "downloadmsg.png" - # 主题色 - plugin_color = "#3DE75D" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "downloading_" - # 加载顺序 - plugin_order = 22 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _seconds = None - _type = None - _adminuser = None - _downloadhis = None - - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - - def init_plugin(self, config: dict = None): - # 停止现有任务 - self.stop_service() - - if config: - self._enabled = config.get("enabled") - self._seconds = config.get("seconds") or 300 - self._type = config.get("type") or 'admin' - self._adminuser = config.get("adminuser") - - # 加载模块 - if self._enabled: - self._downloadhis = DownloadHistoryOper() - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - if self._seconds: - try: - self._scheduler.add_job(func=self.__downloading, - trigger='interval', - seconds=int(self._seconds), - name="下载进度推送") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __downloading(self): - """ - 定时推送正在下载进度 - """ - # 正在下载种子 - torrents = DownloadChain().list_torrents(status=TorrentStatus.DOWNLOADING) - if not torrents: - logger.info("当前没有正在下载的任务!") - return - # 推送用户 - if self._type == "admin" or self._type == "both": - if not self._adminuser: - logger.error("未配置管理员用户") - return - - for username in str(self._adminuser).split(","): - self.__send_msg(torrents=torrents, username=username) - - if self._type == "user" or self._type == "both": - user_torrents = {} - # 根据正在下载种子hash获取下载历史 - for torrent in torrents: - downloadhis = 
self._downloadhis.get_by_hash(download_hash=torrent.hash) - if not downloadhis: - logger.warn(f"种子 {torrent.hash} 未获取到MoviePilot下载历史,无法推送下载进度") - continue - if not downloadhis.username: - logger.debug(f"种子 {torrent.hash} 未获取到下载用户记录,无法推送下载进度") - continue - user_torrent = user_torrents.get(downloadhis.username) or [] - user_torrent.append(torrent) - user_torrents[downloadhis.username] = user_torrent - - if not user_torrents or not user_torrents.keys(): - logger.warn("未获取到用户下载记录,无法推送下载进度") - return - - # 推送用户下载任务进度 - for username in list(user_torrents.keys()): - if not username: - continue - # 如果用户是管理员,无需重复推送 - if (self._type == "admin" or self._type == "both") and self._adminuser and username in str( - self._adminuser).split(","): - logger.debug("管理员已推送") - continue - - user_torrent = user_torrents.get(username) - if not user_torrent: - logger.warn(f"未获取到用户 {username} 下载任务") - continue - self.__send_msg(torrents=user_torrent, - username=username) - - if self._type == "all": - self.__send_msg(torrents=torrents) - - def __send_msg(self, torrents: Optional[List[Union[TransferTorrent, DownloadingTorrent]]], username: str = None): - """ - 发送消息 - """ - title = f"共 {len(torrents)} 个任务正在下载:" - messages = [] - index = 1 - channel_value = None - for torrent in torrents: - year = None - name = None - se = None - ep = None - # 先查询下载记录,没有再识别 - downloadhis = self._downloadhis.get_by_hash(download_hash=torrent.hash) - if downloadhis: - name = downloadhis.title - year = downloadhis.year - se = downloadhis.seasons - ep = downloadhis.episodes - if not channel_value: - channel_value = downloadhis.channel - else: - try: - context = MediaChain().recognize_by_title(title=torrent.title) - if not context or not context.media_info: - continue - media_info = context.media_info - year = media_info.year - name = media_info.title - if media_info.number_of_seasons: - se = f"S{str(media_info.number_of_seasons).rjust(2, '0')}" - if media_info.number_of_episodes: - ep = f"E{str(media_info.number_of_episodes).rjust(2, '0')}" - except Exception as e: - print(str(e)) - - # 拼装标题 - if year: - media_name = "%s (%s) %s%s" % (name, year, se, ep) - elif name: - media_name = "%s %s%s" % (name, se, ep) - else: - media_name = torrent.title - - if not self._adminuser or username not in str(self._adminuser).split(","): - # 下载用户发送精简消息 - messages.append(f"{index}. {media_name} {round(torrent.progress, 1)}%") - else: - messages.append(f"{index}. 
{media_name}\n" - f"{torrent.title} " - f"{StringUtils.str_filesize(torrent.size)} " - f"{round(torrent.progress, 1)}%") - index += 1 - - # 用户消息渠道 - if channel_value: - channel = next( - (channel for channel in MessageChannel.__members__.values() if channel.value == channel_value), None) - else: - channel = None - self.post_message(mtype=NotificationType.Download, - channel=channel, - title=title, - text="\n".join(messages), - userid=username) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'seconds', - 'label': '执行间隔', - 'placeholder': '单位(秒)' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'adminuser', - 'label': '管理员用户', - 'placeholder': '多个用户,分割' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'type', - 'label': '推送类型', - 'items': [ - {'title': '管理员', 'value': 'admin'}, - {'title': '下载用户', 'value': 'user'}, - {'title': '管理员和下载用户', 'value': 'both'}, - {'title': '所有用户', 'value': 'all'} - ] - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "seconds": 300, - "adminuser": "", - "type": "admin" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/invitessignin/__init__.py b/app/plugins/invitessignin/__init__.py deleted file mode 100644 index 8e2c5162..00000000 --- a/app/plugins/invitessignin/__init__.py +++ /dev/null @@ -1,313 +0,0 @@ -import json -import re -from datetime import datetime, timedelta - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.core.config import settings -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple, Optional -from app.log import logger -from app.schemas import NotificationType -from app.utils.http import RequestUtils - - -class InvitesSignin(_PluginBase): - # 插件名称 - plugin_name = "药丸签到" - # 插件描述 - plugin_desc = "药丸论坛签到。" - # 插件图标 - plugin_icon = "invites.png" - # 主题色 - plugin_color = "#FFFFFF" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "invitessignin_" - # 加载顺序 - plugin_order = 24 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _cron = None - _cookie = None - _onlyonce = False - _notify = False - - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - - def init_plugin(self, config: dict = None): - # 停止现有任务 - 
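The sign-in routine in the invitessignin hunk below scrapes a csrfToken and a userId out of the forum home page with regular expressions, then replays them as headers and payload for the check-in request. A standalone sketch of only the extraction step against an invented page fragment (the real page markup may differ):

    import re

    # invented stand-in for the HTML/JSON blob returned by the forum home page
    PAGE = 'window.app = {"csrfToken":"abc123def456","userId":4242,"canCheckin":true};'

    def extract_session_fields(body):
        """Pull the CSRF token and numeric user id needed by the check-in call."""
        tokens = re.findall(r'"csrfToken":"(.*?)"', body)
        user = re.search(r'"userId":(\d+)', body)
        if not tokens or not user:
            raise ValueError("csrfToken/userId not found in page")
        return tokens[0], user.group(1)

    csrf_token, user_id = extract_session_fields(PAGE)
    print(csrf_token, user_id)   # the plugin then PATCHes /api/users/<user_id>
                                 # with X-Csrf-Token and X-Http-Method-Override headers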
self.stop_service() - - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._cookie = config.get("cookie") - self._notify = config.get("notify") - self._onlyonce = config.get("onlyonce") - - # 加载模块 - if self._enabled: - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - if self._cron: - try: - self._scheduler.add_job(func=self.__signin, - trigger=CronTrigger.from_crontab(self._cron), - name="药丸签到") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - - if self._onlyonce: - logger.info(f"药丸签到服务启动,立即运行一次") - self._scheduler.add_job(func=self.__signin, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="药丸签到") - # 关闭一次性开关 - self._onlyonce = False - self.update_config({ - "onlyonce": False, - "cron": self._cron, - "enabled": self._enabled, - "cookie": self._cookie, - "notify": self._notify, - }) - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __signin(self): - """ - 药丸签到 - """ - res = RequestUtils(cookies=self._cookie).get_res(url="https://invites.fun") - if not res or res.status_code != 200: - logger.error("请求药丸错误") - return - - # 获取csrfToken - pattern = r'"csrfToken":"(.*?)"' - csrfToken = re.findall(pattern, res.text) - if not csrfToken: - logger.error("请求csrfToken失败") - return - - csrfToken = csrfToken[0] - logger.info(f"获取csrfToken成功 {csrfToken}") - - # 获取userid - pattern = r'"userId":(\d+)' - match = re.search(pattern, res.text) - - if match: - userId = match.group(1) - logger.info(f"获取userid成功 {userId}") - else: - logger.error("未找到userId") - return - - headers = { - "X-Csrf-Token": csrfToken, - "X-Http-Method-Override": "PATCH", - "Cookie": self._cookie - } - - data = { - "data": { - "type": "users", - "attributes": { - "canCheckin": False, - "totalContinuousCheckIn": 2 - }, - "id": userId - } - } - - # 开始签到 - res = RequestUtils(headers=headers).post_res(url=f"https://invites.fun/api/users/{userId}", json=data) - - if not res or res.status_code != 200: - logger.error("药丸签到失败") - return - - sign_dict = json.loads(res.text) - money = sign_dict['data']['attributes']['money'] - totalContinuousCheckIn = sign_dict['data']['attributes']['totalContinuousCheckIn'] - - # 发送通知 - if self._notify: - self.post_message( - mtype=NotificationType.SiteMessage, - title="【药丸签到任务完成】", - text=f"累计签到 {totalContinuousCheckIn} \n" - f"剩余药丸 {money}") - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '开启通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 
'VTextField', - 'props': { - 'model': 'cron', - 'label': '签到周期' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cookie', - 'label': '药丸cookie' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '整点定时签到失败?不妨换个时间试试' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "notify": False, - "cookie": "", - "cron": "0 9 * * *" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/iyuuautoseed/__init__.py b/app/plugins/iyuuautoseed/__init__.py deleted file mode 100644 index b378c5ac..00000000 --- a/app/plugins/iyuuautoseed/__init__.py +++ /dev/null @@ -1,1028 +0,0 @@ -import os -import re -from datetime import datetime, timedelta -from threading import Event -from typing import Any, List, Dict, Tuple, Optional - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from lxml import etree -from ruamel.yaml import CommentedMap - -from app.core.config import settings -from app.core.event import eventmanager -from app.db.site_oper import SiteOper -from app.helper.sites import SitesHelper -from app.helper.torrent import TorrentHelper -from app.log import logger -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.plugins.iyuuautoseed.iyuu_helper import IyuuHelper -from app.schemas import NotificationType -from app.schemas.types import EventType -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class IYUUAutoSeed(_PluginBase): - # 插件名称 - plugin_name = "IYUU自动辅种" - # 插件描述 - plugin_desc = "基于IYUU官方Api实现自动辅种。" - # 插件图标 - plugin_icon = "iyuu.png" - # 主题色 - plugin_color = "#F3B70B" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "iyuuautoseed_" - # 加载顺序 - plugin_order = 17 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - _scheduler = None - iyuuhelper = None - qb = None - tr = None - sites = None - siteoper = None - torrent = None - # 开关 - _enabled = False - _cron = None - _onlyonce = False - _token = None - _downloaders = [] - _sites = [] - _notify = False - _nolabels = None - _nopaths = None - _clearcache = False - # 退出事件 - _event = Event() - # 种子链接xpaths - _torrent_xpaths = [ - "//form[contains(@action, 'download.php?id=')]/@action", - "//a[contains(@href, 'download.php?hash=')]/@href", - "//a[contains(@href, 'download.php?id=')]/@href", - "//a[@class='index'][contains(@href, '/dl/')]/@href", - ] - _torrent_tags = ["已整理", "辅种"] - # 待校全种子hash清单 - _recheck_torrents = {} - _is_recheck_running = False - # 辅种缓存,出错的种子不再重复辅种,可清除 - _error_caches = [] - # 辅种缓存,辅种成功的种子,可清除 - _success_caches = [] - # 辅种缓存,出错的种子不再重复辅种,且无法清除。种子被删除404等情况 - _permanent_error_caches = [] - # 辅种计数 - total = 0 - realtotal = 0 - success = 0 - exist = 0 - fail = 0 - cached = 0 - - def init_plugin(self, config: dict = None): - self.sites = 
SitesHelper() - self.siteoper = SiteOper() - self.torrent = TorrentHelper() - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._token = config.get("token") - self._downloaders = config.get("downloaders") - self._sites = config.get("sites") or [] - self._notify = config.get("notify") - self._nolabels = config.get("nolabels") - self._nopaths = config.get("nopaths") - self._clearcache = config.get("clearcache") - self._permanent_error_caches = [] if self._clearcache else config.get("permanent_error_caches") or [] - self._error_caches = [] if self._clearcache else config.get("error_caches") or [] - self._success_caches = [] if self._clearcache else config.get("success_caches") or [] - - # 过滤掉已删除的站点 - all_sites = [site.id for site in self.siteoper.list_order_by_pri()] + [site.get("id") for site in - self.__custom_sites()] - self._sites = [site_id for site_id in all_sites if site_id in self._sites] - self.__update_config() - - # 停止现有任务 - self.stop_service() - - # 启动定时任务 & 立即运行一次 - if self.get_state() or self._onlyonce: - self.iyuuhelper = IyuuHelper(token=self._token) - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - self.qb = Qbittorrent() - self.tr = Transmission() - - if self._cron: - try: - self._scheduler.add_job(self.auto_seed, - CronTrigger.from_crontab(self._cron)) - logger.info(f"辅种服务启动,周期:{self._cron}") - except Exception as err: - logger.error(f"辅种服务启动失败:{str(err)}") - self.systemmessage.put(f"辅种服务启动失败:{str(err)}") - if self._onlyonce: - logger.info(f"辅种服务启动,立即运行一次") - self._scheduler.add_job(self.auto_seed, 'date', - run_date=datetime.now( - tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3) - ) - # 关闭一次性开关 - self._onlyonce = False - - if self._clearcache: - # 关闭清除缓存开关 - self._clearcache = False - - if self._clearcache or self._onlyonce: - # 保存配置 - self.__update_config() - - if self._scheduler.get_jobs(): - # 追加种子校验服务 - self._scheduler.add_job(self.check_recheck, 'interval', minutes=3) - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return True if self._enabled and self._cron and self._token and self._downloaders else False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - # 站点的可选项(内置站点 + 自定义站点) - customSites = self.__custom_sites() - - # 站点的可选项 - site_options = ([{"title": site.name, "value": site.id} - for site in self.siteoper.list_order_by_pri()] - + [{"title": site.get("name"), "value": site.get("id")} - for site in customSites]) - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'token', - 'label': 'IYUU Token', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', 
- 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '0 0 0 ? *' - } - } - ] - }, - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'downloaders', - 'label': '辅种下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'sites', - 'label': '辅种站点', - 'items': site_options - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'nolabels', - 'label': '不辅种标签', - 'placeholder': '使用,分隔多个标签' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'nopaths', - 'label': '不辅种数据文件目录', - 'rows': 3, - 'placeholder': '每一行一个目录' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clearcache', - 'label': '清除缓存后运行', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "notify": False, - "clearcache": False, - "cron": "", - "token": "", - "downloaders": [], - "sites": [], - "nopaths": "", - "nolabels": "" - } - - def get_page(self) -> List[dict]: - pass - - def __update_config(self): - self.update_config({ - "enabled": self._enabled, - "onlyonce": self._onlyonce, - "clearcache": self._clearcache, - "cron": self._cron, - "token": self._token, - "downloaders": self._downloaders, - "sites": self._sites, - "notify": self._notify, - "nolabels": self._nolabels, - "nopaths": self._nopaths, - "success_caches": self._success_caches, - "error_caches": self._error_caches, - "permanent_error_caches": self._permanent_error_caches - }) - - def __get_downloader(self, dtype: str): - """ - 根据类型返回下载器实例 - """ - if dtype == "qbittorrent": - return self.qb - elif dtype == "transmission": - return self.tr - else: - return None - - def auto_seed(self): - """ - 开始辅种 - """ - if not self.iyuuhelper: - return - logger.info("开始辅种任务 ...") - - # 计数器初始化 - self.total = 0 - self.realtotal = 0 - self.success = 0 - self.exist = 0 - self.fail = 0 - self.cached = 0 - # 扫描下载器辅种 - for downloader in self._downloaders: - logger.info(f"开始扫描下载器 {downloader} ...") - downloader_obj = self.__get_downloader(downloader) - # 获取下载器中已完成的种子 - torrents = downloader_obj.get_completed_torrents() - if torrents: - logger.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}") - else: - logger.info(f"下载器 {downloader} 没有已完成种子") - continue - hash_strs = [] - for torrent in torrents: - if self._event.is_set(): - logger.info(f"辅种服务停止") - return - # 获取种子hash - hash_str = self.__get_hash(torrent, downloader) - if hash_str in self._error_caches or hash_str in self._permanent_error_caches: - logger.info(f"种子 {hash_str} 辅种失败且已缓存,跳过 ...") - continue - save_path = self.__get_save_path(torrent, downloader) - - if 
self._nopaths and save_path: - # 过滤不需要转移的路径 - nopath_skip = False - for nopath in self._nopaths.split('\n'): - if os.path.normpath(save_path).startswith(os.path.normpath(nopath)): - logger.info(f"种子 {hash_str} 保存路径 {save_path} 不需要辅种,跳过 ...") - nopath_skip = True - break - if nopath_skip: - continue - - # 获取种子标签 - torrent_labels = self.__get_label(torrent, downloader) - if torrent_labels and self._nolabels: - is_skip = False - for label in self._nolabels.split(','): - if label in torrent_labels: - logger.info(f"种子 {hash_str} 含有不辅种标签 {label},跳过 ...") - is_skip = True - break - if is_skip: - continue - hash_strs.append({ - "hash": hash_str, - "save_path": save_path - }) - if hash_strs: - logger.info(f"总共需要辅种的种子数:{len(hash_strs)}") - # 分组处理,减少IYUU Api请求次数 - chunk_size = 200 - for i in range(0, len(hash_strs), chunk_size): - # 切片操作 - chunk = hash_strs[i:i + chunk_size] - # 处理分组 - self.__seed_torrents(hash_strs=chunk, - downloader=downloader) - # 触发校验检查 - self.check_recheck() - else: - logger.info(f"没有需要辅种的种子") - # 保存缓存 - self.__update_config() - # 发送消息 - if self._notify: - if self.success or self.fail: - self.post_message( - mtype=NotificationType.SiteMessage, - title="【IYUU自动辅种任务完成】", - text=f"服务器返回可辅种总数:{self.total}\n" - f"实际可辅种数:{self.realtotal}\n" - f"已存在:{self.exist}\n" - f"成功:{self.success}\n" - f"失败:{self.fail}\n" - f"{self.cached} 条失败记录已加入缓存" - ) - logger.info("辅种任务执行完成") - - def check_recheck(self): - """ - 定时检查下载器中种子是否校验完成,校验完成且完整的自动开始辅种 - """ - if not self._recheck_torrents: - return - if self._is_recheck_running: - return - self._is_recheck_running = True - for downloader in self._downloaders: - # 需要检查的种子 - recheck_torrents = self._recheck_torrents.get(downloader) or [] - if not recheck_torrents: - continue - logger.info(f"开始检查下载器 {downloader} 的校验任务 ...") - # 下载器 - downloader_obj = self.__get_downloader(downloader) - # 获取下载器中的种子状态 - torrents, _ = downloader_obj.get_torrents(ids=recheck_torrents) - if torrents: - can_seeding_torrents = [] - for torrent in torrents: - # 获取种子hash - hash_str = self.__get_hash(torrent, downloader) - if self.__can_seeding(torrent, downloader): - can_seeding_torrents.append(hash_str) - if can_seeding_torrents: - logger.info(f"共 {len(can_seeding_torrents)} 个任务校验完成,开始辅种 ...") - # 开始任务 - downloader_obj.start_torrents(ids=can_seeding_torrents) - # 去除已经处理过的种子 - self._recheck_torrents[downloader] = list( - set(recheck_torrents).difference(set(can_seeding_torrents))) - elif torrents is None: - logger.info(f"下载器 {downloader} 查询校验任务失败,将在下次继续查询 ...") - continue - else: - logger.info(f"下载器 {downloader} 中没有需要检查的校验任务,清空待处理列表 ...") - self._recheck_torrents[downloader] = [] - self._is_recheck_running = False - - def __seed_torrents(self, hash_strs: list, downloader: str): - """ - 执行一批种子的辅种 - """ - if not hash_strs: - return - logger.info(f"下载器 {downloader} 开始查询辅种,数量:{len(hash_strs)} ...") - # 下载器中的Hashs - hashs = [item.get("hash") for item in hash_strs] - # 每个Hash的保存目录 - save_paths = {} - for item in hash_strs: - save_paths[item.get("hash")] = item.get("save_path") - # 查询可辅种数据 - seed_list, msg = self.iyuuhelper.get_seed_info(hashs) - if not isinstance(seed_list, dict): - logger.warn(f"当前种子列表没有可辅种的站点:{msg}") - return - else: - logger.info(f"IYUU返回可辅种数:{len(seed_list)}") - # 遍历 - for current_hash, seed_info in seed_list.items(): - if not seed_info: - continue - seed_torrents = seed_info.get("torrent") - if not isinstance(seed_torrents, list): - seed_torrents = [seed_torrents] - - # 本次辅种成功的种子 - success_torrents = [] - - for seed in seed_torrents: - if not seed: - continue - 
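auto_seed above collects the eligible hashes per downloader and hands them to __seed_torrents in slices of 200 to keep each IYUU request small. That slicing can be expressed as a tiny generator; a sketch with made-up data:

    def chunked(items, size=200):
        """Yield consecutive slices of at most `size` items."""
        for i in range(0, len(items), size):
            yield items[i:i + size]

    # made-up hash/save-path pairs in the same shape auto_seed builds
    hash_strs = [{"hash": f"{i:040d}", "save_path": "/downloads"} for i in range(450)]

    for batch in chunked(hash_strs, size=200):
        print(len(batch))   # 200, 200, 50 -- each batch would go to __seed_torrents()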
if not isinstance(seed, dict): - continue - if not seed.get("sid") or not seed.get("info_hash"): - continue - if seed.get("info_hash") in hashs: - logger.info(f"{seed.get('info_hash')} 已在下载器中,跳过 ...") - continue - if seed.get("info_hash") in self._success_caches: - logger.info(f"{seed.get('info_hash')} 已处理过辅种,跳过 ...") - continue - if seed.get("info_hash") in self._error_caches or seed.get("info_hash") in self._permanent_error_caches: - logger.info(f"种子 {seed.get('info_hash')} 辅种失败且已缓存,跳过 ...") - continue - # 添加任务 - success = self.__download_torrent(seed=seed, - downloader=downloader, - save_path=save_paths.get(current_hash)) - if success: - success_torrents.append(seed.get("info_hash")) - - # 辅种成功的去重放入历史 - if len(success_torrents) > 0: - self.__save_history(current_hash=current_hash, - downloader=downloader, - success_torrents=success_torrents) - - logger.info(f"下载器 {downloader} 辅种完成") - - def __save_history(self, current_hash: str, downloader: str, success_torrents: []): - """ - [ - { - "downloader":"2", - "torrents":[ - "248103a801762a66c201f39df7ea325f8eda521b", - "bd13835c16a5865b01490962a90b3ec48889c1f0" - ] - }, - { - "downloader":"3", - "torrents":[ - "248103a801762a66c201f39df7ea325f8eda521b", - "bd13835c16a5865b01490962a90b3ec48889c1f0" - ] - } - ] - """ - try: - # 查询当前Hash的辅种历史 - seed_history = self.get_data(key=current_hash) or [] - - new_history = True - if len(seed_history) > 0: - for history in seed_history: - if not history: - continue - if not isinstance(history, dict): - continue - if not history.get("downloader"): - continue - # 如果本次辅种下载器之前有过记录则继续添加 - if str(history.get("downloader")) == downloader: - history_torrents = history.get("torrents") or [] - history["torrents"] = list(set(history_torrents + success_torrents)) - new_history = False - break - - # 本次辅种下载器之前没有成功记录则新增 - if new_history: - seed_history.append({ - "downloader": downloader, - "torrents": list(set(success_torrents)) - }) - - # 保存历史 - self.save_data(key=current_hash, - value=seed_history) - except Exception as e: - print(str(e)) - - def __download(self, downloader: str, content: bytes, - save_path: str) -> Optional[str]: - """ - 添加下载任务 - """ - if downloader == "qbittorrent": - # 生成随机Tag - tag = StringUtils.generate_random_str(10) - state = self.qb.add_torrent(content=content, - download_dir=save_path, - is_paused=True, - tag=["已整理", "辅种", tag]) - if not state: - return None - else: - # 获取种子Hash - torrent_hash = self.qb.get_torrent_id_by_tag(tags=tag) - if not torrent_hash: - logger.error(f"{downloader} 获取种子Hash失败") - return None - return torrent_hash - elif downloader == "transmission": - # 添加任务 - torrent = self.tr.add_torrent(content=content, - download_dir=save_path, - is_paused=True, - labels=["已整理", "辅种"]) - if not torrent: - return None - else: - return torrent.hashString - - logger.error(f"不支持的下载器:{downloader}") - return None - - def __download_torrent(self, seed: dict, downloader: str, save_path: str): - """ - 下载种子 - torrent: { - "sid": 3, - "torrent_id": 377467, - "info_hash": "a444850638e7a6f6220e2efdde94099c53358159" - } - """ - - def __is_special_site(url): - """ - 判断是否为特殊站点(是否需要添加https) - """ - if "hdsky.me" in url: - return False - return True - - self.total += 1 - # 获取种子站点及下载地址模板 - site_url, download_page = self.iyuuhelper.get_torrent_url(seed.get("sid")) - if not site_url or not download_page: - # 加入缓存 - self._error_caches.append(seed.get("info_hash")) - self.fail += 1 - self.cached += 1 - return False - # 查询站点 - site_domain = StringUtils.get_url_domain(site_url) - # 站点信息 - site_info = 
self.sites.get_indexer(site_domain) - if not site_info: - logger.debug(f"没有维护种子对应的站点:{site_url}") - return False - if self._sites and site_info.get('id') not in self._sites: - logger.info("当前站点不在选择的辅种站点范围,跳过 ...") - return False - self.realtotal += 1 - # 查询hash值是否已经在下载器中 - downloader_obj = self.__get_downloader(downloader) - torrent_info, _ = downloader_obj.get_torrents(ids=[seed.get("info_hash")]) - if torrent_info: - logger.info(f"{seed.get('info_hash')} 已在下载器中,跳过 ...") - self.exist += 1 - return False - # 站点流控 - check, checkmsg = self.sites.check(site_domain) - if check: - logger.warn(checkmsg) - self.fail += 1 - return False - # 下载种子 - torrent_url = self.__get_download_url(seed=seed, - site=site_info, - base_url=download_page) - if not torrent_url: - # 加入失败缓存 - self._error_caches.append(seed.get("info_hash")) - self.fail += 1 - self.cached += 1 - return False - # 强制使用Https - if __is_special_site(torrent_url): - if "?" in torrent_url: - torrent_url += "&https=1" - else: - torrent_url += "?https=1" - # 下载种子文件 - _, content, _, _, error_msg = self.torrent.download_torrent( - url=torrent_url, - cookie=site_info.get("cookie"), - ua=site_info.get("ua") or settings.USER_AGENT, - proxy=site_info.get("proxy")) - if not content: - # 下载失败 - self.fail += 1 - # 加入失败缓存 - if error_msg and ('无法打开链接' in error_msg or '触发站点流控' in error_msg): - self._error_caches.append(seed.get("info_hash")) - else: - # 种子不存在的情况 - self._permanent_error_caches.append(seed.get("info_hash")) - logger.error(f"下载种子文件失败:{torrent_url}") - return False - # 添加下载,辅种任务默认暂停 - logger.info(f"添加下载任务:{torrent_url} ...") - download_id = self.__download(downloader=downloader, - content=content, - save_path=save_path) - if not download_id: - # 下载失败 - self.fail += 1 - # 加入失败缓存 - self._error_caches.append(seed.get("info_hash")) - return False - else: - self.success += 1 - # 追加校验任务 - logger.info(f"添加校验检查任务:{download_id} ...") - if not self._recheck_torrents.get(downloader): - self._recheck_torrents[downloader] = [] - self._recheck_torrents[downloader].append(download_id) - # 下载成功 - logger.info(f"成功添加辅种下载,站点:{site_info.get('name')},种子链接:{torrent_url}") - # TR会自动校验 - if downloader == "qbittorrent": - # 开始校验种子 - downloader_obj.recheck_torrents(ids=[download_id]) - # 成功也加入缓存,有一些改了路径校验不通过的,手动删除后,下一次又会辅上 - self._success_caches.append(seed.get("info_hash")) - return True - - @staticmethod - def __get_hash(torrent: Any, dl_type: str): - """ - 获取种子hash - """ - try: - return torrent.get("hash") if dl_type == "qbittorrent" else torrent.hashString - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __get_label(torrent: Any, dl_type: str): - """ - 获取种子标签 - """ - try: - return [str(tag).strip() for tag in torrent.get("tags").split(',')] \ - if dl_type == "qbittorrent" else torrent.labels or [] - except Exception as e: - print(str(e)) - return [] - - @staticmethod - def __can_seeding(torrent: Any, dl_type: str): - """ - 判断种子是否可以做种并处于暂停状态 - """ - try: - return torrent.get("state") == "pausedUP" if dl_type == "qbittorrent" \ - else (torrent.status.stopped and torrent.percent_done == 1) - except Exception as e: - print(str(e)) - return False - - @staticmethod - def __get_save_path(torrent: Any, dl_type: str): - """ - 获取种子保存路径 - """ - try: - return torrent.get("save_path") if dl_type == "qbittorrent" else torrent.download_dir - except Exception as e: - print(str(e)) - return "" - - def __get_download_url(self, seed: dict, site: CommentedMap, base_url: str): - """ - 拼装种子下载链接 - """ - - def __is_special_site(url): - """ - 判断是否为特殊站点 - """ - 
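__get_download_url, which continues just below, turns IYUU's download_page template (e.g. "download.php?id={}&passkey={passkey}") into a concrete URL by normalising the bare {} placeholder and formatting in the torrent id and site credentials. A simplified standalone sketch with invented site values; the real helper also handles /{torrent_key} templates, strips empty passkey/uid markers and falls back to scraping the detail page:

    def build_download_url(site_url, download_page, torrent_id, passkey="", uid=""):
        """Fill an IYUU download_page template for one torrent (values are illustrative)."""
        url = (download_page
               .replace("id={}", "id={id}")   # normalise the bare {} placeholder
               .replace("/{}", "/{id}")
               .format(id=torrent_id, passkey=passkey, uid=uid))
        if "{" in url:
            raise ValueError(f"unsupported template: {download_page}")
        return f"{site_url.rstrip('/')}/{url.lstrip('/')}"

    # -> https://pt.example.org/download.php?id=377467&passkey=0123abcd
    print(build_download_url("https://pt.example.org",
                             "download.php?id={}&passkey={passkey}",
                             torrent_id=377467, passkey="0123abcd"))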
spec_params = ["hash=", "authkey="] - if any(field in base_url for field in spec_params): - return True - if "hdchina.org" in url: - return True - if "hdsky.me" in url: - return True - if "hdcity.in" in url: - return True - if "totheglory.im" in url: - return True - return False - - try: - if __is_special_site(site.get('url')): - # 从详情页面获取下载链接 - return self.__get_torrent_url_from_page(seed=seed, site=site) - else: - download_url = base_url.replace( - "id={}", - "id={id}" - ).replace( - "/{}", - "/{id}" - ).replace( - "/{torrent_key}", - "" - ).format( - **{ - "id": seed.get("torrent_id"), - "passkey": site.get("passkey") or '', - "uid": site.get("uid") or '', - } - ) - if download_url.count("{"): - logger.warn(f"当前不支持该站点的辅助任务,Url转换失败:{seed}") - return None - download_url = re.sub(r"[&?]passkey=", "", - re.sub(r"[&?]uid=", "", - download_url, - flags=re.IGNORECASE), - flags=re.IGNORECASE) - return f"{site.get('url')}{download_url}" - except Exception as e: - logger.warn(f"站点 {site.get('name')} Url转换失败:{str(e)},尝试通过详情页面获取种子下载链接 ...") - return self.__get_torrent_url_from_page(seed=seed, site=site) - - def __get_torrent_url_from_page(self, seed: dict, site: dict): - """ - 从详情页面获取下载链接 - """ - if not site.get('url'): - logger.warn(f"站点 {site.get('name')} 未获取站点地址,无法获取种子下载链接") - return None - try: - page_url = f"{site.get('url')}details.php?id={seed.get('torrent_id')}&hit=1" - logger.info(f"正在获取种子下载链接:{page_url} ...") - res = RequestUtils( - cookies=site.get("cookie"), - ua=site.get("ua"), - proxies=settings.PROXY if site.get("proxy") else None - ).get_res(url=page_url) - if res is not None and res.status_code in (200, 500): - if "charset=utf-8" in res.text or "charset=UTF-8" in res.text: - res.encoding = "UTF-8" - else: - res.encoding = res.apparent_encoding - if not res.text: - logger.warn(f"获取种子下载链接失败,页面内容为空:{page_url}") - return None - # 使用xpath从页面中获取下载链接 - html = etree.HTML(res.text) - for xpath in self._torrent_xpaths: - download_url = html.xpath(xpath) - if download_url: - download_url = download_url[0] - logger.info(f"获取种子下载链接成功:{download_url}") - if not download_url.startswith("http"): - if download_url.startswith("/"): - download_url = f"{site.get('url')}{download_url[1:]}" - else: - download_url = f"{site.get('url')}{download_url}" - return download_url - logger.warn(f"获取种子下载链接失败,未找到下载链接:{page_url}") - return None - else: - logger.error(f"获取种子下载链接失败,请求失败:{page_url},{res.status_code if res else ''}") - return None - except Exception as e: - logger.warn(f"获取种子下载链接失败:{str(e)}") - return None - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) - - def __custom_sites(self) -> List[Any]: - custom_sites = [] - custom_sites_config = self.get_config("CustomSites") - if custom_sites_config and custom_sites_config.get("enabled"): - custom_sites = custom_sites_config.get("sites") - return custom_sites - - @eventmanager.register(EventType.SiteDeleted) - def site_deleted(self, event): - """ - 删除对应站点选中 - """ - site_id = event.event_data.get("site_id") - config = self.get_config() - if config: - sites = config.get("sites") - if sites: - if isinstance(sites, str): - sites = [sites] - - # 删除对应站点 - if site_id: - sites = [site for site in sites if int(site) != int(site_id)] - else: - # 清空 - sites = [] - - # 若无站点,则停止 - if len(sites) == 0: - self._enabled = False - - self._sites = 
sites - # 保存配置 - self.__update_config() diff --git a/app/plugins/iyuuautoseed/iyuu_helper.py b/app/plugins/iyuuautoseed/iyuu_helper.py deleted file mode 100644 index df660660..00000000 --- a/app/plugins/iyuuautoseed/iyuu_helper.py +++ /dev/null @@ -1,166 +0,0 @@ -import hashlib -import json -import time - -from app.utils.http import RequestUtils - - -class IyuuHelper(object): - _version = "2.0.0" - _api_base = "https://api.iyuu.cn/%s" - _sites = {} - _token = None - - def __init__(self, token): - self._token = token - if self._token: - self.init_config() - - def init_config(self): - pass - - def __request_iyuu(self, url, method="get", params=None): - """ - 向IYUUApi发送请求 - """ - if params: - if not params.get("sign"): - params.update({"sign": self._token}) - if not params.get("version"): - params.update({"version": self._version}) - else: - params = {"sign": self._token, "version": self._version} - # 开始请求 - if method == "get": - ret = RequestUtils( - accept_type="application/json" - ).get_res(f"{url}", params=params) - else: - ret = RequestUtils( - accept_type="application/json" - ).post_res(f"{url}", data=params) - if ret: - result = ret.json() - if result.get('ret') == 200: - return result.get('data'), "" - else: - return None, f"请求IYUU失败,状态码:{result.get('ret')},返回信息:{result.get('msg')}" - elif ret is not None: - return None, f"请求IYUU失败,状态码:{ret.status_code},错误原因:{ret.reason}" - else: - return None, f"请求IYUU失败,未获取到返回信息" - - def get_torrent_url(self, sid): - if not sid: - return None, None - if not self._sites: - self._sites = self.__get_sites() - if not self._sites.get(sid): - return None, None - site = self._sites.get(sid) - return site.get('base_url'), site.get('download_page') - - def __get_sites(self): - """ - 返回支持辅种的全部站点 - :return: 站点列表、错误信息 - { - "ret": 200, - "data": { - "sites": [ - { - "id": 1, - "site": "keepfrds", - "nickname": "朋友", - "base_url": "pt.keepfrds.com", - "download_page": "download.php?id={}&passkey={passkey}", - "reseed_check": "passkey", - "is_https": 2 - }, - ] - } - } - """ - result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.Sites') - if result: - ret_sites = {} - sites = result.get('sites') or [] - for site in sites: - ret_sites[site.get('id')] = site - return ret_sites - else: - print(msg) - return {} - - def get_seed_info(self, info_hashs: list): - """ - 返回info_hash对应的站点id、种子id - { - "ret": 200, - "data": [ - { - "sid": 3, - "torrent_id": 377467, - "info_hash": "a444850638e7a6f6220e2efdde94099c53358159" - }, - { - "sid": 7, - "torrent_id": 35538, - "info_hash": "cf7d88fd656d10fe5130d13567aec27068b96676" - } - ], - "msg": "", - "version": "1.0.0" - } - """ - info_hashs.sort() - json_data = json.dumps(info_hashs, separators=(',', ':'), ensure_ascii=False) - sha1 = self.get_sha1(json_data) - result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.Infohash', - method="post", - params={ - "timestamp": time.time(), - "hash": json_data, - "sha1": sha1 - }) - return result, msg - - @staticmethod - def get_sha1(json_str) -> str: - return hashlib.sha1(json_str.encode('utf-8')).hexdigest() - - def get_auth_sites(self): - """ - 返回支持鉴权的站点列表 - [ - { - "id": 2, - "site": "pthome", - "bind_check": "passkey,uid" - } - ] - """ - result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.GetRecommendSites') - if result: - return result.get('recommend') or [] - else: - print(msg) - return [] - - def bind_site(self, site, passkey, uid): - """ - 绑定站点 - :param site: 站点名称 - :param passkey: passkey - :param uid: 用户id - :return: 状态码、错误信息 - """ - result, msg 
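get_seed_info in the iyuu_helper hunk above signs its query by sorting the info-hash list, serialising it as compact JSON and taking the SHA-1 of that string, which is then posted together with a timestamp (the sign/version fields are merged in by __request_iyuu). A self-contained sketch of the signature construction, reusing the sample hashes from the docstring above; no network call is made:

    import hashlib
    import json
    import time

    def build_infohash_payload(info_hashes):
        """Build the params for IYUU's App.Api.Infohash endpoint: sorted list,
        compact JSON and its SHA-1, plus a timestamp (token/version added elsewhere)."""
        hashes = sorted(info_hashes)
        json_data = json.dumps(hashes, separators=(",", ":"), ensure_ascii=False)
        return {
            "timestamp": time.time(),
            "hash": json_data,
            "sha1": hashlib.sha1(json_data.encode("utf-8")).hexdigest(),
        }

    payload = build_infohash_payload([
        "a444850638e7a6f6220e2efdde94099c53358159",
        "cf7d88fd656d10fe5130d13567aec27068b96676",
    ])
    print(payload["sha1"])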
= self.__request_iyuu(url=self._api_base % 'App.Api.Bind', - method="get", - params={ - "token": self._token, - "site": site, - "passkey": self.get_sha1(passkey), - "id": uid - }) - return result, msg diff --git a/app/plugins/libraryscraper/__init__.py b/app/plugins/libraryscraper/__init__.py deleted file mode 100644 index 6a638bc4..00000000 --- a/app/plugins/libraryscraper/__init__.py +++ /dev/null @@ -1,433 +0,0 @@ -from datetime import datetime, timedelta -from pathlib import Path -from threading import Event -from typing import List, Tuple, Dict, Any - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.core.config import settings -from app.core.meta import MetaBase -from app.core.metainfo import MetaInfo -from app.db.transferhistory_oper import TransferHistoryOper -from app.helper.nfo import NfoReader -from app.log import logger -from app.plugins import _PluginBase -from app.schemas import MediaType -from app.utils.system import SystemUtils - - -class LibraryScraper(_PluginBase): - - # 插件名称 - plugin_name = "媒体库刮削" - # 插件描述 - plugin_desc = "定时对媒体库进行刮削,补齐缺失元数据和图片。" - # 插件图标 - plugin_icon = "scraper.png" - # 主题色 - plugin_color = "#FF7D00" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "libraryscraper_" - # 加载顺序 - plugin_order = 7 - # 可使用的用户级别 - user_level = 1 - - # 私有属性 - transferhis = None - _scheduler = None - _scraper = None - # 限速开关 - _enabled = False - _onlyonce = False - _cron = None - _mode = "" - _scraper_paths = "" - _exclude_paths = "" - # 退出事件 - _event = Event() - - def init_plugin(self, config: dict = None): - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._mode = config.get("mode") or "" - self._scraper_paths = config.get("scraper_paths") or "" - self._exclude_paths = config.get("exclude_paths") or "" - - # 停止现有任务 - self.stop_service() - - # 启动定时任务 & 立即运行一次 - if self._enabled or self._onlyonce: - self.transferhis = TransferHistoryOper() - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - logger.info(f"媒体库刮削服务启动,周期:{self._cron}") - try: - self._scheduler.add_job(func=self.__libraryscraper, - trigger=CronTrigger.from_crontab(self._cron), - name="媒体库刮削") - except Exception as e: - logger.error(f"媒体库刮削服务启动失败,原因:{str(e)}") - self.systemmessage.put(f"媒体库刮削服务启动失败,原因:{str(e)}") - else: - logger.info(f"媒体库刮削服务启动,周期:每7天") - self._scheduler.add_job(func=self.__libraryscraper, - trigger=CronTrigger.from_crontab("0 0 */7 * *"), - name="媒体库刮削") - if self._onlyonce: - logger.info(f"媒体库刮削服务,立即运行一次") - self._scheduler.add_job(func=self.__libraryscraper, trigger='date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3), - name="Cloudflare优选") - # 关闭一次性开关 - self._onlyonce = False - self.update_config({ - "onlyonce": False, - "enabled": self._enabled, - "cron": self._cron, - "mode": self._mode, - "scraper_paths": self._scraper_paths, - "exclude_paths": self._exclude_paths - }) - if self._scheduler.get_jobs(): - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 
'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'mode', - 'label': '刮削模式', - 'items': [ - {'title': '仅刮削缺失元数据和图片', 'value': ''}, - {'title': '覆盖所有元数据和图片', 'value': 'force_all'}, - {'title': '覆盖所有元数据', 'value': 'force_nfo'}, - {'title': '覆盖所有图片', 'value': 'force_image'}, - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'scraper_paths', - 'label': '削刮路径', - 'rows': 5, - 'placeholder': '每一行一个目录,需配置到媒体文件的上级目录,即开了二级分类时需要配置到二级分类目录' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'exclude_paths', - 'label': '排除路径', - 'rows': 2, - 'placeholder': '每一行一个目录' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "cron": "0 0 */7 * *", - "mode": "", - "scraper_paths": "", - "err_hosts": "" - } - - def get_page(self) -> List[dict]: - pass - - def __libraryscraper(self): - """ - 开始刮削媒体库 - """ - if not self._scraper_paths: - return - # 排除目录 - exclude_paths = self._exclude_paths.split("\n") - # 已选择的目录 - paths = self._scraper_paths.split("\n") - for path in paths: - if not path: - continue - scraper_path = Path(path) - if not scraper_path.exists(): - logger.warning(f"媒体库刮削路径不存在:{path}") - continue - logger.info(f"开始刮削媒体库:{path} ...") - # 遍历一层文件夹 - for sub_path in scraper_path.iterdir(): - if self._event.is_set(): - logger.info(f"媒体库刮削服务停止") - return - # 排除目录 - exclude_flag = False - for exclude_path in exclude_paths: - try: - if sub_path.is_relative_to(Path(exclude_path)): - exclude_flag = True - break - except Exception as err: - print(str(err)) - if exclude_flag: - logger.debug(f"{sub_path} 在排除目录中,跳过 ...") - continue - # 开始刮削目录 - if sub_path.is_dir(): - # 判断目录是不是媒体目录 - dir_meta = MetaInfo(sub_path.name) - if not dir_meta.name or not dir_meta.year: - logger.warn(f"{sub_path} 可能不是媒体目录,请检查刮削目录配置,跳过 ...") - continue - logger.info(f"开始刮削目录:{sub_path} ...") - self.__scrape_dir(path=sub_path, dir_meta=dir_meta) - logger.info(f"目录 {sub_path} 刮削完成") - logger.info(f"媒体库 {path} 刮削完成") - - def __scrape_dir(self, path: Path, dir_meta: MetaBase): - """ - 削刮一个目录,该目录必须是媒体文件目录 - """ - - # 媒体信息 - mediainfo = None - - # 查找目录下所有的文件 - files = SystemUtils.list_files(path, settings.RMT_MEDIAEXT) - for file in files: - if self._event.is_set(): - logger.info(f"媒体库刮削服务停止") - return - - # 识别元数据 - meta_info = MetaInfo(file.stem) - # 合并 - meta_info.merge(dir_meta) - # 是否刮削 - scrap_metadata = settings.SCRAP_METADATA - - # 没有媒体信息或者名字出现变化时,需要重新识别 - if not mediainfo \ - or meta_info.name != dir_meta.name: - # 优先读取本地nfo文件 - tmdbid = None - if 
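The deleted __libraryscraper reads both the scrape and exclude settings as newline-separated path lists and skips any first-level directory that sits under an excluded path via Path.is_relative_to. A small standalone sketch of that exclusion test, with illustrative paths:

from pathlib import Path
from typing import List

def is_excluded(sub_path: Path, exclude_paths: List[str]) -> bool:
    # mirrors the per-directory test inside __libraryscraper()
    for exclude_path in exclude_paths:
        if not exclude_path:
            continue
        try:
            # Path.is_relative_to requires Python 3.9+
            if sub_path.is_relative_to(Path(exclude_path)):
                return True
        except Exception:
            # the original also swallows errors from malformed entries
            continue
    return False

# settings are newline-separated text areas; these paths are only examples
excludes = "/media/exclude1\n/media/exclude2".split("\n")
print(is_excluded(Path("/media/exclude1/Some Movie (2020)"), excludes))  # True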
meta_info.type == MediaType.MOVIE: - # 电影 - movie_nfo = file.parent / "movie.nfo" - if movie_nfo.exists(): - tmdbid = self.__get_tmdbid_from_nfo(movie_nfo) - file_nfo = file.with_suffix(".nfo") - if not tmdbid and file_nfo.exists(): - tmdbid = self.__get_tmdbid_from_nfo(file_nfo) - else: - # 电视剧 - tv_nfo = file.parent.parent / "tvshow.nfo" - if tv_nfo.exists(): - tmdbid = self.__get_tmdbid_from_nfo(tv_nfo) - if tmdbid: - # 按TMDBID识别 - logger.info(f"读取到本地nfo文件的tmdbid:{tmdbid}") - mediainfo = self.chain.recognize_media(tmdbid=tmdbid, mtype=meta_info.type) - else: - # 按名称识别 - mediainfo = self.chain.recognize_media(meta=meta_info) - if not mediainfo: - logger.warn(f"未识别到媒体信息:{file}") - continue - - # 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title - if not settings.SCRAP_FOLLOW_TMDB: - transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id, - mtype=mediainfo.type.value) - if transfer_history: - mediainfo.title = transfer_history.title - - # 覆盖模式时,提前删除nfo - if self._mode in ["force_all", "force_nfo"]: - scrap_metadata = True - nfo_files = SystemUtils.list_files(path, [".nfo"]) - for nfo_file in nfo_files: - try: - logger.warn(f"删除nfo文件:{nfo_file}") - nfo_file.unlink() - except Exception as err: - print(str(err)) - - # 覆盖模式时,提前删除图片文件 - if self._mode in ["force_all", "force_image"]: - scrap_metadata = True - image_files = SystemUtils.list_files(path, [".jpg", ".png"]) - for image_file in image_files: - if ".actors" in str(image_file): - continue - try: - logger.warn(f"删除图片文件:{image_file}") - image_file.unlink() - except Exception as err: - print(str(err)) - - # 刮削单个文件 - if scrap_metadata: - self.chain.scrape_metadata(path=file, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE) - - @staticmethod - def __get_tmdbid_from_nfo(file_path: Path): - """ - 从nfo文件中获取信息 - :param file_path: - :return: tmdbid - """ - if not file_path: - return None - xpaths = [ - "uniqueid[@type='Tmdb']", - "uniqueid[@type='tmdb']", - "uniqueid[@type='TMDB']", - "tmdbid" - ] - reader = NfoReader(file_path) - for xpath in xpaths: - try: - tmdbid = reader.get_element_value(xpath) - if tmdbid: - return tmdbid - except Exception as err: - print(str(err)) - return None - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) diff --git a/app/plugins/mediaservermsg/__init__.py b/app/plugins/mediaservermsg/__init__.py deleted file mode 100644 index d0cbf73e..00000000 --- a/app/plugins/mediaservermsg/__init__.py +++ /dev/null @@ -1,244 +0,0 @@ -import time -from typing import Any, List, Dict, Tuple - -from app.core.event import eventmanager, Event -from app.log import logger -from app.plugins import _PluginBase -from app.schemas import WebhookEventInfo -from app.schemas.types import EventType, MediaType, MediaImageType, NotificationType -from app.utils.web import WebUtils - - -class MediaServerMsg(_PluginBase): - # 插件名称 - plugin_name = "媒体库服务器通知" - # 插件描述 - plugin_desc = "发送Emby/Jellyfin/Plex服务器的播放、入库等通知消息。" - # 插件图标 - plugin_icon = "mediaplay.png" - # 主题色 - plugin_color = "#42A3DB" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "mediaservermsg_" - # 加载顺序 - plugin_order = 14 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - _types = [] - - # 拼装消息内容 - 
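__get_tmdbid_from_nfo above tries several uniqueid spellings before falling back to a plain tmdbid node. The plugin goes through the project's NfoReader helper; the sketch below is a standalone equivalent using only xml.etree.ElementTree and assumes the nodes sit directly under the nfo root:

import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Optional

# same lookup order as the deleted __get_tmdbid_from_nfo()
TMDB_XPATHS = [
    "uniqueid[@type='Tmdb']",
    "uniqueid[@type='tmdb']",
    "uniqueid[@type='TMDB']",
    "tmdbid",
]

def tmdbid_from_nfo(file_path: Path) -> Optional[str]:
    try:
        root = ET.parse(file_path).getroot()
    except (ET.ParseError, OSError):
        return None
    for xpath in TMDB_XPATHS:
        element = root.find(xpath)
        if element is not None and element.text:
            return element.text.strip()
    return None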
_webhook_actions = { - "library.new": "新入库", - "system.webhooktest": "测试", - "playback.start": "开始播放", - "playback.stop": "停止播放", - "user.authenticated": "登录成功", - "user.authenticationfailed": "登录失败", - "media.play": "开始播放", - "media.stop": "停止播放", - "PlaybackStart": "开始播放", - "PlaybackStop": "停止播放", - "item.rate": "标记了" - } - _webhook_images = { - "emby": "https://emby.media/notificationicon.png", - "plex": "https://www.plex.tv/wp-content/uploads/2022/04/new-logo-process-lines-gray.png", - "jellyfin": "https://play-lh.googleusercontent.com/SCsUK3hCCRqkJbmLDctNYCfehLxsS4ggD1ZPHIFrrAN1Tn9yhjmGMPep2D9lMaaa9eQi" - } - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._types = config.get("types") or [] - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - types_options = [ - {"title": "新入库", "value": "library.new"}, - {"title": "开始播放", "value": "playback.start|media.play|PlaybackStart"}, - {"title": "停止播放", "value": "playback.stop|media.stop|PlaybackStop"}, - {"title": "用户标记", "value": "item.rate"}, - {"title": "测试", "value": "system.webhooktest"}, - ] - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'types', - 'label': '消息类型', - 'items': types_options - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '需要设置媒体服务器Webhook,回调相对路径为 /api/v1/webhook?token=moviepilot(3001端口),其中 moviepilot 为设置的 API_TOKEN。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "types": [] - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.WebhookMessage) - def send(self, event: Event): - """ - 发送通知消息 - """ - if not self._enabled: - return - - event_info: WebhookEventInfo = event.event_data - if not event_info: - return - - # 不在支持范围不处理 - if not self._webhook_actions.get(event_info.event): - return - - # 不在选中范围不处理 - msgflag = False - for _type in self._types: - if event_info.event in _type.split("|"): - msgflag = True - break - if not msgflag: - logger.info(f"未开启 {event_info.event} 类型的消息通知") - return - - # 消息标题 - if event_info.item_type in ["TV", "SHOW"]: - message_title = f"{self._webhook_actions.get(event_info.event)}剧集 {event_info.item_name}" - elif event_info.item_type == "MOV": - message_title = f"{self._webhook_actions.get(event_info.event)}电影 {event_info.item_name}" - elif event_info.item_type == "AUD": - message_title = f"{self._webhook_actions.get(event_info.event)}有声书 {event_info.item_name}" - else: - message_title = f"{self._webhook_actions.get(event_info.event)}" - - # 消息内容 - message_texts = [] - if event_info.user_name: - message_texts.append(f"用户:{event_info.user_name}") - if event_info.device_name: - message_texts.append(f"设备:{event_info.client} 
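Each entry in types_options bundles several raw webhook event names behind one selection by joining them with a pipe, and send() later tests membership against the split value. A compact restatement of that check:

from typing import List

# one selection from types_options can stand for several raw event names
selected_types: List[str] = [
    "playback.start|media.play|PlaybackStart",
    "library.new",
]

def is_selected(event_name: str, selections: List[str]) -> bool:
    # the same membership test the deleted send() applies before notifying
    return any(event_name in selection.split("|") for selection in selections)

print(is_selected("media.play", selected_types))     # True
print(is_selected("playback.stop", selected_types))  # False, not selected above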
{event_info.device_name}") - if event_info.ip: - message_texts.append(f"IP地址:{event_info.ip} {WebUtils.get_location(event_info.ip)}") - if event_info.percentage: - percentage = round(float(event_info.percentage), 2) - message_texts.append(f"进度:{percentage}%") - if event_info.overview: - message_texts.append(f"剧情:{event_info.overview}") - message_texts.append(f"时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}") - - # 消息内容 - message_content = "\n".join(message_texts) - - # 消息图片 - image_url = event_info.image_url - # 查询剧集图片 - if (event_info.tmdb_id - and event_info.season_id - and event_info.episode_id): - specific_image = self.chain.obtain_specific_image( - mediaid=event_info.tmdb_id, - mtype=MediaType.TV, - image_type=MediaImageType.Backdrop, - season=event_info.season_id, - episode=event_info.episode_id - ) - if specific_image: - image_url = specific_image - # 使用默认图片 - if not image_url: - image_url = self._webhook_images.get(event_info.channel) - - # 发送消息 - self.post_message(mtype=NotificationType.MediaServer, - title=message_title, text=message_content, image=image_url) - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/mediaserverrefresh/__init__.py b/app/plugins/mediaserverrefresh/__init__.py deleted file mode 100644 index 436de708..00000000 --- a/app/plugins/mediaserverrefresh/__init__.py +++ /dev/null @@ -1,136 +0,0 @@ -from typing import Any, List, Dict, Tuple - -from app.core.config import settings -from app.core.context import MediaInfo -from app.core.event import eventmanager, Event -from app.modules.emby import Emby -from app.modules.jellyfin import Jellyfin -from app.modules.plex import Plex -from app.plugins import _PluginBase -from app.schemas import TransferInfo, RefreshMediaItem -from app.schemas.types import EventType - - -class MediaServerRefresh(_PluginBase): - # 插件名称 - plugin_name = "媒体库服务器刷新" - # 插件描述 - plugin_desc = "入库后自动刷新Emby/Jellyfin/Plex服务器海报墙。" - # 插件图标 - plugin_icon = "refresh2.png" - # 主题色 - plugin_color = "#347180" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "mediaserverrefresh_" - # 加载顺序 - plugin_order = 14 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.TransferComplete) - def refresh(self, event: Event): - """ - 发送通知消息 - """ - if not self._enabled: - return - - event_info: dict = event.event_data - if not event_info: - return - - # 刷新媒体库 - if not settings.MEDIASERVER: - return - - # 入库数据 - transferinfo: TransferInfo = event_info.get("transferinfo") - mediainfo: MediaInfo = event_info.get("mediainfo") - items = [ - RefreshMediaItem( - title=mediainfo.title, - year=mediainfo.year, - type=mediainfo.type, - 
category=mediainfo.category, - target_path=transferinfo.target_path - ) - ] - # Emby - if "emby" in settings.MEDIASERVER: - Emby().refresh_library_by_items(items) - - # Jeyllyfin - if "jellyfin" in settings.MEDIASERVER: - # FIXME Jellyfin未找到刷新单个项目的API - Jellyfin().refresh_root_library() - - # Plex - if "plex" in settings.MEDIASERVER: - Plex().refresh_library_by_items(items) - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/mediasyncdel/__init__.py b/app/plugins/mediasyncdel/__init__.py deleted file mode 100644 index 12f61e0e..00000000 --- a/app/plugins/mediasyncdel/__init__.py +++ /dev/null @@ -1,1395 +0,0 @@ -import datetime -import json -import os -import re -import time -from pathlib import Path -from typing import List, Tuple, Dict, Any, Optional - -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.transfer import TransferChain -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.db.models.transferhistory import TransferHistory -from app.log import logger -from app.modules.emby import Emby -from app.modules.jellyfin import Jellyfin -from app.modules.qbittorrent import Qbittorrent -from app.modules.themoviedb.tmdbv3api import Episode -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.schemas.types import NotificationType, EventType, MediaType, MediaImageType - - -class MediaSyncDel(_PluginBase): - # 插件名称 - plugin_name = "媒体文件同步删除" - # 插件描述 - plugin_desc = "同步删除历史记录、源文件和下载任务。" - # 插件图标 - plugin_icon = "mediasyncdel.png" - # 主题色 - plugin_color = "#ff1a1a" - # 插件版本 - plugin_version = "1.1" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "mediasyncdel_" - # 加载顺序 - plugin_order = 9 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - episode = None - _scheduler: Optional[BackgroundScheduler] = None - _enabled = False - _sync_type: str = "" - _cron: str = "" - _notify = False - _del_source = False - _exclude_path = None - _library_path = None - _transferchain = None - _transferhis = None - _downloadhis = None - qb = None - tr = None - - def init_plugin(self, config: dict = None): - self._transferchain = TransferChain() - self._transferhis = self._transferchain.transferhis - self._downloadhis = self._transferchain.downloadhis - self.episode = Episode() - self.qb = Qbittorrent() - self.tr = Transmission() - - # 停止现有任务 - self.stop_service() - - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._sync_type = config.get("sync_type") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._del_source = config.get("del_source") - self._exclude_path = config.get("exclude_path") - self._library_path = config.get("library_path") - - if self._enabled and str(self._sync_type) == "log": - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - try: - self._scheduler.add_job(func=self.sync_del_by_log, - trigger=CronTrigger.from_crontab(self._cron), - name="媒体库同步删除") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - else: - self._scheduler.add_job(self.sync_del_by_log, "interval", minutes=30, name="媒体库同步删除") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 
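refresh() above fans the refresh out per server named in the comma-separated MEDIASERVER setting, with Jellyfin falling back to a whole-library refresh. A rough sketch of the same dispatch as a lookup table, using print placeholders instead of the real Emby/Jellyfin/Plex modules:

from typing import Callable, Dict, List

# print placeholders stand in for the Emby/Jellyfin/Plex module calls
def refresh_emby(items: List[dict]) -> None:
    print(f"Emby: refresh {len(items)} item(s)")

def refresh_jellyfin(items: List[dict]) -> None:
    # the deleted plugin refreshes the whole Jellyfin root library instead
    print("Jellyfin: refresh root library")

def refresh_plex(items: List[dict]) -> None:
    print(f"Plex: refresh {len(items)} item(s)")

REFRESHERS: Dict[str, Callable[[List[dict]], None]] = {
    "emby": refresh_emby,
    "jellyfin": refresh_jellyfin,
    "plex": refresh_plex,
}

def refresh_all(mediaserver_setting: str, items: List[dict]) -> None:
    # MEDIASERVER is a comma-separated string such as "emby,plex"
    for name in mediaserver_setting.split(","):
        handler = REFRESHERS.get(name.strip())
        if handler:
            handler(items)

refresh_all("emby,plex", items=[{"title": "Some Movie", "year": "2023"}])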
命令关键字、事件、描述、附带数据 - """ - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'del_source', - 'label': '删除源文件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'sync_type', - 'label': '媒体库同步方式', - 'items': [ - {'title': 'Webhook', 'value': 'webhook'}, - {'title': '日志', 'value': 'log'}, - {'title': 'Scripter X', 'value': 'plugin'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '日志检查周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'exclude_path', - 'label': '排除路径' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'library_path', - 'rows': '2', - 'label': '媒体库路径映射', - 'placeholder': '媒体服务器路径:MoviePilot路径(一行一个)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '媒体库同步方式分为Webhook、日志同步和Scripter X:' - '1、Webhook需要Emby4.8.0.45及以上开启媒体删除的Webhook。' - '2、日志同步需要配置检查周期,默认30分钟执行一次。' - '3、Scripter X方式需要emby安装并配置Scripter X插件,无需配置执行周期。' - '4、启用该插件后,非媒体服务器触发的源文件删除,也会同步处理下载器中的下载任务。' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '关于路径映射(转移后文件):' - 'emby:/data/series/A.mp4,' - 'moviepilot:/mnt/link/series/A.mp4。' - '路径映射填/data:/mnt/link。' - '不正确配置会导致查询不到转移记录!' 
- } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '排除路径:命中排除路径后请求云盘删除插件删除云盘资源。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "del_source": False, - "library_path": "", - "sync_type": "webhook", - "cron": "*/30 * * * *", - "exclude_path": "", - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('del_time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - htype = history.get("type") - title = history.get("title") - year = history.get("year") - season = history.get("season") - episode = history.get("episode") - image = history.get("image") - del_time = history.get("del_time") - - if season: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'年份:{year}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'季:{season}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'集:{episode}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - else: - sub_contents = [ - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{htype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'标题:{title}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'年份:{year}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{del_time}' - } - ] - - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': image, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': sub_contents - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - @eventmanager.register(EventType.WebhookMessage) - def sync_del_by_webhook(self, event: Event): - """ - emby删除媒体库同步删除历史记录 - webhook - """ - if not self._enabled or str(self._sync_type) != "webhook": - return - - event_data = event.event_data - event_type = event_data.event - - # Emby Webhook event_type = library.deleted - if not event_type or str(event_type) != 'library.deleted': - return - - # 媒体类型 - media_type = event_data.media_type - # 媒体名称 - media_name = event_data.item_name - # 媒体路径 - media_path = event_data.item_path - # tmdb_id - tmdb_id = event_data.tmdb_id - # 季数 - season_num = event_data.season_id - # 集数 - episode_num = event_data.episode_id - - # 兼容emby webhook season删除没有发送tmdbid - if not tmdb_id and str(media_type) != 'Season': - 
logger.error(f"{media_name} 同步删除失败,未获取到TMDB ID,请检查媒体库媒体是否刮削") - return - - self.__sync_del(media_type=media_type, - media_name=media_name, - media_path=media_path, - tmdb_id=tmdb_id, - season_num=season_num, - episode_num=episode_num) - - @eventmanager.register(EventType.WebhookMessage) - def sync_del_by_plugin(self, event): - """ - emby删除媒体库同步删除历史记录 - Scripter X插件 - """ - if not self._enabled or str(self._sync_type) != "plugin": - return - - event_data = event.event_data - event_type = event_data.event - - # Scripter X插件 event_type = media_del - if not event_type or str(event_type) != 'media_del': - return - - # Scripter X插件 需要是否虚拟标识 - item_isvirtual = event_data.item_isvirtual - if not item_isvirtual: - logger.error("Scripter X插件方式,item_isvirtual参数未配置,为防止误删除,暂停插件运行") - self.update_config({ - "enabled": False, - "del_source": self._del_source, - "exclude_path": self._exclude_path, - "library_path": self._library_path, - "notify": self._notify, - "cron": self._cron, - "sync_type": self._sync_type, - }) - return - - # 如果是虚拟item,则直接return,不进行删除 - if item_isvirtual == 'True': - return - - # 媒体类型 - media_type = event_data.item_type - # 媒体名称 - media_name = event_data.item_name - # 媒体路径 - media_path = event_data.item_path - # tmdb_id - tmdb_id = event_data.tmdb_id - # 季数 - season_num = event_data.season_id - # 集数 - episode_num = event_data.episode_id - - if not tmdb_id or not str(tmdb_id).isdigit(): - logger.error(f"{media_name} 同步删除失败,未获取到TMDB ID,请检查媒体库媒体是否刮削") - return - - self.__sync_del(media_type=media_type, - media_name=media_name, - media_path=media_path, - tmdb_id=tmdb_id, - season_num=season_num, - episode_num=episode_num) - - def __sync_del(self, media_type: str, media_name: str, media_path: str, - tmdb_id: int, season_num: str, episode_num: str): - """ - 执行删除逻辑 - """ - if self._exclude_path and media_path and any( - os.path.abspath(media_path).startswith(os.path.abspath(path)) for path in - self._exclude_path.split(",")): - logger.info(f"媒体路径 {media_path} 已被排除,暂不处理") - # 发送消息通知网盘删除插件删除网盘资源 - self.eventmanager.send_event(EventType.NetworkDiskDel, - { - "media_path": media_path, - "media_name": media_name, - "tmdb_id": tmdb_id, - "media_type": media_type, - "season_num": season_num, - "episode_num": episode_num, - }) - return - - if not media_type: - logger.error(f"{media_name} 同步删除失败,未获取到媒体类型,请检查媒体是否刮削") - return - - # 查询转移记录 - msg, transfer_history = self.__get_transfer_his(media_type=media_type, - media_name=media_name, - media_path=media_path, - tmdb_id=tmdb_id, - season_num=season_num, - episode_num=episode_num) - - logger.info(f"正在同步删除{msg}") - - if not transfer_history: - logger.warn( - f"{media_type} {media_name} 未获取到可删除数据,请检查路径映射是否配置错误,请检查tmdbid获取是否正确") - return - - # 开始删除 - year = None - del_torrent_hashs = [] - stop_torrent_hashs = [] - error_cnt = 0 - image = 'https://emby.media/notificationicon.png' - for transferhis in transfer_history: - title = transferhis.title - if title not in media_name: - logger.warn( - f"当前转移记录 {transferhis.id} {title} {transferhis.tmdbid} 与删除媒体{media_name}不符,防误删,暂不自动删除") - continue - image = transferhis.image or image - year = transferhis.year - - # 0、删除转移记录 - self._transferhis.delete(transferhis.id) - - # 删除种子任务 - if self._del_source: - # 1、直接删除源文件 - if transferhis.src and Path(transferhis.src).suffix in settings.RMT_MEDIAEXT: - self._transferchain.delete_files(Path(transferhis.src)) - if transferhis.download_hash: - try: - # 2、判断种子是否被删除完 - delete_flag, success_flag, handle_torrent_hashs = self.handle_torrent( - src=transferhis.src, - 
torrent_hash=transferhis.download_hash) - if not success_flag: - error_cnt += 1 - else: - if delete_flag: - del_torrent_hashs += handle_torrent_hashs - else: - stop_torrent_hashs += handle_torrent_hashs - except Exception as e: - logger.error("删除种子失败:%s" % str(e)) - - logger.info(f"同步删除 {msg} 完成!") - - media_type = MediaType.MOVIE if media_type in ["Movie", "MOV"] else MediaType.TV - - # 发送消息 - if self._notify: - backrop_image = self.chain.obtain_specific_image( - mediaid=tmdb_id, - mtype=media_type, - image_type=MediaImageType.Backdrop, - season=season_num, - episode=episode_num - ) or image - - torrent_cnt_msg = "" - if del_torrent_hashs: - torrent_cnt_msg += f"删除种子{len(set(del_torrent_hashs))}个\n" - if stop_torrent_hashs: - stop_cnt = 0 - # 排除已删除 - for stop_hash in set(stop_torrent_hashs): - if stop_hash not in set(del_torrent_hashs): - stop_cnt += 1 - if stop_cnt > 0: - torrent_cnt_msg += f"暂停种子{stop_cnt}个\n" - if error_cnt: - torrent_cnt_msg += f"删种失败{error_cnt}个\n" - # 发送通知 - self.post_message( - mtype=NotificationType.MediaServer, - title="媒体库同步删除任务完成", - image=backrop_image, - text=f"{msg}\n" - f"删除记录{len(transfer_history)}个\n" - f"{torrent_cnt_msg}" - f"时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}" - ) - - # 读取历史记录 - history = self.get_data('history') or [] - - # 获取poster - poster_image = self.chain.obtain_specific_image( - mediaid=tmdb_id, - mtype=media_type, - image_type=MediaImageType.Poster, - ) or image - history.append({ - "type": media_type.value, - "title": media_name, - "year": year, - "path": media_path, - "season": season_num if season_num and str(season_num).isdigit() else None, - "episode": episode_num if episode_num and str(episode_num).isdigit() else None, - "image": poster_image, - "del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) - }) - - # 保存历史 - self.save_data("history", history) - - def __get_transfer_his(self, media_type: str, media_name: str, media_path: str, - tmdb_id: int, season_num: str, episode_num: str): - """ - 查询转移记录 - """ - # 季数 - if season_num and str(season_num).isdigit(): - season_num = str(season_num).rjust(2, '0') - else: - season_num = None - # 集数 - if episode_num and str(episode_num).isdigit(): - episode_num = str(episode_num).rjust(2, '0') - else: - episode_num = None - - # 类型 - mtype = MediaType.MOVIE if media_type in ["Movie", "MOV"] else MediaType.TV - - # 处理路径映射 (处理同一媒体多分辨率的情况) - if self._library_path: - paths = self._library_path.split("\n") - for path in paths: - sub_paths = path.split(":") - if len(sub_paths) < 2: - continue - media_path = media_path.replace(sub_paths[0], sub_paths[1]).replace('\\', '/') - - # 删除电影 - if mtype == MediaType.MOVIE: - msg = f'电影 {media_name} {tmdb_id}' - transfer_history: List[TransferHistory] = self._transferhis.get_by(tmdbid=tmdb_id, - mtype=mtype.value, - dest=media_path) - # 删除电视剧 - elif mtype == MediaType.TV and not season_num and not episode_num: - msg = f'剧集 {media_name} {tmdb_id}' - transfer_history: List[TransferHistory] = self._transferhis.get_by(tmdbid=tmdb_id, - mtype=mtype.value) - # 删除季 S02 - elif mtype == MediaType.TV and season_num and not episode_num: - if not season_num or not str(season_num).isdigit(): - logger.error(f"{media_name} 季同步删除失败,未获取到具体季") - return - msg = f'剧集 {media_name} S{season_num} {tmdb_id}' - if tmdb_id and str(tmdb_id).isdigit(): - # 根据tmdb_id查询转移记录 - transfer_history: List[TransferHistory] = self._transferhis.get_by(tmdbid=tmdb_id, - mtype=mtype.value, - season=f'S{season_num}') - else: - # 兼容emby webhook不发送tmdb场景 - 
transfer_history: List[TransferHistory] = self._transferhis.get_by(mtype=mtype.value, - season=f'S{season_num}', - dest=media_path) - # 删除剧集S02E02 - elif mtype == MediaType.TV and season_num and episode_num: - if not season_num or not str(season_num).isdigit() or not episode_num or not str(episode_num).isdigit(): - logger.error(f"{media_name} 集同步删除失败,未获取到具体集") - return - msg = f'剧集 {media_name} S{season_num}E{episode_num} {tmdb_id}' - transfer_history: List[TransferHistory] = self._transferhis.get_by(tmdbid=tmdb_id, - mtype=mtype.value, - season=f'S{season_num}', - episode=f'E{episode_num}', - dest=media_path) - else: - return "", [] - - return msg, transfer_history - - def sync_del_by_log(self): - """ - emby删除媒体库同步删除历史记录 - 日志方式 - """ - # 读取历史记录 - history = self.get_data('history') or [] - last_time = self.get_data("last_time") - del_medias = [] - - # 媒体服务器类型,多个以,分隔 - if not settings.MEDIASERVER: - return - media_servers = settings.MEDIASERVER.split(',') - for media_server in media_servers: - if media_server == 'emby': - del_medias.extend(self.parse_emby_log(last_time)) - elif media_server == 'jellyfin': - del_medias.extend(self.parse_jellyfin_log(last_time)) - elif media_server == 'plex': - # TODO plex解析日志 - return - - if not del_medias: - logger.error("未解析到已删除媒体信息") - return - - # 遍历删除 - last_del_time = None - for del_media in del_medias: - # 删除时间 - del_time = del_media.get("time") - last_del_time = del_time - # 媒体类型 Movie|Series|Season|Episode - media_type = del_media.get("type") - # 媒体名称 蜀山战纪 - media_name = del_media.get("name") - # 媒体年份 2015 - media_year = del_media.get("year") - # 媒体路径 /data/series/国产剧/蜀山战纪 (2015)/Season 2/蜀山战纪 - S02E01 - 第1集.mp4 - media_path = del_media.get("path") - # 季数 S02 - media_season = del_media.get("season") - # 集数 E02 - media_episode = del_media.get("episode") - - # 排除路径不处理 - if self._exclude_path and media_path and any( - os.path.abspath(media_path).startswith(os.path.abspath(path)) for path in - self._exclude_path.split(",")): - logger.info(f"媒体路径 {media_path} 已被排除,暂不处理") - self.save_data("last_time", last_del_time or datetime.datetime.now()) - return - - # 处理路径映射 (处理同一媒体多分辨率的情况) - if self._library_path: - paths = self._library_path.split("\n") - for path in paths: - sub_paths = path.split(":") - if len(sub_paths) < 2: - continue - media_path = media_path.replace(sub_paths[0], sub_paths[1]).replace('\\', '/') - - # 获取删除的记录 - # 删除电影 - if media_type == "Movie": - msg = f'电影 {media_name}' - transfer_history: List[TransferHistory] = self._transferhis.get_by( - title=media_name, - year=media_year, - dest=media_path) - # 删除电视剧 - elif media_type == "Series": - msg = f'剧集 {media_name}' - transfer_history: List[TransferHistory] = self._transferhis.get_by( - title=media_name, - year=media_year) - # 删除季 S02 - elif media_type == "Season": - msg = f'剧集 {media_name} {media_season}' - transfer_history: List[TransferHistory] = self._transferhis.get_by( - title=media_name, - year=media_year, - season=media_season) - # 删除剧集S02E02 - elif media_type == "Episode": - msg = f'剧集 {media_name} {media_season}{media_episode}' - transfer_history: List[TransferHistory] = self._transferhis.get_by( - title=media_name, - year=media_year, - season=media_season, - episode=media_episode, - dest=media_path) - else: - self.save_data("last_time", last_del_time or datetime.datetime.now()) - continue - - logger.info(f"正在同步删除 {msg}") - - if not transfer_history: - logger.info(f"未获取到 {msg} 转移记录,请检查路径映射是否配置错误,请检查tmdbid获取是否正确") - self.save_data("last_time", last_del_time or datetime.datetime.now()) 
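Both __get_transfer_his and sync_del_by_log normalize season/episode numbers to two digits and rewrite the media-server path into a MoviePilot path line by line, which is why the help text warns that a wrong mapping silently yields no transfer records. A minimal sketch using the /data:/mnt/link example from the plugin's own alert text:

from typing import Optional

def map_library_path(media_path: str, library_path_setting: str) -> str:
    # one "媒体服务器路径:MoviePilot路径" mapping per line, applied as a plain replace
    for line in library_path_setting.split("\n"):
        parts = line.split(":")
        if len(parts) < 2:
            continue
        media_path = media_path.replace(parts[0], parts[1]).replace("\\", "/")
    return media_path

def norm_number(num) -> Optional[str]:
    # zero-pad season/episode numbers the way __get_transfer_his does
    return str(num).rjust(2, "0") if num and str(num).isdigit() else None

print(map_library_path("/data/series/A.mp4", "/data:/mnt/link"))  # /mnt/link/series/A.mp4
print(norm_number(2))   # "02", later formatted as S02 / E02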
- continue - - logger.info(f"获取到删除历史记录数量 {len(transfer_history)}") - - # 开始删除 - image = 'https://emby.media/notificationicon.png' - del_torrent_hashs = [] - stop_torrent_hashs = [] - error_cnt = 0 - for transferhis in transfer_history: - title = transferhis.title - if title not in media_name: - logger.warn( - f"当前转移记录 {transferhis.id} {title} {transferhis.tmdbid} 与删除媒体{media_name}不符,防误删,暂不自动删除") - self.save_data("last_time", last_del_time or datetime.datetime.now()) - continue - image = transferhis.image or image - # 0、删除转移记录 - self._transferhis.delete(transferhis.id) - - # 删除种子任务 - if self._del_source: - # 1、直接删除源文件 - if transferhis.src and Path(transferhis.src).suffix in settings.RMT_MEDIAEXT: - self._transferchain.delete_files(Path(transferhis.src)) - if transferhis.download_hash: - try: - # 2、判断种子是否被删除完 - delete_flag, success_flag, handle_torrent_hashs = self.handle_torrent( - src=transferhis.src, - torrent_hash=transferhis.download_hash) - if not success_flag: - error_cnt += 1 - else: - if delete_flag: - del_torrent_hashs += handle_torrent_hashs - else: - stop_torrent_hashs += handle_torrent_hashs - except Exception as e: - logger.error("删除种子失败:%s" % str(e)) - - logger.info(f"同步删除 {msg} 完成!") - - # 发送消息 - if self._notify: - torrent_cnt_msg = "" - if del_torrent_hashs: - torrent_cnt_msg += f"删除种子{len(set(del_torrent_hashs))}个\n" - if stop_torrent_hashs: - stop_cnt = 0 - # 排除已删除 - for stop_hash in set(stop_torrent_hashs): - if stop_hash not in set(del_torrent_hashs): - stop_cnt += 1 - if stop_cnt > 0: - torrent_cnt_msg += f"暂停种子{stop_cnt}个\n" - self.post_message( - mtype=NotificationType.MediaServer, - title="媒体库同步删除任务完成", - text=f"{msg}\n" - f"删除记录{len(transfer_history)}个\n" - f"{torrent_cnt_msg}" - f"时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}", - image=image) - - history.append({ - "type": "电影" if media_type == "Movie" else "电视剧", - "title": media_name, - "year": media_year, - "path": media_path, - "season": media_season, - "episode": media_episode, - "image": image, - "del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) - }) - - # 保存历史 - self.save_data("history", history) - - self.save_data("last_time", last_del_time or datetime.datetime.now()) - - def handle_torrent(self, src: str, torrent_hash: str): - """ - 判断种子是否局部删除 - 局部删除则暂停种子 - 全部删除则删除种子 - """ - download_id = torrent_hash - download = settings.DOWNLOADER - history_key = "%s-%s" % (download, torrent_hash) - plugin_id = "TorrentTransfer" - transfer_history = self.get_data(key=history_key, - plugin_id=plugin_id) - logger.info(f"查询到 {history_key} 转种历史 {transfer_history}") - - handle_torrent_hashs = [] - try: - # 删除本次种子记录 - self._downloadhis.delete_file_by_fullpath(fullpath=src) - - # 根据种子hash查询所有下载器文件记录 - download_files = self._downloadhis.get_files_by_hash(download_hash=torrent_hash) - if not download_files: - logger.error( - f"未查询到种子任务 {torrent_hash} 存在文件记录,未执行下载器文件同步或该种子已被删除") - return False, False, 0 - - # 查询未删除数 - no_del_cnt = 0 - for download_file in download_files: - if download_file and download_file.state and int(download_file.state) == 1: - no_del_cnt += 1 - - if no_del_cnt > 0: - logger.info( - f"查询种子任务 {torrent_hash} 存在 {no_del_cnt} 个未删除文件,执行暂停种子操作") - delete_flag = False - else: - logger.info( - f"查询种子任务 {torrent_hash} 文件已全部删除,执行删除种子操作") - delete_flag = True - - # 如果有转种记录,则删除转种后的下载任务 - if transfer_history and isinstance(transfer_history, dict): - download = transfer_history['to_download'] - download_id = transfer_history['to_download_id'] - delete_source = 
transfer_history['delete_source'] - - # 删除种子 - if delete_flag: - # 删除转种记录 - self.del_data(key=history_key, plugin_id=plugin_id) - - # 转种后未删除源种时,同步删除源种 - if not delete_source: - logger.info(f"{history_key} 转种时未删除源下载任务,开始删除源下载任务…") - - # 删除源种子 - logger.info(f"删除源下载器下载任务:{settings.DOWNLOADER} - {torrent_hash}") - self.chain.remove_torrents(torrent_hash) - handle_torrent_hashs.append(torrent_hash) - - # 删除转种后任务 - logger.info(f"删除转种后下载任务:{download} - {download_id}") - # 删除转种后下载任务 - if download == "transmission": - self.tr.delete_torrents(delete_file=True, - ids=download_id) - else: - self.qb.delete_torrents(delete_file=True, - ids=download_id) - handle_torrent_hashs.append(download_id) - else: - # 暂停种子 - # 转种后未删除源种时,同步暂停源种 - if not delete_source: - logger.info(f"{history_key} 转种时未删除源下载任务,开始暂停源下载任务…") - - # 暂停源种子 - logger.info(f"暂停源下载器下载任务:{settings.DOWNLOADER} - {torrent_hash}") - self.chain.stop_torrents(torrent_hash) - handle_torrent_hashs.append(torrent_hash) - - logger.info(f"暂停转种后下载任务:{download} - {download_id}") - # 删除转种后下载任务 - if download == "transmission": - self.tr.stop_torrents(ids=download_id) - else: - self.qb.stop_torrents(ids=download_id) - handle_torrent_hashs.append(download_id) - else: - # 未转种de情况 - if delete_flag: - # 删除源种子 - logger.info(f"删除源下载器下载任务:{download} - {download_id}") - self.chain.remove_torrents(download_id) - else: - # 暂停源种子 - logger.info(f"暂停源下载器下载任务:{download} - {download_id}") - self.chain.stop_torrents(download_id) - handle_torrent_hashs.append(download_id) - - # 处理辅种 - handle_cnt = self.__del_seed(download=download, - download_id=download_id, - action_flag="del" if delete_flag else 'stop', - handle_torrent_hashs=handle_torrent_hashs) - - return delete_flag, True, handle_cnt - except Exception as e: - logger.error(f"删种失败: {str(e)}") - return False, False, 0 - - def __del_seed(self, download, download_id, action_flag, handle_torrent_hashs): - """ - 删除辅种 - """ - # 查询是否有辅种记录 - history_key = download_id - plugin_id = "IYUUAutoSeed" - seed_history = self.get_data(key=history_key, - plugin_id=plugin_id) or [] - logger.info(f"查询到 {history_key} 辅种历史 {seed_history}") - - # 有辅种记录则处理辅种 - if seed_history and isinstance(seed_history, list): - for history in seed_history: - downloader = history['downloader'] - torrents = history['torrents'] - if not downloader or not torrents: - return - if not isinstance(torrents, list): - torrents = [torrents] - - # 删除辅种历史 - for torrent in torrents: - handle_torrent_hashs.append(torrent) - if str(download) == "qbittorrent": - # 删除辅种 - if action_flag == "del": - logger.info(f"删除辅种:{downloader} - {torrent}") - self.qb.delete_torrents(delete_file=True, - ids=torrent) - # 暂停辅种 - if action_flag == "stop": - self.qb.stop_torrents(torrent) - logger.info(f"辅种:{downloader} - {torrent} 暂停") - else: - # 删除辅种 - if action_flag == "del": - logger.info(f"删除辅种:{downloader} - {torrent}") - self.tr.delete_torrents(delete_file=True, - ids=torrent) - # 暂停辅种 - if action_flag == "stop": - self.tr.stop_torrents(torrent) - logger.info(f"辅种:{downloader} - {torrent} 暂停") - break - - # 删除辅种历史 - if action_flag == "del": - self.del_data(key=history_key, - plugin_id=plugin_id) - return handle_torrent_hashs - - @staticmethod - def parse_emby_log(last_time): - """ - 获取emby日志列表、解析emby日志 - """ - - def __parse_log(file_name: str, del_list: list): - """ - 解析emby日志 - """ - log_url = f"[HOST]System/Logs/{file_name}?api_key=[APIKEY]" - log_res = Emby().get_data(log_url) - if not log_res or log_res.status_code != 200: - logger.error("获取emby日志失败,请检查服务器配置") - return del_list - - 
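handle_torrent() only removes a torrent once every file attached to its hash has been deleted; if any download-history record still has state == 1 it merely pauses the task. The decision itself reduces to this:

from typing import Iterable

def torrent_action(file_states: Iterable[int]) -> str:
    # state == 1 means the file record is still present; any survivor only
    # pauses the torrent, a fully deleted torrent is removed outright
    remaining = sum(1 for state in file_states if int(state) == 1)
    return "stop" if remaining > 0 else "delete"

print(torrent_action([1, 0, 0]))  # "stop": some files of the torrent remain
print(torrent_action([0, 0]))     # "delete": every file is gone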
# 正则解析删除的媒体信息 - pattern = r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}) Info App: Removing item from database, Type: (\w+), Name: (.*), Path: (.*), Id: (\d+)' - matches = re.findall(pattern, log_res.text) - - # 循环获取媒体信息 - for match in matches: - mtime = match[0] - # 排除已处理的媒体信息 - if last_time and mtime < last_time: - continue - - mtype = match[1] - name = match[2] - path = match[3] - - year = None - year_pattern = r'\(\d+\)' - year_match = re.search(year_pattern, path) - if year_match: - year = year_match.group()[1:-1] - - season = None - episode = None - if mtype == 'Episode' or mtype == 'Season': - name_pattern = r"\/([\u4e00-\u9fa5]+)(?= \()" - season_pattern = r"Season\s*(\d+)" - episode_pattern = r"S\d+E(\d+)" - name_match = re.search(name_pattern, path) - season_match = re.search(season_pattern, path) - episode_match = re.search(episode_pattern, path) - - if name_match: - name = name_match.group(1) - - if season_match: - season = season_match.group(1) - if int(season) < 10: - season = f'S0{season}' - else: - season = f'S{season}' - else: - season = None - - if episode_match: - episode = episode_match.group(1) - episode = f'E{episode}' - else: - episode = None - - media = { - "time": mtime, - "type": mtype, - "name": name, - "year": year, - "path": path, - "season": season, - "episode": episode, - } - logger.debug(f"解析到删除媒体:{json.dumps(media)}") - del_list.append(media) - - return del_list - - log_files = [] - try: - # 获取所有emby日志 - log_list_url = "[HOST]System/Logs/Query?Limit=3&api_key=[APIKEY]" - log_list_res = Emby().get_data(log_list_url) - - if log_list_res and log_list_res.status_code == 200: - log_files_dict = json.loads(log_list_res.text) - for item in log_files_dict.get("Items"): - if str(item.get('Name')).startswith("embyserver"): - log_files.append(str(item.get('Name'))) - except Exception as e: - print(str(e)) - - if not log_files: - log_files.append("embyserver.txt") - - del_medias = [] - log_files.reverse() - for log_file in log_files: - del_medias = __parse_log(log_file, del_medias) - - return del_medias - - @staticmethod - def parse_jellyfin_log(last_time: datetime): - """ - 获取jellyfin日志列表、解析jellyfin日志 - """ - - def __parse_log(file_name: str, del_list: list): - """ - 解析jellyfin日志 - """ - log_url = f"[HOST]System/Logs/Log?name={file_name}&api_key=[APIKEY]" - log_res = Jellyfin().get_data(log_url) - if not log_res or log_res.status_code != 200: - logger.error("获取jellyfin日志失败,请检查服务器配置") - return del_list - - # 正则解析删除的媒体信息 - pattern = r'\[(.*?)\].*?Removing item, Type: "(.*?)", Name: "(.*?)", Path: "(.*?)"' - matches = re.findall(pattern, log_res.text) - - # 循环获取媒体信息 - for match in matches: - mtime = match[0] - # 排除已处理的媒体信息 - if last_time and mtime < last_time: - continue - - mtype = match[1] - name = match[2] - path = match[3] - - year = None - year_pattern = r'\(\d+\)' - year_match = re.search(year_pattern, path) - if year_match: - year = year_match.group()[1:-1] - - season = None - episode = None - if mtype == 'Episode' or mtype == 'Season': - name_pattern = r"\/([\u4e00-\u9fa5]+)(?= \()" - season_pattern = r"Season\s*(\d+)" - episode_pattern = r"S\d+E(\d+)" - name_match = re.search(name_pattern, path) - season_match = re.search(season_pattern, path) - episode_match = re.search(episode_pattern, path) - - if name_match: - name = name_match.group(1) - - if season_match: - season = season_match.group(1) - if int(season) < 10: - season = f'S0{season}' - else: - season = f'S{season}' - else: - season = None - - if episode_match: - episode = episode_match.group(1) - episode = 
f'E{episode}' - else: - episode = None - - media = { - "time": mtime, - "type": mtype, - "name": name, - "year": year, - "path": path, - "season": season, - "episode": episode, - } - logger.debug(f"解析到删除媒体:{json.dumps(media)}") - del_list.append(media) - - return del_list - - log_files = [] - try: - # 获取所有jellyfin日志 - log_list_url = "[HOST]System/Logs?api_key=[APIKEY]" - log_list_res = Jellyfin().get_data(log_list_url) - - if log_list_res and log_list_res.status_code == 200: - log_files_dict = json.loads(log_list_res.text) - for item in log_files_dict: - if str(item.get('Name')).startswith("log_"): - log_files.append(str(item.get('Name'))) - except Exception as e: - print(str(e)) - - if not log_files: - log_files.append("log_%s.log" % datetime.date.today().strftime("%Y%m%d")) - - del_medias = [] - log_files.reverse() - for log_file in log_files: - del_medias = __parse_log(log_file, del_medias) - - return del_medias - - def get_state(self): - return self._enabled - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - @eventmanager.register(EventType.DownloadFileDeleted) - def downloadfile_del_sync(self, event: Event): - """ - 下载文件删除处理事件 - """ - if not event: - return - event_data = event.event_data - src = event_data.get("src") - if not src: - return - # 查询下载hash - download_hash = self._downloadhis.get_hash_by_fullpath(src) - if download_hash: - self.handle_torrent(src=src, torrent_hash=download_hash) - else: - logger.warn(f"未查询到文件 {src} 对应的下载记录") - - @staticmethod - def get_tmdbimage_url(path: str, prefix="w500"): - if not path: - return "" - tmdb_image_url = f"https://{settings.TMDB_IMAGE_DOMAIN}" - return tmdb_image_url + f"/t/p/{prefix}{path}" diff --git a/app/plugins/messageforward/__init__.py b/app/plugins/messageforward/__init__.py deleted file mode 100644 index 4351dd37..00000000 --- a/app/plugins/messageforward/__init__.py +++ /dev/null @@ -1,387 +0,0 @@ -import json -import re -from datetime import datetime - -from app.core.config import settings -from app.plugins import _PluginBase -from app.core.event import eventmanager -from app.schemas.types import EventType, MessageChannel -from app.utils.http import RequestUtils -from typing import Any, List, Dict, Tuple, Optional -from app.log import logger - - -class MessageForward(_PluginBase): - # 插件名称 - plugin_name = "消息转发" - # 插件描述 - plugin_desc = "根据正则转发通知到其他WeChat应用。" - # 插件图标 - plugin_icon = "forward.png" - # 主题色 - plugin_color = "#32ABD1" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "messageforward_" - # 加载顺序 - plugin_order = 16 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - _wechat = None - _pattern = None - _pattern_token = {} - - # 企业微信发送消息URL - _send_msg_url = f"{settings.WECHAT_PROXY}/cgi-bin/message/send?access_token=%s" - # 企业微信获取TokenURL - _token_url = f"{settings.WECHAT_PROXY}/cgi-bin/gettoken?corpid=%s&corpsecret=%s" - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._wechat = config.get("wechat") - self._pattern = config.get("pattern") - - # 获取token存库 - if self._enabled and self._wechat: - self.__save_wechat_token() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, 
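parse_emby_log and parse_jellyfin_log above recover deletions purely from log text with regular expressions. The sketch below exercises the Emby patterns against a synthetic line shaped only to satisfy them (the path is the one quoted in the plugin's comments); real log wording may differ between server versions:

import re

# patterns copied from the deleted parse_emby_log()
ITEM_PATTERN = (r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}) Info App: '
                r'Removing item from database, Type: (\w+), Name: (.*), '
                r'Path: (.*), Id: (\d+)')
SEASON_PATTERN = r"Season\s*(\d+)"
EPISODE_PATTERN = r"S\d+E(\d+)"

sample = ("2023-09-01 12:00:00.000 Info App: Removing item from database, "
          "Type: Episode, Name: 蜀山战纪, "
          "Path: /data/series/国产剧/蜀山战纪 (2015)/Season 2/蜀山战纪 - S02E01 - 第1集.mp4, Id: 123")

for mtime, mtype, name, path, _id in re.findall(ITEM_PATTERN, sample):
    season = re.search(SEASON_PATTERN, path)
    episode = re.search(EPISODE_PATTERN, path)
    print(mtime, mtype, name,
          f"S{int(season.group(1)):02d}" if season else None,
          f"E{episode.group(1)}" if episode else None)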
Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '开启转发' - } - } - ] - }, - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'wechat', - 'rows': '3', - 'label': '应用配置', - 'placeholder': 'appid:corpid:appsecret(一行一个配置)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'pattern', - 'rows': '3', - 'label': '正则配置', - 'placeholder': '对应上方应用配置,一行一个,一一对应' - } - } - ] - } - ] - }, - ] - } - ], { - "enabled": False, - "wechat": "", - "pattern": "" - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.NoticeMessage) - def send(self, event): - """ - 消息转发 - """ - if not self._enabled: - return - - # 消息体 - data = event.event_data - channel = data['channel'] - if channel and channel != MessageChannel.Wechat: - return - - title = data['title'] - text = data['text'] - image = data['image'] - userid = data['userid'] - - # 正则匹配 - patterns = self._pattern.split("\n") - for index, pattern in enumerate(patterns): - msg_match = re.search(pattern, title) - if msg_match: - access_token, appid = self.__flush_access_token(index) - if not access_token: - logger.error("未获取到有效token,请检查配置") - continue - - # 发送消息 - if image: - self.__send_image_message(title, text, image, userid, access_token, appid, index) - else: - self.__send_message(title, text, userid, access_token, appid, index) - - def __save_wechat_token(self): - """ - 获取并存储wechat token - """ - # 解析配置 - wechats = self._wechat.split("\n") - for index, wechat in enumerate(wechats): - wechat_config = wechat.split(":") - if len(wechat_config) != 3: - logger.error(f"{wechat} 应用配置不正确") - continue - appid = wechat_config[0] - corpid = wechat_config[1] - appsecret = wechat_config[2] - - # 已过期,重新获取token - access_token, expires_in, access_token_time = self.__get_access_token(corpid=corpid, - appsecret=appsecret) - if not access_token: - # 没有token,获取token - logger.error(f"wechat配置 appid = {appid} 获取token失败,请检查配置") - continue - - self._pattern_token[index] = { - "appid": appid, - "corpid": corpid, - "appsecret": appsecret, - "access_token": access_token, - "expires_in": expires_in, - "access_token_time": access_token_time, - } - - def __flush_access_token(self, index: int, force: bool = False): - """ - 获取第i个配置wechat token - """ - wechat_token = self._pattern_token[index] - if not wechat_token: - logger.error(f"未获取到第 {index} 条正则对应的wechat应用token,请检查配置") - return None - access_token = wechat_token['access_token'] - expires_in = wechat_token['expires_in'] - access_token_time = wechat_token['access_token_time'] - appid = wechat_token['appid'] - corpid = wechat_token['corpid'] - appsecret = wechat_token['appsecret'] - - # 判断token有效期 - if force or (datetime.now() - access_token_time).seconds >= expires_in: - # 重新获取token - access_token, expires_in, access_token_time = self.__get_access_token(corpid=corpid, - appsecret=appsecret) - if not access_token: - logger.error(f"wechat配置 appid = {appid} 
获取token失败,请检查配置") - return None, None - - self._pattern_token[index] = { - "appid": appid, - "corpid": corpid, - "appsecret": appsecret, - "access_token": access_token, - "expires_in": expires_in, - "access_token_time": access_token_time, - } - return access_token, appid - - def __send_message(self, title: str, text: str = None, userid: str = None, access_token: str = None, - appid: str = None, index: int = None) -> Optional[bool]: - """ - 发送文本消息 - :param title: 消息标题 - :param text: 消息内容 - :param userid: 消息发送对象的ID,为空则发给所有人 - :return: 发送状态,错误信息 - """ - if text: - conent = "%s\n%s" % (title, text.replace("\n\n", "\n")) - else: - conent = title - - if not userid: - userid = "@all" - req_json = { - "touser": userid, - "msgtype": "text", - "agentid": appid, - "text": { - "content": conent - }, - "safe": 0, - "enable_id_trans": 0, - "enable_duplicate_check": 0 - } - return self.__post_request(access_token=access_token, req_json=req_json, index=index, title=title) - - def __send_image_message(self, title: str, text: str, image_url: str, userid: str = None, - access_token: str = None, appid: str = None, index: int = None) -> Optional[bool]: - """ - 发送图文消息 - :param title: 消息标题 - :param text: 消息内容 - :param image_url: 图片地址 - :param userid: 消息发送对象的ID,为空则发给所有人 - :return: 发送状态,错误信息 - """ - if text: - text = text.replace("\n\n", "\n") - if not userid: - userid = "@all" - req_json = { - "touser": userid, - "msgtype": "news", - "agentid": appid, - "news": { - "articles": [ - { - "title": title, - "description": text, - "picurl": image_url, - "url": '' - } - ] - } - } - return self.__post_request(access_token=access_token, req_json=req_json, index=index, title=title) - - def __post_request(self, access_token: str, req_json: dict, index: int, title: str, retry: int = 0) -> bool: - message_url = self._send_msg_url % access_token - """ - 向微信发送请求 - """ - try: - res = RequestUtils(content_type='application/json').post( - message_url, - data=json.dumps(req_json, ensure_ascii=False).encode('utf-8') - ) - if res and res.status_code == 200: - ret_json = res.json() - if ret_json.get('errcode') == 0: - logger.info(f"转发消息 {title} 成功") - return True - else: - if ret_json.get('errcode') == 81013: - return False - - logger.error(f"转发消息 {title} 失败,错误信息:{ret_json}") - if ret_json.get('errcode') == 42001 or ret_json.get('errcode') == 40014: - logger.info("token已过期,正在重新刷新token重试") - # 重新获取token - access_token, appid = self.__flush_access_token(index=index, - force=True) - if access_token: - retry += 1 - # 重发请求 - if retry <= 3: - return self.__post_request(access_token=access_token, - req_json=req_json, - index=index, - title=title, - retry=retry) - return False - elif res is not None: - logger.error(f"转发消息 {title} 失败,错误码:{res.status_code},错误原因:{res.reason}") - return False - else: - logger.error(f"转发消息 {title} 失败,未获取到返回信息") - return False - except Exception as err: - logger.error(f"转发消息 {title} 异常,错误信息:{str(err)}") - return False - - def __get_access_token(self, corpid: str, appsecret: str): - """ - 获取微信Token - :return: 微信Token - """ - try: - token_url = self._token_url % (corpid, appsecret) - res = RequestUtils().get_res(token_url) - if res: - ret_json = res.json() - if ret_json.get('errcode') == 0: - access_token = ret_json.get('access_token') - expires_in = ret_json.get('expires_in') - access_token_time = datetime.now() - - return access_token, expires_in, access_token_time - else: - logger.error(f"{ret_json.get('errmsg')}") - return None, None, None - else: - logger.error(f"{corpid} {appsecret} 获取token失败") - return None, 
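__flush_access_token caches one token per configured application and refreshes it once the elapsed time reaches expires_in, and __post_request forces such a refresh after errcode 42001/40014 before retrying. A small sketch of the expiry test; it uses total_seconds(), whereas the original's .seconds wraps after a day, which is harmless for WeChat's usual 7200-second lifetime:

from datetime import datetime, timedelta

def token_expired(access_token_time: datetime, expires_in: int) -> bool:
    # equivalent check to the one in __flush_access_token()
    return (datetime.now() - access_token_time).total_seconds() >= expires_in

issued = datetime.now() - timedelta(hours=3)
print(token_expired(issued, 7200))  # True, so refresh the token before sending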
None, None - except Exception as e: - logger.error(f"获取微信access_token失败,错误信息:{str(e)}") - return None, None, None - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/moviepilotupdatenotify/__init__.py b/app/plugins/moviepilotupdatenotify/__init__.py deleted file mode 100644 index a3abfed9..00000000 --- a/app/plugins/moviepilotupdatenotify/__init__.py +++ /dev/null @@ -1,267 +0,0 @@ -import datetime - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.system import SystemChain -from app.core.config import settings -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple, Optional -from app.log import logger -from app.schemas import NotificationType -from app.utils.http import RequestUtils -from app.utils.system import SystemUtils - - -class MoviePilotUpdateNotify(_PluginBase): - # 插件名称 - plugin_name = "MoviePilot更新推送" - # 插件描述 - plugin_desc = "MoviePilot推送release更新通知、自动重启。" - # 插件图标 - plugin_icon = "update.png" - # 主题色 - plugin_color = "#4179F4" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "moviepilotupdatenotify_" - # 加载顺序 - plugin_order = 25 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _cron = None - _restart = False - _notify = False - - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - - def init_plugin(self, config: dict = None): - # 停止现有任务 - self.stop_service() - - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._restart = config.get("restart") - self._notify = config.get("notify") - - # 加载模块 - if self._enabled: - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - if self._cron: - try: - self._scheduler.add_job(func=self.__check_update, - trigger=CronTrigger.from_crontab(self._cron), - name="检查MoviePilot更新") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def __check_update(self): - """ - 检查MoviePilot更新 - """ - release_version, description, update_time = self.__get_release_version() - if not release_version: - logger.error("最新版本获取失败,停止运行") - return - - # 本地版本 - local_version = SystemChain().get_local_version() - if local_version and release_version <= local_version: - logger.info(f"当前版本:{local_version} 远程版本:{release_version} 停止运行") - return - - # 推送更新消息 - if self._notify: - # 将时间字符串转为datetime对象 - dt = datetime.datetime.strptime(update_time, "%Y-%m-%dT%H:%M:%SZ") - # 设置时区 - timezone = pytz.timezone(settings.TZ) - dt = dt.replace(tzinfo=timezone) - # 将datetime对象转换为带时区的字符串 - update_time = dt.strftime("%Y-%m-%d %H:%M:%S") - self.post_message( - mtype=NotificationType.SiteMessage, - title="【MoviePilot更新通知】", - text=f"{release_version} \n" - f"\n" - f"{description} \n" - f"\n" - f"{update_time}") - - # 自动重启 - if self._restart: - logger.info("开始执行自动重启…") - SystemUtils.restart() - - @staticmethod - def __get_release_version(): - """ - 获取最新版本 - """ - version_res = RequestUtils(proxies=settings.PROXY).get_res( - "https://api.github.com/repos/jxxghp/MoviePilot/releases/latest") - if version_res: - ver_json = version_res.json() - version = f"{ver_json['tag_name']}" - description = f"{ver_json['body']}" - update_time = f"{ver_json['published_at']}" - return version, description, update_time - else: - return None, 
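__check_update parses GitHub's published_at with "%Y-%m-%dT%H:%M:%SZ" and then stamps the configured zone onto it with replace(). The variant below instead treats the value as UTC and converts it, using zoneinfo rather than pytz; the default zone name here is only an assumption, pass the configured TZ in practice:

from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # stdlib stand-in for pytz in this sketch

def format_published_at(published_at: str, tz_name: str = "Asia/Shanghai") -> str:
    # GitHub returns UTC timestamps such as "2023-09-01T01:02:03Z"
    dt = datetime.strptime(published_at, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    return dt.astimezone(ZoneInfo(tz_name)).strftime("%Y-%m-%d %H:%M:%S")

print(format_published_at("2023-09-01T01:02:03Z"))  # 2023-09-01 09:02:03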
None, None - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'restart', - 'label': '自动重启', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '检查周期', - 'placeholder': '5位cron表达式' - } - } - ] - }, - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '如要开启自动重启,请确认MOVIEPILOT_AUTO_UPDATE设置为true,重启即更新。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "restart": False, - "notify": False, - "cron": "0 9 * * *" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/nastoolsync/__init__.py b/app/plugins/nastoolsync/__init__.py deleted file mode 100644 index 1b408151..00000000 --- a/app/plugins/nastoolsync/__init__.py +++ /dev/null @@ -1,657 +0,0 @@ -import json -import os -import sqlite3 -from datetime import datetime - -from app.core.config import settings -from app.db.downloadhistory_oper import DownloadHistoryOper -from app.db.plugindata_oper import PluginDataOper -from app.db.transferhistory_oper import TransferHistoryOper -from app.plugins import _PluginBase -from typing import Any, List, Dict, Tuple -from app.log import logger - - -class NAStoolSync(_PluginBase): - # 插件名称 - plugin_name = "历史记录同步" - # 插件描述 - plugin_desc = "同步NAStool历史记录、下载记录、插件记录到MoviePilot。" - # 插件图标 - plugin_icon = "sync.png" - # 主题色 - plugin_color = "#53BA47" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "nastoolsync_" - # 加载顺序 - plugin_order = 15 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _transferhistory = None - _plugindata = None - _downloadhistory = None - _clear = None - _nt_db_path = None - _path = None - _site = None - _downloader = None - _transfer = False - - def init_plugin(self, config: dict = None): - self._transferhistory = TransferHistoryOper() - self._plugindata = PluginDataOper() - self._downloadhistory = DownloadHistoryOper() - - if config: - self._clear = config.get("clear") - self._nt_db_path = config.get("nt_db_path") - self._path = config.get("path") - self._site = config.get("site") - self._downloader = config.get("downloader") - self._transfer = 
config.get("transfer") - - if self._nt_db_path and self._transfer: - # 读取sqlite数据 - try: - gradedb = sqlite3.connect(self._nt_db_path) - except Exception as e: - self.update_config( - { - "transfer": False, - "clear": False, - "nt_db_path": None, - "path": self._path, - "downloader": self._downloader, - "site": self._site, - } - ) - logger.error(f"无法打开数据库文件 {self._nt_db_path},请检查路径是否正确:{str(e)}") - return - - # 创建游标cursor来执行executeSQL语句 - cursor = gradedb.cursor() - - download_history = self.get_nt_download_history(cursor) - plugin_history = self.get_nt_plugin_history(cursor) - transfer_history = self.get_nt_transfer_history(cursor) - - # 关闭游标 - cursor.close() - - # 导入下载记录 - if download_history: - self.sync_download_history(download_history) - - # 导入插件记录 - if plugin_history: - self.sync_plugin_history(plugin_history) - - # 导入历史记录 - if transfer_history: - self.sync_transfer_history(transfer_history) - - self.update_config( - { - "transfer": False, - "clear": False, - "nt_db_path": self._nt_db_path, - "path": self._path, - "downloader": self._downloader, - "site": self._site, - } - ) - - def sync_plugin_history(self, plugin_history): - """ - 导入插件记录 - - NAStool - { - "id": "TorrentTransfer", - "key: "1-4bdc22bc1e062803c8686beb2796369c59ee141f", - "value": "{"to_download": 2, "to_download_id": "4bdc22bc1e062803c8686beb2796369c59ee141f", "delete_source": true}" - }, - { - "id": "IYUUAutoSeed", - "key: "f161efaf008d2e56e7939272e8d95eca58fa71dd", - "value": "[{"downloader": "2", "torrents": ["bd64a8edc5afe6b4beb8813bdbf6faedfb1d4cc4"]}]" - } - """ - # 开始计时 - start_time = datetime.now() - logger.info("开始同步NAStool插件历史记录到MoviePilot") - # 清空MoviePilot插件记录 - if self._clear: - logger.info("MoviePilot插件记录已清空") - self._plugindata.truncate() - - cnt = 0 - for history in plugin_history: - plugin_id = history[1] - plugin_key = history[2] - plugin_value = history[3] - - # 替换转种记录 - if str(plugin_id) == "TorrentTransfer": - keys = str(plugin_key).split("-") - - # 1-2cd5d6fe32dca4e39a3e9f10961bfbdb00437e91 - if len(keys) == 2 and keys[0].isdigit(): - mp_downloader = self.__get_target_downloader(int(keys[0])) - # 替换key - plugin_key = mp_downloader + "-" + keys[1] - - # 替换value - """ - { - "to_download":2, - "to_download_id":"2cd5d6fe32dca4e39a3e9f10961bfbdb00437e91", - "delete_source":true - } - """ - if isinstance(plugin_value, str): - plugin_value: dict = json.loads(plugin_value) - if isinstance(plugin_value, dict): - if str(plugin_value.get("to_download")).isdigit(): - to_downloader = self.__get_target_downloader(int(plugin_value.get("to_download"))) - plugin_value["to_download"] = to_downloader - - # 替换辅种记录 - elif str(plugin_id) == "IYUUAutoSeed": - """ - [ - { - "downloader":"2", - "torrents":[ - "a18aa62abab42613edba15e7dbad0d729d8500da", - "e494f372316bbfd8572da80138a6ef4c491d5991", - "cc2bbc1e654d8fc0f83297f6cd36a38805aa2864", - "68aec0db3aa7fe28a887e5e41a0d0d5bc284910f", - "f02962474287e11441e34e40b8326ddf28d034f6" - ] - }, - { - "downloader":"2", - "torrents":[ - "4f042003ce90519e1aadd02b76f51c0c0711adb3" - ] - } - ] - """ - if isinstance(plugin_value, str): - plugin_value: list = json.loads(plugin_value) - if not isinstance(plugin_value, list): - plugin_value = [plugin_value] - for value in plugin_value: - if str(value.get("downloader")).isdigit(): - downloader = self.__get_target_downloader(int(value.get("downloader"))) - value["downloader"] = downloader - - self._plugindata.save(plugin_id=plugin_id, - key=plugin_key, - value=plugin_value) - cnt += 1 - if cnt % 100 == 0: - logger.info(f"插件记录同步进度 
{cnt} / {len(plugin_history)}") - - # 计算耗时 - end_time = datetime.now() - - logger.info(f"插件记录已同步完成。总耗时 {(end_time - start_time).seconds} 秒") - - def __get_target_downloader(self, download_id: int): - """ - 获取NAStool下载器id对应的Moviepilot下载器 - """ - # 处理下载器映射 - if self._downloader: - downloaders = self._downloader.split("\n") - for downloader in downloaders: - if not downloader: - continue - sub_downloaders = downloader.split(":") - if not str(sub_downloaders[0]).isdigit(): - logger.error(f"下载器映射配置错误:NAStool下载器id 应为数字!") - continue - if int(sub_downloaders[0]) == download_id: - return str(sub_downloaders[1]) - return download_id - - def sync_download_history(self, download_history): - """ - 导入下载记录 - """ - # 开始计时 - start_time = datetime.now() - logger.info("开始同步NAStool下载历史记录到MoviePilot") - # 清空MoviePilot下载记录 - if self._clear: - logger.info("MoviePilot下载记录已清空") - self._downloadhistory.truncate() - - cnt = 0 - for history in download_history: - mpath = history[0] - mtype = history[1] - mtitle = history[2] - myear = history[3] - mtmdbid = history[4] - mseasons = history[5] - mepisodes = history[6] - mimages = history[7] - mdownload_hash = history[8] - mtorrent = history[9] - mdesc = history[10] - msite = history[11] - mdate = history[12] - - # 处理站点映射 - if self._site: - sites = self._site.split("\n") - for site in sites: - sub_sites = site.split(":") - if str(msite) == str(sub_sites[0]): - msite = str(sub_sites[1]) - - self._downloadhistory.add( - path=os.path.basename(mpath), - type=mtype, - title=mtitle, - year=myear, - tmdbid=mtmdbid, - seasons=mseasons, - episodes=mepisodes, - image=mimages, - download_hash=mdownload_hash, - torrent_name=mtorrent, - torrent_description=mdesc, - torrent_site=msite, - userid=settings.SUPERUSER, - date=mdate - ) - cnt += 1 - if cnt % 100 == 0: - logger.info(f"下载记录同步进度 {cnt} / {len(download_history)}") - - # 计算耗时 - end_time = datetime.now() - - logger.info(f"下载记录已同步完成。总耗时 {(end_time - start_time).seconds} 秒") - - def sync_transfer_history(self, transfer_history): - """ - 导入nt转移记录 - """ - # 开始计时 - start_time = datetime.now() - logger.info("开始同步NAStool转移历史记录到MoviePilot") - - # 清空MoviePilot转移记录 - if self._clear: - logger.info("MoviePilot转移记录已清空") - self._transferhistory.truncate() - - # 处理数据,存入mp数据库 - cnt = 0 - for history in transfer_history: - msrc_path = history[0] - msrc_filename = history[1] - mdest_path = history[2] - mdest_filename = history[3] - mmode = history[4] - mtype = history[5] - mcategory = history[6] - mtitle = history[7] - myear = history[8] - mtmdbid = history[9] - mseasons = history[10] - mepisodes = history[11] - mimage = history[12] - mdate = history[13] - - if not msrc_path or not mdest_path: - continue - - msrc = msrc_path + "/" + msrc_filename - mdest = mdest_path + "/" + mdest_filename - - # 处理路径映射 - if self._path: - paths = self._path.split("\n") - for path in paths: - sub_paths = path.split(":") - msrc = msrc.replace(sub_paths[0], sub_paths[1]).replace('\\', '/') - mdest = mdest.replace(sub_paths[0], sub_paths[1]).replace('\\', '/') - - # 存库 - self._transferhistory.add( - src=msrc, - dest=mdest, - mode=mmode, - type=mtype, - category=mcategory, - title=mtitle, - year=myear, - tmdbid=mtmdbid, - seasons=mseasons, - episodes=mepisodes, - image=mimage, - date=mdate - ) - logger.debug(f"{mtitle} {myear} {mtmdbid} {mseasons} {mepisodes} 已同步") - - cnt += 1 - if cnt % 100 == 0: - logger.info(f"转移记录同步进度 {cnt} / {len(transfer_history)}") - - # 计算耗时 - end_time = datetime.now() - - logger.info(f"转移记录已同步完成。总耗时 {(end_time - start_time).seconds} 秒") - - 
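# ----------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the deleted plugin): the three
# textarea settings used above -- path, downloader and site mapping -- all share
# the same "NAStool value:MoviePilot value, one per line" convention. The
# standalone helper below shows one simplified way such a mapping text can be
# parsed and applied; the name `apply_mapping` is hypothetical, and the
# prefix-replace behaviour is an assumption (the plugin itself does a plain
# substring replace for paths and an exact numeric id match for downloaders).
def apply_mapping(mapping_text: str, value: str) -> str:
    """Return the mapped value for the first matching 'src:dst' line, else value unchanged."""
    for line in (mapping_text or "").splitlines():
        line = line.strip()
        if not line or ":" not in line:
            continue
        src, dst = line.split(":", 1)
        if value == src or value.startswith(src):
            return value.replace(src, dst, 1)
    return value
# Example: apply_mapping("/nas/media:/mp/media", "/nas/media/a.mkv")
# returns "/mp/media/a.mkv".
# ----------------------------------------------------------------------------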
@staticmethod - def get_nt_plugin_history(cursor): - """ - 获取插件历史记录 - """ - sql = 'select * from PLUGIN_HISTORY;' - cursor.execute(sql) - plugin_history = cursor.fetchall() - - if not plugin_history: - logger.error("未获取到NAStool数据库文件中的插件历史,请检查数据库路径是正确") - return - - logger.info(f"获取到NAStool插件记录 {len(plugin_history)} 条") - return plugin_history - - @staticmethod - def get_nt_download_history(cursor): - """ - 获取下载历史记录 - """ - sql = ''' - SELECT - SAVE_PATH, - TYPE, - TITLE, - YEAR, - TMDBID, - CASE - SE - WHEN NULL THEN - NULL ELSE substr( SE, 1, instr ( SE, ' ' ) - 1 ) - END AS seasons, - CASE - SE - WHEN NULL THEN - NULL ELSE substr( SE, instr ( SE, ' ' ) + 1 ) - END AS episodes, - POSTER, - DOWNLOAD_ID, - TORRENT, - DESC, - SITE, - DATE - FROM - DOWNLOAD_HISTORY - WHERE - SAVE_PATH IS NOT NULL; - ''' - cursor.execute(sql) - download_history = cursor.fetchall() - - if not download_history: - logger.error("未获取到NAStool数据库文件中的下载历史,请检查数据库路径是正确") - return - - logger.info(f"获取到NAStool下载记录 {len(download_history)} 条") - return download_history - - @staticmethod - def get_nt_transfer_history(cursor): - """ - 获取nt转移记录 - """ - sql = ''' - SELECT - t.SOURCE_PATH AS src_path, - t.SOURCE_FILENAME AS src_filename, - t.DEST_PATH AS dest_path, - t.DEST_FILENAME AS dest_filename, - CASE - t.MODE - WHEN '硬链接' THEN - 'link' - WHEN '移动' THEN - 'move' - WHEN '复制' THEN - 'copy' - END AS mode, - CASE - t.TYPE - WHEN '动漫' THEN - '电视剧' ELSE t.TYPE - END AS type, - t.CATEGORY AS category, - t.TITLE AS title, - t.YEAR AS year, - t.TMDBID AS tmdbid, - CASE - t.SEASON_EPISODE - WHEN NULL THEN - NULL ELSE substr( t.SEASON_EPISODE, 1, instr ( t.SEASON_EPISODE, ' ' ) - 1 ) - END AS seasons, - CASE - t.SEASON_EPISODE - WHEN NULL THEN - NULL ELSE substr( t.SEASON_EPISODE, instr ( t.SEASON_EPISODE, ' ' ) + 1 ) - END AS episodes, - d.POSTER AS image, - t.DATE AS date - FROM - TRANSFER_HISTORY t - LEFT JOIN ( SELECT * FROM DOWNLOAD_HISTORY GROUP BY TMDBID ) d ON t.TMDBID = d.TMDBID - AND t.TYPE = d.TYPE; - ''' - cursor.execute(sql) - nt_historys = cursor.fetchall() - - if not nt_historys: - logger.error("未获取到NAStool数据库文件中的转移历史,请检查数据库路径是正确") - return - - logger.info(f"获取到NAStool转移记录 {len(nt_historys)} 条") - return nt_historys - - def get_state(self) -> bool: - return False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'transfer', - 'label': '同步记录' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear', - 'label': '清空记录' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'nt_db_path', - 'label': 'NAStool数据库user.db路径', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'path', - 'rows': '2', - 'label': '历史记录路径映射', - 'placeholder': 'NAStool路径:MoviePilot路径(一行一个)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ 
- { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'downloader', - 'rows': '2', - 'label': '插件数据下载器映射', - 'placeholder': 'NAStool下载器id:qbittorrent|transmission(一行一个)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'site', - 'label': '下载历史站点映射', - 'placeholder': 'NAStool站点名:MoviePilot站点名(一行一个)' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '开启清空记录时,会在导入历史数据之前删除MoviePilot之前的记录。' - '如果转移记录很多,同步时间可能会长(3-10分钟),' - '所以点击确定后页面没反应是正常现象,后台正在处理。' - } - } - ] - } - ] - } - ] - } - ], { - "transfer": False, - "clear": False, - "supp": False, - "nt_db_path": "", - "path": "", - "downloader": "", - "site": "", - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/personmeta/__init__.py b/app/plugins/personmeta/__init__.py deleted file mode 100644 index da8b863a..00000000 --- a/app/plugins/personmeta/__init__.py +++ /dev/null @@ -1,1019 +0,0 @@ -import base64 -import copy -import datetime -import json -import re -import threading -import time -from pathlib import Path -from typing import Any, List, Dict, Tuple, Optional - -import pytz -import zhconv -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from requests import RequestException - -from app.chain.mediaserver import MediaServerChain -from app.chain.tmdb import TmdbChain -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.core.meta import MetaBase -from app.log import logger -from app.modules.emby import Emby -from app.modules.jellyfin import Jellyfin -from app.modules.plex import Plex -from app.plugins import _PluginBase -from app.schemas import MediaInfo, MediaServerItem -from app.schemas.types import EventType, MediaType -from app.utils.common import retry -from app.utils.http import RequestUtils -from app.utils.string import StringUtils - - -class PersonMeta(_PluginBase): - # 插件名称 - plugin_name = "演职人员刮削" - # 插件描述 - plugin_desc = "刮削演职人员图片以及中文名称。" - # 插件图标 - plugin_icon = "actor.png" - # 主题色 - plugin_color = "#E66E72" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "personmeta_" - # 加载顺序 - plugin_order = 24 - # 可使用的用户级别 - auth_level = 1 - - # 退出事件 - _event = threading.Event() - - # 私有属性 - _scheduler = None - tmdbchain = None - mschain = None - _enabled = False - _onlyonce = False - _cron = None - _delay = 0 - _type = "all" - _remove_nozh = False - - def init_plugin(self, config: dict = None): - self.tmdbchain = TmdbChain() - self.mschain = MediaServerChain() - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._type = config.get("type") or "all" - self._delay = config.get("delay") or 0 - self._remove_nozh = config.get("remove_nozh") or False - - # 停止现有任务 - self.stop_service() - - # 启动服务 - if self._enabled or self._onlyonce: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron or self._onlyonce: - if self._cron: - try: - 
self._scheduler.add_job(func=self.scrap_library, - trigger=CronTrigger.from_crontab(self._cron), - name="演职人员刮削") - logger.info(f"演职人员刮削服务启动,周期:{self._cron}") - except Exception as e: - logger.error(f"演职人员刮削服务启动失败,错误信息:{str(e)}") - self.systemmessage.put(f"演职人员刮削服务启动失败,错误信息:{str(e)}") - if self._onlyonce: - self._scheduler.add_job(func=self.scrap_library, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - logger.info(f"演职人员刮削服务启动,立即运行一次") - # 关闭一次性开关 - self._onlyonce = False - # 保存配置 - self.__update_config() - - if self._scheduler.get_jobs(): - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def __update_config(self): - """ - 更新配置 - """ - self.update_config({ - "enabled": self._enabled, - "onlyonce": self._onlyonce, - "cron": self._cron, - "type": self._type, - "delay": self._delay, - "remove_nozh": self._remove_nozh - }) - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '媒体库扫描周期', - 'placeholder': '5位cron表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'delay', - 'label': '入库延迟时间(秒)', - 'placeholder': '30' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'type', - 'label': '刮削条件', - 'items': [ - {'title': '全部', 'value': 'all'}, - {'title': '演员非中文', 'value': 'name'}, - {'title': '角色非中文', 'value': 'role'}, - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'remove_nozh', - 'label': '删除非中文演员', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "cron": "", - "type": "all", - "delay": 30, - "remove_nozh": False - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.TransferComplete) - def scrap_rt(self, event: Event): - """ - 根据事件实时刮削演员信息 - """ - if not self._enabled: - return - # 事件数据 - mediainfo: MediaInfo = event.event_data.get("mediainfo") - meta: MetaBase = event.event_data.get("meta") - if not mediainfo or not meta: - return - # 延迟 - if self._delay: - time.sleep(int(self._delay)) - # 查询媒体服务器中的条目 - existsinfo = self.chain.media_exists(mediainfo=mediainfo) - if not existsinfo or not existsinfo.itemid: - logger.warn(f"演职人员刮削 {mediainfo.title_year} 在媒体库中不存在") - return - # 查询条目详情 - iteminfo = self.mschain.iteminfo(server=existsinfo.server, item_id=existsinfo.itemid) - if not 
iteminfo: - logger.warn(f"演职人员刮削 {mediainfo.title_year} 条目详情获取失败") - return - # 刮削演职人员信息 - self.__update_item(server=existsinfo.server, item=iteminfo, - mediainfo=mediainfo, season=meta.begin_season) - - def scrap_library(self): - """ - 扫描整个媒体库,刮削演员信息 - """ - # 所有媒体服务器 - if not settings.MEDIASERVER: - return - for server in settings.MEDIASERVER.split(","): - # 扫描所有媒体库 - logger.info(f"开始刮削服务器 {server} 的演员信息 ...") - for library in self.mschain.librarys(server): - logger.info(f"开始刮削媒体库 {library.name} 的演员信息 ...") - for item in self.mschain.items(server, library.id): - if not item: - continue - if not item.item_id: - continue - if "Series" not in item.item_type \ - and "Movie" not in item.item_type: - continue - if self._event.is_set(): - logger.info(f"演职人员刮削服务停止") - return - # 处理条目 - logger.info(f"开始刮削 {item.title} 的演员信息 ...") - self.__update_item(server=server, item=item) - logger.info(f"{item.title} 的演员信息刮削完成") - logger.info(f"媒体库 {library.name} 的演员信息刮削完成") - logger.info(f"服务器 {server} 的演员信息刮削完成") - - def __update_peoples(self, server: str, itemid: str, iteminfo: dict, douban_actors): - # 处理媒体项中的人物信息 - """ - "People": [ - { - "Name": "丹尼尔·克雷格", - "Id": "33625", - "Role": "James Bond", - "Type": "Actor", - "PrimaryImageTag": "bef4f764540f10577f804201d8d27918" - } - ] - """ - peoples = [] - # 更新当前媒体项人物 - for people in iteminfo["People"] or []: - if self._event.is_set(): - logger.info(f"演职人员刮削服务停止") - return - if not people.get("Name"): - continue - if StringUtils.is_chinese(people.get("Name")) \ - and StringUtils.is_chinese(people.get("Role")): - peoples.append(people) - continue - info = self.__update_people(server=server, people=people, - douban_actors=douban_actors) - if info: - peoples.append(info) - elif not self._remove_nozh: - peoples.append(people) - # 保存媒体项信息 - if peoples: - iteminfo["People"] = peoples - self.set_iteminfo(server=server, itemid=itemid, iteminfo=iteminfo) - - def __update_item(self, server: str, item: MediaServerItem, - mediainfo: MediaInfo = None, season: int = None): - """ - 更新媒体服务器中的条目 - """ - - def __need_trans_actor(_item): - """ - 是否需要处理人物信息 - """ - if self._type == "name": - # 是否需要处理人物名称 - _peoples = [x for x in _item.get("People", []) if - (x.get("Name") and not StringUtils.is_chinese(x.get("Name")))] - elif self._type == "role": - # 是否需要处理人物角色 - _peoples = [x for x in _item.get("People", []) if - (x.get("Role") and not StringUtils.is_chinese(x.get("Role")))] - else: - _peoples = [x for x in _item.get("People", []) if - (x.get("Name") and not StringUtils.is_chinese(x.get("Name"))) - or (x.get("Role") and not StringUtils.is_chinese(x.get("Role")))] - if _peoples: - return True - return False - - # 识别媒体信息 - if not mediainfo: - if not item.tmdbid: - logger.warn(f"{item.title} 未找到tmdbid,无法识别媒体信息") - return - mtype = MediaType.TV if item.item_type in ['Series', 'show'] else MediaType.MOVIE - mediainfo = self.chain.recognize_media(mtype=mtype, tmdbid=item.tmdbid) - if not mediainfo: - logger.warn(f"{item.title} 未识别到媒体信息") - return - - # 获取媒体项 - iteminfo = self.get_iteminfo(server=server, itemid=item.item_id) - if not iteminfo: - logger.warn(f"{item.title} 未找到媒体项") - return - - if __need_trans_actor(iteminfo): - # 获取豆瓣演员信息 - logger.info(f"开始获取 {item.title} 的豆瓣演员信息 ...") - douban_actors = self.__get_douban_actors(mediainfo=mediainfo, season=season) - self.__update_peoples(server=server, itemid=item.item_id, iteminfo=iteminfo, douban_actors=douban_actors) - else: - logger.info(f"{item.title} 的人物信息已是中文,无需更新") - - # 处理季和集人物 - if iteminfo.get("Type") and "Series" in 
iteminfo["Type"]: - # 获取季媒体项 - seasons = self.get_items(server=server, parentid=item.item_id, mtype="Season") - if not seasons: - logger.warn(f"{item.title} 未找到季媒体项") - return - for season in seasons["Items"]: - # 获取豆瓣演员信息 - season_actors = self.__get_douban_actors(mediainfo=mediainfo, season=season.get("IndexNumber")) - # 如果是Jellyfin,更新季的人物,Emby/Plex季没有人物 - if server == "jellyfin": - seasoninfo = self.get_iteminfo(server=server, itemid=season.get("Id")) - if not seasoninfo: - logger.warn(f"{item.title} 未找到季媒体项:{season.get('Id')}") - continue - - if __need_trans_actor(seasoninfo): - # 更新季媒体项人物 - self.__update_peoples(server=server, itemid=season.get("Id"), iteminfo=seasoninfo, - douban_actors=season_actors) - logger.info(f"季 {seasoninfo.get('Id')} 的人物信息更新完成") - else: - logger.info(f"季 {seasoninfo.get('Id')} 的人物信息已是中文,无需更新") - # 获取集媒体项 - episodes = self.get_items(server=server, parentid=season.get("Id"), mtype="Episode") - if not episodes: - logger.warn(f"{item.title} 未找到集媒体项") - continue - # 更新集媒体项人物 - for episode in episodes["Items"]: - # 获取集媒体项详情 - episodeinfo = self.get_iteminfo(server=server, itemid=episode.get("Id")) - if not episodeinfo: - logger.warn(f"{item.title} 未找到集媒体项:{episode.get('Id')}") - continue - if __need_trans_actor(episodeinfo): - # 更新集媒体项人物 - self.__update_peoples(server=server, itemid=episode.get("Id"), iteminfo=episodeinfo, - douban_actors=season_actors) - logger.info(f"集 {episodeinfo.get('Id')} 的人物信息更新完成") - else: - logger.info(f"集 {episodeinfo.get('Id')} 的人物信息已是中文,无需更新") - - def __update_people(self, server: str, people: dict, douban_actors: list = None) -> Optional[dict]: - """ - 更新人物信息,返回替换后的人物信息 - """ - - def __get_peopleid(p: dict) -> Tuple[Optional[str], Optional[str]]: - """ - 获取人物的TMDBID、IMDBID - """ - if not p.get("ProviderIds"): - return None, None - peopletmdbid, peopleimdbid = None, None - if "Tmdb" in p["ProviderIds"]: - peopletmdbid = p["ProviderIds"]["Tmdb"] - if "tmdb" in p["ProviderIds"]: - peopletmdbid = p["ProviderIds"]["tmdb"] - if "Imdb" in p["ProviderIds"]: - peopleimdbid = p["ProviderIds"]["Imdb"] - if "imdb" in p["ProviderIds"]: - peopleimdbid = p["ProviderIds"]["imdb"] - return peopletmdbid, peopleimdbid - - # 返回的人物信息 - ret_people = copy.deepcopy(people) - - try: - # 查询媒体库人物详情 - personinfo = self.get_iteminfo(server=server, itemid=people.get("Id")) - if not personinfo: - logger.debug(f"未找到人物 {people.get('Name')} 的信息") - return None - - # 是否更新标志 - updated_name = False - updated_overview = False - update_character = False - profile_path = None - - # 从TMDB信息中更新人物信息 - person_tmdbid, person_imdbid = __get_peopleid(personinfo) - if person_tmdbid: - person_tmdbinfo = self.tmdbchain.person_detail(int(person_tmdbid)) - if person_tmdbinfo: - cn_name = self.__get_chinese_name(person_tmdbinfo) - if cn_name: - # 更新中文名 - logger.debug(f"{people.get('Name')} 从TMDB获取到中文名:{cn_name}") - personinfo["Name"] = cn_name - ret_people["Name"] = cn_name - updated_name = True - # 更新中文描述 - biography = person_tmdbinfo.get("biography") - if biography and StringUtils.is_chinese(biography): - logger.debug(f"{people.get('Name')} 从TMDB获取到中文描述") - personinfo["Overview"] = biography - updated_overview = True - # 图片 - profile_path = person_tmdbinfo.get('profile_path') - if profile_path: - logger.debug(f"{people.get('Name')} 从TMDB获取到图片:{profile_path}") - profile_path = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{profile_path}" - - # 从豆瓣信息中更新人物信息 - """ - { - "name": "丹尼尔·克雷格", - "roles": [ - "演员", - "制片人", - "配音" - ], - "title": "丹尼尔·克雷格(同名)英国,英格兰,柴郡,切斯特影视演员", - "url": 
"https://movie.douban.com/celebrity/1025175/", - "user": null, - "character": "饰 詹姆斯·邦德 James Bond 007", - "uri": "douban://douban.com/celebrity/1025175?subject_id=27230907", - "avatar": { - "large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/600/h/3000/format/webp", - "normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/200/h/300/format/webp" - }, - "sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1025175/", - "type": "celebrity", - "id": "1025175", - "latin_name": "Daniel Craig" - } - """ - if douban_actors and (not updated_name - or not updated_overview - or not update_character): - # 从豆瓣演员中匹配中文名称、角色和简介 - for douban_actor in douban_actors: - if douban_actor.get("latin_name") == people.get("Name") \ - or douban_actor.get("name") == people.get("Name"): - # 名称 - if not updated_name: - logger.debug(f"{people.get('Name')} 从豆瓣中获取到中文名:{douban_actor.get('name')}") - personinfo["Name"] = douban_actor.get("name") - ret_people["Name"] = douban_actor.get("name") - updated_name = True - # 描述 - if not updated_overview: - if douban_actor.get("title"): - logger.debug(f"{people.get('Name')} 从豆瓣中获取到中文描述:{douban_actor.get('title')}") - personinfo["Overview"] = douban_actor.get("title") - updated_overview = True - # 饰演角色 - if not update_character: - if douban_actor.get("character"): - # "饰 詹姆斯·邦德 James Bond 007" - character = re.sub(r"饰\s+", "", - douban_actor.get("character")) - character = re.sub("演员", "", - character) - if character: - logger.debug(f"{people.get('Name')} 从豆瓣中获取到饰演角色:{character}") - ret_people["Role"] = character - update_character = True - # 图片 - if not profile_path: - avatar = douban_actor.get("avatar") or {} - if avatar.get("large"): - logger.debug(f"{people.get('Name')} 从豆瓣中获取到图片:{avatar.get('large')}") - profile_path = avatar.get("large") - break - - # 更新人物图片 - if profile_path: - logger.debug(f"更新人物 {people.get('Name')} 的图片:{profile_path}") - self.set_item_image(server=server, itemid=people.get("Id"), imageurl=profile_path) - - # 锁定人物信息 - if updated_name: - if "Name" not in personinfo["LockedFields"]: - personinfo["LockedFields"].append("Name") - if updated_overview: - if "Overview" not in personinfo["LockedFields"]: - personinfo["LockedFields"].append("Overview") - - # 更新人物信息 - if updated_name or updated_overview or update_character: - logger.debug(f"更新人物 {people.get('Name')} 的信息:{personinfo}") - ret = self.set_iteminfo(server=server, itemid=people.get("Id"), iteminfo=personinfo) - if ret: - return ret_people - else: - logger.debug(f"人物 {people.get('Name')} 未找到中文数据") - except Exception as err: - logger.error(f"更新人物信息失败:{str(err)}") - return None - - def __get_douban_actors(self, mediainfo: MediaInfo, season: int = None) -> List[dict]: - """ - 获取豆瓣演员信息 - """ - # 随机休眠 3-10 秒 - sleep_time = 3 + int(time.time()) % 7 - logger.debug(f"随机休眠 {sleep_time}秒 ...") - time.sleep(sleep_time) - # 匹配豆瓣信息 - doubaninfo = self.chain.match_doubaninfo(name=mediainfo.title, - imdbid=mediainfo.imdb_id, - mtype=mediainfo.type.value, - year=mediainfo.year, - season=season) - # 豆瓣演员 - if doubaninfo: - doubanitem = self.chain.douban_info(doubaninfo.get("id")) or {} - return (doubanitem.get("actors") or []) + (doubanitem.get("directors") or []) - else: - logger.debug(f"未找到豆瓣信息:{mediainfo.title_year}") - return [] - - @staticmethod - def get_iteminfo(server: str, itemid: str) -> dict: - """ - 获得媒体项详情 - """ - - def __get_emby_iteminfo() -> dict: - """ - 获得Emby媒体项详情 - """ - try: - url = 
f'[HOST]emby/Users/[USER]/Items/{itemid}?' \ - f'Fields=ChannelMappingInfo&api_key=[APIKEY]' - res = Emby().get_data(url=url) - if res: - return res.json() - except Exception as err: - logger.error(f"获取Emby媒体项详情失败:{str(err)}") - return {} - - def __get_jellyfin_iteminfo() -> dict: - """ - 获得Jellyfin媒体项详情 - """ - try: - url = f'[HOST]Users/[USER]/Items/{itemid}?Fields=ChannelMappingInfo&api_key=[APIKEY]' - res = Jellyfin().get_data(url=url) - if res: - result = res.json() - if result: - result['FileName'] = Path(result['Path']).name - return result - except Exception as err: - logger.error(f"获取Jellyfin媒体项详情失败:{str(err)}") - return {} - - def __get_plex_iteminfo() -> dict: - """ - 获得Plex媒体项详情 - """ - iteminfo = {} - try: - plexitem = Plex().get_plex().library.fetchItem(ekey=itemid) - if 'movie' in plexitem.METADATA_TYPE: - iteminfo['Type'] = 'Movie' - iteminfo['IsFolder'] = False - elif 'episode' in plexitem.METADATA_TYPE: - iteminfo['Type'] = 'Series' - iteminfo['IsFolder'] = False - if 'show' in plexitem.TYPE: - iteminfo['ChildCount'] = plexitem.childCount - iteminfo['Name'] = plexitem.title - iteminfo['Id'] = plexitem.key - iteminfo['ProductionYear'] = plexitem.year - iteminfo['ProviderIds'] = {} - for guid in plexitem.guids: - idlist = str(guid.id).split(sep='://') - if len(idlist) < 2: - continue - iteminfo['ProviderIds'][idlist[0]] = idlist[1] - for location in plexitem.locations: - iteminfo['Path'] = location - iteminfo['FileName'] = Path(location).name - iteminfo['Overview'] = plexitem.summary - iteminfo['CommunityRating'] = plexitem.audienceRating - return iteminfo - except Exception as err: - logger.error(f"获取Plex媒体项详情失败:{str(err)}") - return {} - - if server == "emby": - return __get_emby_iteminfo() - elif server == "jellyfin": - return __get_jellyfin_iteminfo() - else: - return __get_plex_iteminfo() - - @staticmethod - def get_items(server: str, parentid: str, mtype: str = None) -> dict: - """ - 获得媒体的所有子媒体项 - """ - pass - - def __get_emby_items() -> dict: - """ - 获得Emby媒体的所有子媒体项 - """ - try: - if parentid: - url = f'[HOST]emby/Users/[USER]/Items?ParentId={parentid}&api_key=[APIKEY]' - else: - url = '[HOST]emby/Users/[USER]/Items?api_key=[APIKEY]' - res = Emby().get_data(url=url) - if res: - return res.json() - except Exception as err: - logger.error(f"获取Emby媒体的所有子媒体项失败:{str(err)}") - return {} - - def __get_jellyfin_items() -> dict: - """ - 获得Jellyfin媒体的所有子媒体项 - """ - try: - if parentid: - url = f'[HOST]Users/[USER]/Items?ParentId={parentid}&api_key=[APIKEY]' - else: - url = '[HOST]Users/[USER]/Items?api_key=[APIKEY]' - res = Jellyfin().get_data(url=url) - if res: - return res.json() - except Exception as err: - logger.error(f"获取Jellyfin媒体的所有子媒体项失败:{str(err)}") - return {} - - def __get_plex_items() -> dict: - """ - 获得Plex媒体的所有子媒体项 - """ - items = {} - try: - plex = Plex().get_plex() - items['Items'] = [] - if parentid: - if mtype and 'Season' in mtype: - plexitem = plex.library.fetchItem(ekey=parentid) - items['Items'] = [] - for season in plexitem.seasons(): - item = { - 'Name': season.title, - 'Id': season.key, - 'IndexNumber': season.seasonNumber, - 'Overview': season.summary - } - items['Items'].append(item) - elif mtype and 'Episode' in mtype: - plexitem = plex.library.fetchItem(ekey=parentid) - items['Items'] = [] - for episode in plexitem.episodes(): - item = { - 'Name': episode.title, - 'Id': episode.key, - 'IndexNumber': episode.episodeNumber, - 'Overview': episode.summary, - 'CommunityRating': episode.audienceRating - } - items['Items'].append(item) - else: - plexitems 
= plex.library.sectionByID(sectionID=parentid) - for plexitem in plexitems.all(): - item = {} - if 'movie' in plexitem.METADATA_TYPE: - item['Type'] = 'Movie' - item['IsFolder'] = False - elif 'episode' in plexitem.METADATA_TYPE: - item['Type'] = 'Series' - item['IsFolder'] = False - item['Name'] = plexitem.title - item['Id'] = plexitem.key - items['Items'].append(item) - else: - plexitems = plex.library.sections() - for plexitem in plexitems: - item = {} - if 'Directory' in plexitem.TAG: - item['Type'] = 'Folder' - item['IsFolder'] = True - elif 'movie' in plexitem.METADATA_TYPE: - item['Type'] = 'Movie' - item['IsFolder'] = False - elif 'episode' in plexitem.METADATA_TYPE: - item['Type'] = 'Series' - item['IsFolder'] = False - item['Name'] = plexitem.title - item['Id'] = plexitem.key - items['Items'].append(item) - return items - except Exception as err: - logger.error(f"获取Plex媒体的所有子媒体项失败:{str(err)}") - return {} - - if server == "emby": - return __get_emby_items() - elif server == "jellyfin": - return __get_jellyfin_items() - else: - return __get_plex_items() - - @staticmethod - def set_iteminfo(server: str, itemid: str, iteminfo: dict): - """ - 更新媒体项详情 - """ - - def __set_emby_iteminfo(): - """ - 更新Emby媒体项详情 - """ - try: - res = Emby().post_data( - url=f'[HOST]emby/Items/{itemid}?api_key=[APIKEY]&reqformat=json', - data=json.dumps(iteminfo), - headers={ - "Content-Type": "application/json" - } - ) - if res and res.status_code in [200, 204]: - return True - else: - logger.error(f"更新Emby媒体项详情失败,错误码:{res.status_code}") - return False - except Exception as err: - logger.error(f"更新Emby媒体项详情失败:{str(err)}") - return False - - def __set_jellyfin_iteminfo(): - """ - 更新Jellyfin媒体项详情 - """ - try: - res = Jellyfin().post_data( - url=f'[HOST]Items/{itemid}?api_key=[APIKEY]', - data=json.dumps(iteminfo), - headers={ - "Content-Type": "application/json" - } - ) - if res and res.status_code in [200, 204]: - return True - else: - logger.error(f"更新Jellyfin媒体项详情失败,错误码:{res.status_code}") - return False - except Exception as err: - logger.error(f"更新Jellyfin媒体项详情失败:{str(err)}") - return False - - def __set_plex_iteminfo(): - """ - 更新Plex媒体项详情 - """ - try: - plexitem = Plex().get_plex().library.fetchItem(ekey=itemid) - if 'CommunityRating' in iteminfo: - edits = { - 'audienceRating.value': iteminfo['CommunityRating'], - 'audienceRating.locked': 1 - } - plexitem.edit(**edits) - plexitem.editTitle(iteminfo['Name']).editSummary(iteminfo['Overview']).reload() - return True - except Exception as err: - logger.error(f"更新Plex媒体项详情失败:{str(err)}") - return False - - if server == "emby": - return __set_emby_iteminfo() - elif server == "jellyfin": - return __set_jellyfin_iteminfo() - else: - return __set_plex_iteminfo() - - @staticmethod - @retry(RequestException, logger=logger) - def set_item_image(server: str, itemid: str, imageurl: str): - """ - 更新媒体项图片 - """ - - def __download_image(): - """ - 下载图片 - """ - try: - if "doubanio.com" in imageurl: - r = RequestUtils(headers={ - 'Referer': "https://movie.douban.com/" - }, ua=settings.USER_AGENT).get_res(url=imageurl, raise_exception=True) - else: - r = RequestUtils().get_res(url=imageurl, raise_exception=True) - if r: - return base64.b64encode(r.content).decode() - else: - logger.warn(f"{imageurl} 图片下载失败,请检查网络连通性") - except Exception as err: - logger.error(f"下载图片失败:{str(err)}") - return None - - def __set_emby_item_image(_base64: str): - """ - 更新Emby媒体项图片 - """ - try: - url = f'[HOST]emby/Items/{itemid}/Images/Primary?api_key=[APIKEY]' - res = Emby().post_data( - 
url=url, - data=_base64, - headers={ - "Content-Type": "image/png" - } - ) - if res and res.status_code in [200, 204]: - return True - else: - logger.error(f"更新Emby媒体项图片失败,错误码:{res.status_code}") - return False - except Exception as result: - logger.error(f"更新Emby媒体项图片失败:{result}") - return False - - def __set_jellyfin_item_image(): - """ - 更新Jellyfin媒体项图片 - # FIXME 改为预下载图片 - """ - try: - url = f'[HOST]Items/{itemid}/RemoteImages/Download?' \ - f'Type=Primary&ImageUrl={imageurl}&ProviderName=TheMovieDb&api_key=[APIKEY]' - res = Jellyfin().post_data(url=url) - if res and res.status_code in [200, 204]: - return True - else: - logger.error(f"更新Jellyfin媒体项图片失败,错误码:{res.status_code}") - return False - except Exception as err: - logger.error(f"更新Jellyfin媒体项图片失败:{err}") - return False - - def __set_plex_item_image(): - """ - 更新Plex媒体项图片 - # FIXME 改为预下载图片 - """ - try: - plexitem = Plex().get_plex().library.fetchItem(ekey=itemid) - plexitem.uploadPoster(url=imageurl) - return True - except Exception as err: - logger.error(f"更新Plex媒体项图片失败:{err}") - return False - - if server == "emby": - # 下载图片获取base64 - image_base64 = __download_image() - if image_base64: - return __set_emby_item_image(image_base64) - elif server == "jellyfin": - return __set_jellyfin_item_image() - else: - return __set_plex_item_image() - return None - - @staticmethod - def __get_chinese_name(personinfo: dict) -> str: - """ - 获取TMDB别名中的中文名 - """ - try: - also_known_as = personinfo.get("also_known_as") or [] - if also_known_as: - for name in also_known_as: - if name and StringUtils.is_chinese(name): - # 使用cn2an将繁体转化为简体 - return zhconv.convert(name, "zh-hans") - except Exception as err: - logger.error(f"获取人物中文名失败:{err}") - return "" - - def stop_service(self): - """ - 停止服务 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) diff --git a/app/plugins/rsssubscribe/__init__.py b/app/plugins/rsssubscribe/__init__.py deleted file mode 100644 index 6f375bec..00000000 --- a/app/plugins/rsssubscribe/__init__.py +++ /dev/null @@ -1,668 +0,0 @@ -import datetime -import re -from pathlib import Path -from threading import Lock -from typing import Optional, Any, List, Dict, Tuple - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.chain.download import DownloadChain -from app.chain.search import SearchChain -from app.chain.subscribe import SubscribeChain -from app.core.config import settings -from app.core.context import MediaInfo, TorrentInfo, Context -from app.core.metainfo import MetaInfo -from app.helper.rss import RssHelper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import SystemConfigKey, MediaType - -lock = Lock() - - -class RssSubscribe(_PluginBase): - # 插件名称 - plugin_name = "自定义订阅" - # 插件描述 - plugin_desc = "定时刷新RSS报文,识别内容后添加订阅或直接下载。" - # 插件图标 - plugin_icon = "rss.png" - # 主题色 - plugin_color = "#F78421" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "rsssubscribe_" - # 加载顺序 - plugin_order = 19 - # 可使用的用户级别 - auth_level = 2 - - # 私有变量 - _scheduler: Optional[BackgroundScheduler] = None - _cache_path: Optional[Path] = None - rsshelper = None - downloadchain = None - searchchain = None - subscribechain = None - 
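# ----------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the deleted plugin): the
# __get_chinese_name helper above picks the first Chinese entry from TMDB's
# "also_known_as" aliases and normalises it to Simplified Chinese with zhconv.
# A minimal standalone version follows; the regex check stands in for the
# project's StringUtils.is_chinese purely to keep the sketch self-contained,
# and the function name `first_chinese_alias` is hypothetical.
import re

import zhconv


def first_chinese_alias(person: dict) -> str:
    """Return the first Simplified-Chinese alias from a TMDB person detail dict."""
    for name in person.get("also_known_as") or []:
        if name and re.search(r"[\u4e00-\u9fff]", name):
            return zhconv.convert(name, "zh-hans")
    return ""
# Example: first_chinese_alias({"also_known_as": ["Daniel Craig", "丹尼爾·克雷格"]})
# returns "丹尼尔·克雷格".
# ----------------------------------------------------------------------------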
- # 配置属性 - _enabled: bool = False - _cron: str = "" - _notify: bool = False - _onlyonce: bool = False - _address: str = "" - _include: str = "" - _exclude: str = "" - _proxy: bool = False - _filter: bool = False - _clear: bool = False - _clearflag: bool = False - _action: str = "subscribe" - _save_path: str = "" - - def init_plugin(self, config: dict = None): - self.rsshelper = RssHelper() - self.downloadchain = DownloadChain() - self.searchchain = SearchChain() - self.subscribechain = SubscribeChain() - - # 停止现有任务 - self.stop_service() - - # 配置 - if config: - self._enabled = config.get("enabled") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._onlyonce = config.get("onlyonce") - self._address = config.get("address") - self._include = config.get("include") - self._exclude = config.get("exclude") - self._proxy = config.get("proxy") - self._filter = config.get("filter") - self._clear = config.get("clear") - self._action = config.get("action") - self._save_path = config.get("save_path") - - if self._enabled or self._onlyonce: - - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - try: - self._scheduler.add_job(func=self.check, - trigger=CronTrigger.from_crontab(self._cron), - name="RSS订阅") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - else: - self._scheduler.add_job(self.check, "interval", minutes=30, name="RSS订阅") - - if self._onlyonce: - logger.info(f"RSS订阅服务启动,立即运行一次") - self._scheduler.add_job(func=self.check, trigger='date', - run_date=datetime.datetime.now( - tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3) - ) - - if self._onlyonce or self._clear: - # 关闭一次性开关 - self._onlyonce = False - # 记录清理缓存设置 - self._clearflag = self._clear - # 关闭清理缓存开关 - self._clear = False - # 保存设置 - self.__update_config() - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - pass - - def get_api(self) -> List[Dict[str, Any]]: - """ - 获取插件API - [{ - "path": "/xx", - "endpoint": self.xxx, - "methods": ["GET", "POST"], - "summary": "API说明" - }] - """ - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'action', - 'label': '动作', - 'items': [ - 
{'title': '订阅', 'value': 'subscribe'}, - {'title': '下载', 'value': 'download'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'address', - 'label': 'RSS地址', - 'rows': 3, - 'placeholder': '每行一个RSS地址' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'include', - 'label': '包含', - 'placeholder': '支持正则表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'exclude', - 'label': '排除', - 'placeholder': '支持正则表达式' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'save_path', - 'label': '保存目录', - 'placeholder': '下载时有效,留空自动' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'proxy', - 'label': '使用代理服务器', - } - } - ] - }, { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4, - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'filter', - 'label': '使用过滤规则', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear', - 'label': '清理历史记录', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "onlyonce": False, - "cron": "*/30 * * * *", - "address": "", - "include": "", - "exclude": "", - "proxy": False, - "clear": False, - "filter": False, - "action": "subscribe", - "save_path": "" - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # 查询同步详情 - historys = self.get_data('history') - if not historys: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - historys = sorted(historys, key=lambda x: x.get('time'), reverse=True) - # 拼装页面 - contents = [] - for history in historys: - title = history.get("title") - poster = history.get("poster") - mtype = history.get("type") - time_str = history.get("time") - contents.append( - { - 'component': 'VCard', - 'content': [ - { - 'component': 'div', - 'props': { - 'class': 'd-flex justify-space-start flex-nowrap flex-row', - }, - 'content': [ - { - 'component': 'div', - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': poster, - 'height': 120, - 'width': 80, - 'aspect-ratio': '2/3', - 'class': 'object-cover shadow ring-gray-500', - 'cover': True - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'VCardSubtitle', - 'props': { - 'class': 'pa-2 font-bold break-words whitespace-break-spaces' - }, - 'text': title - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'类型:{mtype}' - }, - { - 'component': 'VCardText', - 'props': { - 'class': 'pa-0 px-2' - }, - 'text': f'时间:{time_str}' - } - ] - } - ] - } - ] - } - ) - - return [ - { - 'component': 'div', - 'props': { - 'class': 'grid gap-3 grid-info-card', - }, - 'content': contents - } - ] - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - 
self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - def __update_config(self): - """ - 更新设置 - """ - self.update_config({ - "enabled": self._enabled, - "notify": self._notify, - "onlyonce": self._onlyonce, - "cron": self._cron, - "address": self._address, - "include": self._include, - "exclude": self._exclude, - "proxy": self._proxy, - "clear": self._clear, - "filter": self._filter, - "action": self._action, - "save_path": self._save_path - }) - - def check(self): - """ - 通过用户RSS同步豆瓣想看数据 - """ - if not self._address: - return - # 读取历史记录 - if self._clearflag: - history = [] - else: - history: List[dict] = self.get_data('history') or [] - for url in self._address.split("\n"): - # 处理每一个RSS链接 - if not url: - continue - logger.info(f"开始刷新RSS:{url} ...") - results = self.rsshelper.parse(url, proxy=self._proxy) - if not results: - logger.error(f"未获取到RSS数据:{url}") - return - # 过滤规则 - filter_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules) - # 解析数据 - for result in results: - try: - title = result.get("title") - description = result.get("description") - enclosure = result.get("enclosure") - link = result.get("link") - sise = result.get("sise") - pubdate: datetime.datetime = result.get("pubdate") - # 检查是否处理过 - if not title or title in [h.get("key") for h in history]: - continue - # 检查规则 - if self._include and not re.search(r"%s" % self._include, - f"{title} {description}", re.IGNORECASE): - logger.info(f"{title} - {description} 不符合包含规则") - continue - if self._exclude and re.search(r"%s" % self._exclude, - f"{title} {description}", re.IGNORECASE): - logger.info(f"{title} - {description} 不符合排除规则") - continue - # 识别媒体信息 - meta = MetaInfo(title=title, subtitle=description) - if not meta.name: - logger.warn(f"{title} 未识别到有效数据") - continue - mediainfo: MediaInfo = self.chain.recognize_media(meta=meta) - if not mediainfo: - logger.warn(f'未识别到媒体信息,标题:{title}') - continue - # 种子 - torrentinfo = TorrentInfo( - title=title, - description=description, - enclosure=enclosure, - page_url=link, - size=sise, - pubdate=pubdate.strftime("%Y-%m-%d %H:%M:%S") if pubdate else None, - site_proxy=self._proxy, - ) - # 过滤种子 - if self._filter: - result = self.chain.filter_torrents( - rule_string=filter_rule, - torrent_list=[torrentinfo], - mediainfo=mediainfo - ) - if not result: - logger.info(f"{title} {description} 不匹配过滤规则") - continue - # 查询缺失的媒体信息 - exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo) - if exist_flag: - logger.info(f'{mediainfo.title_year} 媒体库中已存在') - continue - else: - if self._action == "download": - if mediainfo.type == MediaType.TV: - if no_exists: - exist_info = no_exists.get(mediainfo.tmdb_id) - season_info = exist_info.get(meta.begin_season or 1) - if not season_info: - logger.info(f'{mediainfo.title_year} {meta.season} 己存在') - continue - if (season_info.episodes - and not set(meta.episode_list).issubset(set(season_info.episodes))): - logger.info(f'{mediainfo.title_year} {meta.season_episode} 己存在') - continue - # 添加下载 - result = self.downloadchain.download_single( - context=Context( - meta_info=meta, - media_info=mediainfo, - torrent_info=torrentinfo, - ), - save_path=self._save_path, - username="RSS订阅" - ) - if not result: - logger.error(f'{title} 下载失败') - continue - else: - # 检查是否在订阅中 - subflag = self.subscribechain.exists(mediainfo=mediainfo, meta=meta) - if subflag: - logger.info(f'{mediainfo.title_year} 
{meta.season} 正在订阅中') - continue - # 添加订阅 - self.subscribechain.add(title=mediainfo.title, - year=mediainfo.year, - mtype=mediainfo.type, - tmdbid=mediainfo.tmdb_id, - season=meta.begin_season, - exist_ok=True, - username="RSS订阅") - # 存储历史记录 - history.append({ - "title": f"{mediainfo.title} {meta.season}", - "key": f"{title}", - "type": mediainfo.type.value, - "year": mediainfo.year, - "poster": mediainfo.get_poster_image(), - "overview": mediainfo.overview, - "tmdbid": mediainfo.tmdb_id, - "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - }) - except Exception as err: - logger.error(f'刷新RSS数据出错:{str(err)}') - logger.info(f"RSS {url} 刷新完成") - # 保存历史记录 - self.save_data('history', history) - # 缓存只清理一次 - self._clearflag = False diff --git a/app/plugins/siterefresh/__init__.py b/app/plugins/siterefresh/__init__.py deleted file mode 100644 index b1613b34..00000000 --- a/app/plugins/siterefresh/__init__.py +++ /dev/null @@ -1,227 +0,0 @@ -from typing import Any, List, Dict, Tuple - -from app.chain.site import SiteChain -from app.core.event import eventmanager -from app.db.site_oper import SiteOper -from app.log import logger -from app.plugins import _PluginBase -from app.schemas.types import EventType, NotificationType -from app.utils.string import StringUtils - - -class SiteRefresh(_PluginBase): - # 插件名称 - plugin_name = "站点自动更新" - # 插件描述 - plugin_desc = "自动登录获取站点Cookie和User-Agent。" - # 插件图标 - plugin_icon = "login.png" - # 主题色 - plugin_color = "#99b3ff" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "siterefresh_" - # 加载顺序 - plugin_order = 2 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - siteoper: SiteOper = None - - # 配置属性 - _enabled: bool = False - _notify: bool = False - """ - 格式 - 站点domain|用户名|用户密码 - """ - _siteconf: list = [] - - def init_plugin(self, config: dict = None): - self.siteoper = SiteOper() - # 配置 - if config: - self._enabled = config.get("enabled") - self._notify = config.get("notify") - self._siteconf = str(config.get("siteconf")).split('\n') - - def get_state(self) -> bool: - return self._enabled - - @eventmanager.register(EventType.SiteLogin) - def site_login(self, event): - """ - 开始站点登录 - """ - if not self.get_state(): - return - - # 站点id - site_id = event.event_data.get("site_id") - if not site_id: - logger.error(f"未获取到site_id") - return - - site = self.siteoper.get(site_id) - if not site: - logger.error(f"未获取到site_id {site_id} 对应的站点数据") - return - - site_name = site.name - logger.info(f"开始尝试登录站点 {site_name}") - siteurl, siteuser, sitepwd = None, None, None - # 判断site是否已配置用户名密码 - for site_conf in self._siteconf: - if not site_conf: - continue - site_confs = str(site_conf).split("|") - if len(site_confs) == 3: - siteurl = site_confs[0] - siteuser = site_confs[1] - sitepwd = site_confs[2] - else: - logger.error(f"{site_conf}配置有误,已跳过") - continue - - # 判断是否是目标域名 - if str(siteurl) in StringUtils.get_url_domain(site.url): - # 找到目标域名配置,跳出循环 - break - - # 开始登录更新cookie和ua - if siteurl and siteuser and sitepwd: - state, messages = SiteChain().update_cookie(site_info=site, - username=siteuser, - password=sitepwd) - if state: - logger.info(f"站点{site_name}自动更新Cookie和Ua成功") - else: - logger.error(f"站点{site_name}自动更新Cookie和Ua失败") - - if self._notify: - self.post_message(mtype=NotificationType.SiteMessage, - title=f"站点 {site_name} Cookie已失效。", - text=f"自动更新Cookie和Ua{'成功' if state else '失败'}") - else: - logger.error(f"未获取到站点{site_name}配置,已跳过") - - @staticmethod - 
def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '开启通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'siteconf', - 'label': '站点配置', - 'rows': 5, - 'placeholder': '每一行一个站点,配置方式:\n' - '域名domain|用户名|用户密码\n' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '站点签到提示Cookie过期时自动触发。' - '不支持开启两步认证的站点。' - '不是所有站点都支持,失败请手动更新。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": False, - "siteconf": "" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/app/plugins/sitestatistic/__init__.py b/app/plugins/sitestatistic/__init__.py deleted file mode 100644 index 9ba89c26..00000000 --- a/app/plugins/sitestatistic/__init__.py +++ /dev/null @@ -1,1172 +0,0 @@ -import re -import warnings -from datetime import datetime, timedelta -from multiprocessing.dummy import Pool as ThreadPool -from threading import Lock -from typing import Optional, Any, List, Dict, Tuple - -import pytz -import requests -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from ruamel.yaml import CommentedMap - -from app import schemas -from app.core.config import settings -from app.core.event import Event -from app.core.event import eventmanager -from app.db.models.site import Site -from app.db.site_oper import SiteOper -from app.helper.browser import PlaywrightHelper -from app.helper.module import ModuleHelper -from app.helper.sites import SitesHelper -from app.log import logger -from app.plugins import _PluginBase -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo -from app.schemas.types import EventType, NotificationType -from app.utils.http import RequestUtils -from app.utils.string import StringUtils -from app.utils.timer import TimerUtils - -warnings.filterwarnings("ignore", category=FutureWarning) - -lock = Lock() - - -class SiteStatistic(_PluginBase): - # 插件名称 - plugin_name = "站点数据统计" - # 插件描述 - plugin_desc = "自动统计和展示站点数据。" - # 插件图标 - plugin_icon = "statistic.png" - # 主题色 - plugin_color = "#324A5E" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "lightolly" - # 作者主页 - author_url = "https://github.com/lightolly" - # 插件配置项ID前缀 - plugin_config_prefix = "sitestatistic_" - # 加载顺序 - plugin_order = 1 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - sites = None - siteoper = None - _scheduler: Optional[BackgroundScheduler] = None - _last_update_time: Optional[datetime] = None - _sites_data: dict = {} - _site_schema: List[ISiteUserInfo] = None - - # 配置属性 - _enabled: bool = False - _onlyonce: bool = False - _cron: str = "" - _notify: bool = False - 
_queue_cnt: int = 5 - _statistic_type: str = None - _statistic_sites: list = [] - - def init_plugin(self, config: dict = None): - self.sites = SitesHelper() - self.siteoper = SiteOper() - # 停止现有任务 - self.stop_service() - - # 配置 - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._queue_cnt = config.get("queue_cnt") - self._statistic_type = config.get("statistic_type") or "all" - self._statistic_sites = config.get("statistic_sites") or [] - - # 过滤掉已删除的站点 - all_sites = [site.id for site in self.siteoper.list_order_by_pri()] + [site.get("id") for site in - self.__custom_sites()] - self._statistic_sites = [site_id for site_id in all_sites if site_id in self._statistic_sites] - self.__update_config() - - if self._enabled or self._onlyonce: - # 加载模块 - self._site_schema = ModuleHelper.load('app.plugins.sitestatistic.siteuserinfo', - filter_func=lambda _, obj: hasattr(obj, 'schema')) - - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - - self._site_schema.sort(key=lambda x: x.order) - # 站点上一次更新时间 - self._last_update_time = None - # 站点数据 - self._sites_data = {} - - # 立即运行一次 - if self._onlyonce: - logger.info(f"站点数据统计服务启动,立即运行一次") - self._scheduler.add_job(self.refresh_all_site_data, 'date', - run_date=datetime.now( - tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3) - ) - # 关闭一次性开关 - self._onlyonce = False - - # 保存配置 - self.__update_config() - - # 周期运行 - if self._enabled and self._cron: - try: - self._scheduler.add_job(func=self.refresh_all_site_data, - trigger=CronTrigger.from_crontab(self._cron), - name="站点数据统计") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - # 推送实时消息 - self.systemmessage.put(f"执行周期配置错误:{str(err)}") - else: - triggers = TimerUtils.random_scheduler(num_executions=1, - begin_hour=0, - end_hour=1, - min_interval=1, - max_interval=60) - for trigger in triggers: - self._scheduler.add_job(self.refresh_all_site_data, "cron", - hour=trigger.hour, minute=trigger.minute, - name="站点数据统计") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - """ - 定义远程控制命令 - :return: 命令关键字、事件、描述、附带数据 - """ - return [{ - "cmd": "/site_statistic", - "event": EventType.SiteStatistic, - "desc": "站点数据统计", - "category": "站点", - "data": {} - }] - - def get_api(self) -> List[Dict[str, Any]]: - """ - 获取插件API - [{ - "path": "/xx", - "endpoint": self.xxx, - "methods": ["GET", "POST"], - "summary": "API说明" - }] - """ - return [{ - "path": "/refresh_by_domain", - "endpoint": self.refresh_by_domain, - "methods": ["GET"], - "summary": "刷新站点数据", - "description": "刷新对应域名的站点数据", - }] - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - # 站点的可选项(内置站点 + 自定义站点) - customSites = self.__custom_sites() - - site_options = ([{"title": site.name, "value": site.id} - for site in self.siteoper.list_order_by_pri()] - + [{"title": site.get("name"), "value": site.get("id")} - for site in customSites]) - - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 
'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '5位cron表达式,留空自动' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'queue_cnt', - 'label': '队列数量' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'statistic_type', - 'label': '统计类型', - 'items': [ - {'title': '全量', 'value': 'all'}, - {'title': '增量', 'value': 'add'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'statistic_sites', - 'label': '统计站点', - 'items': site_options - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "notify": True, - "cron": "5 1 * * *", - "queue_cnt": 5, - "statistic_type": "all", - "statistic_sites": [] - } - - def get_page(self) -> List[dict]: - """ - 拼装插件详情页面,需要返回页面配置,同时附带数据 - """ - # - # 最近两天的日期数组 - date_list = [(datetime.now() - timedelta(days=i)).date() for i in range(2)] - # 最近一天的签到数据 - stattistic_data: Dict[str, Dict[str, Any]] = {} - for day in date_list: - current_day = day.strftime("%Y-%m-%d") - stattistic_data = self.get_data(current_day) - if stattistic_data: - break - if not stattistic_data: - return [ - { - 'component': 'div', - 'text': '暂无数据', - 'props': { - 'class': 'text-center', - } - } - ] - # 数据按时间降序排序 - stattistic_data = dict(sorted(stattistic_data.items(), - key=lambda item: item[1].get('upload') or 0, - reverse=True)) - # 总上传量 - total_upload = sum([data.get("upload") - for data in stattistic_data.values() if data.get("upload")]) - # 总下载量 - total_download = sum([data.get("download") - for data in stattistic_data.values() if data.get("download")]) - # 总做种数 - total_seed = sum([data.get("seeding") - for data in stattistic_data.values() if data.get("seeding")]) - # 总做种体积 - total_seed_size = sum([data.get("seeding_size") - for data in stattistic_data.values() if data.get("seeding_size")]) - - # 站点数据明细 - site_trs = [ - { - 'component': 'tr', - 'props': { - 'class': 'text-sm' - }, - 'content': [ - { - 'component': 'td', - 'props': { - 'class': 'whitespace-nowrap break-keep text-high-emphasis' - }, - 'text': site - }, - { - 'component': 'td', - 'text': data.get("username") - }, - { - 'component': 'td', - 'text': data.get("user_level") - }, - { - 'component': 'td', - 'props': { - 'class': 'text-success' - }, - 'text': StringUtils.str_filesize(data.get("upload")) - }, - { - 'component': 'td', - 'props': { - 'class': 'text-error' - }, - 'text': StringUtils.str_filesize(data.get("download")) - }, - { - 'component': 'td', - 'text': data.get('ratio') - }, - { - 'component': 'td', - 'text': '{:,.1f}'.format(data.get('bonus') or 0) - }, - { - 'component': 'td', - 'text': data.get('seeding') - }, - { - 'component': 'td', - 'text': StringUtils.str_filesize(data.get('seeding_size')) - } - ] - } for site, data in stattistic_data.items() if not 
data.get("err_msg") - ] - - # 拼装页面 - return [ - { - 'component': 'VRow', - 'content': [ - # 总上传量 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/upload.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总上传量' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': StringUtils.str_filesize(total_upload) - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 总下载量 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/download.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总下载量' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': StringUtils.str_filesize(total_download) - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 总做种数 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/seed.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总做种数' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-h6' - }, - 'text': f'{"{:,}".format(total_seed)}' - } - ] - } - ] - } - ] - } - ] - }, - ] - }, - # 总做种体积 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 3, - 'sm': 6 - }, - 'content': [ - { - 'component': 'VCard', - 'props': { - 'variant': 'tonal', - }, - 'content': [ - { - 'component': 'VCardText', - 'props': { - 'class': 'd-flex align-center', - }, - 'content': [ - { - 'component': 'VAvatar', - 'props': { - 'rounded': True, - 'variant': 'text', - 'class': 'me-3' - }, - 'content': [ - { - 'component': 'VImg', - 'props': { - 'src': '/plugin_icon/database.png' - } - } - ] - }, - { - 'component': 'div', - 'content': [ - { - 'component': 'span', - 'props': { - 'class': 'text-caption' - }, - 'text': '总做种体积' - }, - { - 'component': 'div', - 'props': { - 'class': 'd-flex align-center flex-wrap' - }, - 'content': [ - { - 'component': 'span', - 'props': { - 
'class': 'text-h6' - }, - 'text': StringUtils.str_filesize(total_seed_size) - } - ] - } - ] - } - ] - } - ] - } - ] - }, - # 各站点数据明细 - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VTable', - 'props': { - 'hover': True - }, - 'content': [ - { - 'component': 'thead', - 'content': [ - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '站点' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '用户名' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '用户等级' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '上传量' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '下载量' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '分享率' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '魔力值' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '做种数' - }, - { - 'component': 'th', - 'props': { - 'class': 'text-start ps-4' - }, - 'text': '做种体积' - } - ] - }, - { - 'component': 'tbody', - 'content': site_trs - } - ] - } - ] - } - ] - } - ] - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) - - def __build_class(self, html_text: str) -> Any: - for site_schema in self._site_schema: - try: - if site_schema.match(html_text): - return site_schema - except Exception as e: - logger.error(f"站点匹配失败 {str(e)}") - return None - - def build(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]: - """ - 构建站点信息 - """ - site_cookie = site_info.get("cookie") - if not site_cookie: - return None - site_name = site_info.get("name") - url = site_info.get("url") - proxy = site_info.get("proxy") - ua = site_info.get("ua") - # 会话管理 - with requests.Session() as session: - proxies = settings.PROXY if proxy else None - proxy_server = settings.PROXY_SERVER if proxy else None - render = site_info.get("render") - - logger.debug(f"站点 {site_name} url={url} site_cookie={site_cookie} ua={ua}") - if render: - # 演染模式 - html_text = PlaywrightHelper().get_page_source(url=url, - cookies=site_cookie, - ua=ua, - proxies=proxy_server) - else: - # 普通模式 - res = RequestUtils(cookies=site_cookie, - session=session, - ua=ua, - proxies=proxies - ).get_res(url=url) - if res and res.status_code == 200: - if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE): - res.encoding = "utf-8" - else: - res.encoding = res.apparent_encoding - html_text = res.text - # 第一次登录反爬 - if html_text.find("title") == -1: - i = html_text.find("window.location") - if i == -1: - return None - tmp_url = url + html_text[i:html_text.find(";")] \ - .replace("\"", "") \ - .replace("+", "") \ - .replace(" ", "") \ - .replace("window.location=", "") - res = RequestUtils(cookies=site_cookie, - session=session, - ua=ua, - proxies=proxies - ).get_res(url=tmp_url) - if res and res.status_code == 200: - if "charset=utf-8" in res.text or "charset=UTF-8" in res.text: - res.encoding = "UTF-8" - else: - res.encoding = res.apparent_encoding - html_text = res.text - if not html_text: - return None - else: - logger.error("站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code)) - return None - - # 兼容假首页情况,假首页通常没有 schemas.Response: - """ - 刷新一个站点数据,可由API调用 - """ - 
site_info = self.sites.get_indexer(domain) - if site_info: - site_data = self.__refresh_site_data(site_info) - if site_data: - return schemas.Response( - success=True, - message=f"站点 {domain} 刷新成功", - data=site_data.to_dict() - ) - return schemas.Response( - success=False, - message=f"站点 {domain} 刷新数据失败,未获取到数据" - ) - return schemas.Response( - success=False, - message=f"站点 {domain} 不存在" - ) - - def __refresh_site_data(self, site_info: CommentedMap) -> Optional[ISiteUserInfo]: - """ - 更新单个site 数据信息 - :param site_info: - :return: - """ - site_name = site_info.get('name') - site_url = site_info.get('url') - if not site_url: - return None - unread_msg_notify = True - try: - site_user_info: ISiteUserInfo = self.build(site_info=site_info) - if site_user_info: - logger.debug(f"站点 {site_name} 开始以 {site_user_info.site_schema()} 模型解析") - # 开始解析 - site_user_info.parse() - logger.debug(f"站点 {site_name} 解析完成") - - # 获取不到数据时,仅返回错误信息,不做历史数据更新 - if site_user_info.err_msg: - self._sites_data.update({site_name: {"err_msg": site_user_info.err_msg}}) - return None - - # 发送通知,存在未读消息 - self.__notify_unread_msg(site_name, site_user_info, unread_msg_notify) - - # 分享率接近1时,发送消息提醒 - if site_user_info.ratio and float(site_user_info.ratio) < 1: - self.post_message(mtype=NotificationType.SiteMessage, - title=f"【站点分享率低预警】", - text=f"站点 {site_user_info.site_name} 分享率 {site_user_info.ratio},请注意!") - - self._sites_data.update( - { - site_name: { - "upload": site_user_info.upload, - "username": site_user_info.username, - "user_level": site_user_info.user_level, - "join_at": site_user_info.join_at, - "download": site_user_info.download, - "ratio": site_user_info.ratio, - "seeding": site_user_info.seeding, - "seeding_size": site_user_info.seeding_size, - "leeching": site_user_info.leeching, - "bonus": site_user_info.bonus, - "url": site_url, - "err_msg": site_user_info.err_msg, - "message_unread": site_user_info.message_unread - } - }) - return site_user_info - - except Exception as e: - logger.error(f"站点 {site_name} 获取流量数据失败:{str(e)}") - return None - - def __notify_unread_msg(self, site_name: str, site_user_info: ISiteUserInfo, unread_msg_notify: bool): - if site_user_info.message_unread <= 0: - return - if self._sites_data.get(site_name, {}).get('message_unread') == site_user_info.message_unread: - return - if not unread_msg_notify: - return - - # 解析出内容,则发送内容 - if len(site_user_info.message_unread_contents) > 0: - for head, date, content in site_user_info.message_unread_contents: - msg_title = f"【站点 {site_user_info.site_name} 消息】" - msg_text = f"时间:{date}\n标题:{head}\n内容:\n{content}" - self.post_message(mtype=NotificationType.SiteMessage, title=msg_title, text=msg_text) - else: - self.post_message(mtype=NotificationType.SiteMessage, - title=f"站点 {site_user_info.site_name} 收到 " - f"{site_user_info.message_unread} 条新消息,请登陆查看") - - @eventmanager.register(EventType.SiteStatistic) - def refresh(self, event: Event): - """ - 刷新站点数据 - """ - if event: - logger.info("收到命令,开始刷新站点数据 ...") - self.post_message(channel=event.event_data.get("channel"), - title="开始刷新站点数据 ...", - userid=event.event_data.get("user")) - self.refresh_all_site_data() - if event: - self.post_message(channel=event.event_data.get("channel"), - title="站点数据刷新完成!", userid=event.event_data.get("user")) - - def refresh_all_site_data(self): - """ - 多线程刷新站点下载上传量,默认间隔6小时 - """ - if not self.sites.get_indexers(): - return - - logger.info("开始刷新站点数据 ...") - - with lock: - - all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + 
self.__custom_sites() - # 没有指定站点,默认使用全部站点 - if not self._statistic_sites: - refresh_sites = all_sites - else: - refresh_sites = [site for site in all_sites if - site.get("id") in self._statistic_sites] - if not refresh_sites: - return - - # 并发刷新 - with ThreadPool(min(len(refresh_sites), int(self._queue_cnt or 5))) as p: - p.map(self.__refresh_site_data, refresh_sites) - - # 通知刷新完成 - if self._notify: - yesterday_sites_data = {} - # 增量数据 - if self._statistic_type == "add": - last_update_time = self.get_data("last_update_time") - if last_update_time: - yesterday_sites_data = self.get_data(last_update_time) or {} - - messages = [] - # 按照上传降序排序 - sites = self._sites_data.keys() - uploads = [self._sites_data[site].get("upload") or 0 if not yesterday_sites_data.get(site) else - (self._sites_data[site].get("upload") or 0) - ( - yesterday_sites_data[site].get("upload") or 0) for site in sites] - downloads = [self._sites_data[site].get("download") or 0 if not yesterday_sites_data.get(site) else - (self._sites_data[site].get("download") or 0) - ( - yesterday_sites_data[site].get("download") or 0) for site in sites] - data_list = sorted(list(zip(sites, uploads, downloads)), - key=lambda x: x[1], - reverse=True) - # 总上传 - incUploads = 0 - # 总下载 - incDownloads = 0 - for data in data_list: - site = data[0] - upload = int(data[1]) - download = int(data[2]) - if upload > 0 or download > 0: - incUploads += int(upload) - incDownloads += int(download) - messages.append(f"【{site}】\n" - f"上传量:{StringUtils.str_filesize(upload)}\n" - f"下载量:{StringUtils.str_filesize(download)}\n" - f"————————————") - - if incDownloads or incUploads: - messages.insert(0, f"【汇总】\n" - f"总上传:{StringUtils.str_filesize(incUploads)}\n" - f"总下载:{StringUtils.str_filesize(incDownloads)}\n" - f"————————————") - self.post_message(mtype=NotificationType.SiteMessage, - title="站点数据统计", text="\n".join(messages)) - - # 获取今天的日期 - key = datetime.now().strftime('%Y-%m-%d') - # 保存数据 - self.save_data(key, self._sites_data) - - # 更新时间 - self.save_data("last_update_time", key) - logger.info("站点数据刷新完成") - - def __custom_sites(self) -> List[Any]: - custom_sites = [] - custom_sites_config = self.get_config("CustomSites") - if custom_sites_config and custom_sites_config.get("enabled"): - custom_sites = custom_sites_config.get("sites") - return custom_sites - - def __update_config(self): - self.update_config({ - "enabled": self._enabled, - "onlyonce": self._onlyonce, - "cron": self._cron, - "notify": self._notify, - "queue_cnt": self._queue_cnt, - "statistic_type": self._statistic_type, - "statistic_sites": self._statistic_sites, - }) - - @eventmanager.register(EventType.SiteDeleted) - def site_deleted(self, event): - """ - 删除对应站点选中 - """ - site_id = event.event_data.get("site_id") - config = self.get_config() - if config: - statistic_sites = config.get("statistic_sites") - if statistic_sites: - if isinstance(statistic_sites, str): - statistic_sites = [statistic_sites] - - # 删除对应站点 - if site_id: - statistic_sites = [site for site in statistic_sites if int(site) != int(site_id)] - else: - # 清空 - statistic_sites = [] - - # 若无站点,则停止 - if len(statistic_sites) == 0: - self._enabled = False - - self._statistic_sites = statistic_sites - # 保存配置 - self.__update_config() diff --git a/app/plugins/sitestatistic/siteuserinfo/__init__.py b/app/plugins/sitestatistic/siteuserinfo/__init__.py deleted file mode 100644 index e8e218f8..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/__init__.py +++ /dev/null @@ -1,338 +0,0 @@ -# -*- coding: utf-8 -*- -import json -import 
re -from abc import ABCMeta, abstractmethod -from enum import Enum -from typing import Optional -from urllib.parse import urljoin, urlsplit - -from requests import Session - -from app.core.config import settings -from app.helper.cloudflare import under_challenge -from app.log import logger -from app.utils.http import RequestUtils -from app.utils.site import SiteUtils - -SITE_BASE_ORDER = 1000 - - -# 站点框架 -class SiteSchema(Enum): - DiscuzX = "Discuz!" - Gazelle = "Gazelle" - Ipt = "IPTorrents" - NexusPhp = "NexusPhp" - NexusProject = "NexusProject" - NexusRabbit = "NexusRabbit" - NexusHhanclub = "NexusHhanclub" - SmallHorse = "Small Horse" - Unit3d = "Unit3d" - TorrentLeech = "TorrentLeech" - FileList = "FileList" - TNode = "TNode" - - -class ISiteUserInfo(metaclass=ABCMeta): - # 站点模版 - schema = SiteSchema.NexusPhp - # 站点解析时判断顺序,值越小越先解析 - order = SITE_BASE_ORDER - - def __init__(self, site_name: str, - url: str, - site_cookie: str, - index_html: str, - session: Session = None, - ua: str = None, - emulate: bool = False, - proxy: bool = None): - super().__init__() - # 站点信息 - self.site_name = None - self.site_url = None - # 用户信息 - self.username = None - self.userid = None - # 未读消息 - self.message_unread = 0 - self.message_unread_contents = [] - - # 流量信息 - self.upload = 0 - self.download = 0 - self.ratio = 0 - - # 种子信息 - self.seeding = 0 - self.leeching = 0 - self.uploaded = 0 - self.completed = 0 - self.incomplete = 0 - self.seeding_size = 0 - self.leeching_size = 0 - self.uploaded_size = 0 - self.completed_size = 0 - self.incomplete_size = 0 - # 做种人数, 种子大小 - self.seeding_info = [] - - # 用户详细信息 - self.user_level = None - self.join_at = None - self.bonus = 0.0 - - # 错误信息 - self.err_msg = None - # 内部数据 - self._base_url = None - self._site_cookie = None - self._index_html = None - self._addition_headers = None - - # 站点页面 - self._brief_page = "index.php" - self._user_detail_page = "userdetails.php?id=" - self._user_traffic_page = "index.php" - self._torrent_seeding_page = "getusertorrentlistajax.php?userid=" - self._user_mail_unread_page = "messages.php?action=viewmailbox&box=1&unread=yes" - self._sys_mail_unread_page = "messages.php?action=viewmailbox&box=-2&unread=yes" - self._torrent_seeding_params = None - self._torrent_seeding_headers = None - - split_url = urlsplit(url) - self.site_name = site_name - self.site_url = url - self._base_url = f"{split_url.scheme}://{split_url.netloc}" - self._site_cookie = site_cookie - self._index_html = index_html - self._session = session if session else None - self._ua = ua - - self._emulate = emulate - self._proxy = proxy - - def site_schema(self) -> SiteSchema: - """ - 站点解析模型 - :return: 站点解析模型 - """ - return self.schema - - @classmethod - def match(cls, html_text: str) -> bool: - """ - 是否匹配当前解析模型 - :param html_text: 站点首页html - :return: 是否匹配 - """ - pass - - def parse(self): - """ - 解析站点信息 - :return: - """ - if not self._parse_logged_in(self._index_html): - return - - self._parse_site_page(self._index_html) - self._parse_user_base_info(self._index_html) - self._pase_unread_msgs() - if self._user_traffic_page: - self._parse_user_traffic_info(self._get_page_content(urljoin(self._base_url, self._user_traffic_page))) - if self._user_detail_page: - self._parse_user_detail_info(self._get_page_content(urljoin(self._base_url, self._user_detail_page))) - - self._parse_seeding_pages() - self.seeding_info = json.dumps(self.seeding_info) - - def _pase_unread_msgs(self): - """ - 解析所有未读消息标题和内容 - :return: - """ - unread_msg_links = [] - if self.message_unread > 0: - links 
= {self._user_mail_unread_page, self._sys_mail_unread_page} - for link in links: - if not link: - continue - - msg_links = [] - next_page = self._parse_message_unread_links( - self._get_page_content(urljoin(self._base_url, link)), msg_links) - while next_page: - next_page = self._parse_message_unread_links( - self._get_page_content(urljoin(self._base_url, next_page)), msg_links) - - unread_msg_links.extend(msg_links) - - for msg_link in unread_msg_links: - logger.debug(f"{self.site_name} 信息链接 {msg_link}") - head, date, content = self._parse_message_content(self._get_page_content(urljoin(self._base_url, msg_link))) - logger.debug(f"{self.site_name} 标题 {head} 时间 {date} 内容 {content}") - self.message_unread_contents.append((head, date, content)) - - def _parse_seeding_pages(self): - if self._torrent_seeding_page: - # 第一页 - next_page = self._parse_user_torrent_seeding_info( - self._get_page_content(urljoin(self._base_url, self._torrent_seeding_page), - self._torrent_seeding_params, - self._torrent_seeding_headers)) - - # 其他页处理 - while next_page: - next_page = self._parse_user_torrent_seeding_info( - self._get_page_content(urljoin(urljoin(self._base_url, self._torrent_seeding_page), next_page), - self._torrent_seeding_params, - self._torrent_seeding_headers), - multi_page=True) - - @staticmethod - def _prepare_html_text(html_text): - """ - 处理掉HTML中的干扰部分 - """ - return re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_text)) - - @abstractmethod - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - """ - 获取未阅读消息链接 - :param html_text: - :return: - """ - pass - - def _get_page_content(self, url: str, params: dict = None, headers: dict = None): - """ - :param url: 网页地址 - :param params: post参数 - :param headers: 额外的请求头 - :return: - """ - req_headers = None - proxies = settings.PROXY if self._proxy else None - if self._ua or headers or self._addition_headers: - req_headers = {} - if headers: - req_headers.update(headers) - - req_headers.update({ - "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", - "User-Agent": f"{self._ua}" - }) - - if self._addition_headers: - req_headers.update(self._addition_headers) - - if params: - res = RequestUtils(cookies=self._site_cookie, - session=self._session, - timeout=60, - proxies=proxies, - headers=req_headers).post_res(url=url, data=params) - else: - res = RequestUtils(cookies=self._site_cookie, - session=self._session, - timeout=60, - proxies=proxies, - headers=req_headers).get_res(url=url) - if res is not None and res.status_code in (200, 500, 403): - # 如果cloudflare 有防护,尝试使用浏览器仿真 - if under_challenge(res.text): - logger.warn( - f"{self.site_name} 检测到Cloudflare,请更新Cookie和UA") - return "" - if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE): - res.encoding = "utf-8" - else: - res.encoding = res.apparent_encoding - return res.text - - return "" - - @abstractmethod - def _parse_site_page(self, html_text: str): - """ - 解析站点相关信息页面 - :param html_text: - :return: - """ - pass - - @abstractmethod - def _parse_user_base_info(self, html_text: str): - """ - 解析用户基础信息 - :param html_text: - :return: - """ - pass - - def _parse_logged_in(self, html_text): - """ - 解析用户是否已经登陆 - :param html_text: - :return: True/False - """ - logged_in = SiteUtils.is_logged_in(html_text) - if not logged_in: - self.err_msg = "未检测到已登陆,请检查cookies是否过期" - logger.warn(f"{self.site_name} 未登录,跳过后续操作") - - return logged_in - - @abstractmethod - def _parse_user_traffic_info(self, html_text: str): - """ - 解析用户的上传,下载,分享率等信息 - :param html_text: - 
:return: - """ - pass - - @abstractmethod - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 解析用户的做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - pass - - @abstractmethod - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户的详细信息 - 加入时间/等级/魔力值等 - :param html_text: - :return: - """ - pass - - @abstractmethod - def _parse_message_content(self, html_text): - """ - 解析短消息内容 - :param html_text: - :return: head: message, date: time, content: message content - """ - pass - - def to_dict(self): - """ - 转化为字典 - """ - attributes = [ - attr for attr in dir(self) - if not callable(getattr(self, attr)) and not attr.startswith("_") - ] - return { - attr: getattr(self, attr).value - if isinstance(getattr(self, attr), SiteSchema) - else getattr(self, attr) for attr in attributes - } diff --git a/app/plugins/sitestatistic/siteuserinfo/discuz.py b/app/plugins/sitestatistic/siteuserinfo/discuz.py deleted file mode 100644 index 03fbb81a..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/discuz.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class DiscuzUserInfo(ISiteUserInfo): - schema = SiteSchema.DiscuzX - order = SITE_BASE_ORDER + 10 - - @classmethod - def match(cls, html_text: str) -> bool: - html = etree.HTML(html_text) - if not html: - return False - - printable_text = html.xpath("string(.)") if html else "" - return 'Powered by Discuz!' in printable_text - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - user_info = html.xpath('//a[contains(@href, "&uid=")]') - if user_info: - user_id_match = re.search(r"&uid=(\d+)", user_info[0].attrib['href']) - if user_id_match and user_id_match.group().strip(): - self.userid = user_id_match.group(1) - self._torrent_seeding_page = f"forum.php?&mod=torrents&cat_5up=on" - self._user_detail_page = user_info[0].attrib['href'] - self.username = user_info[0].text.strip() - - def _parse_site_page(self, html_text: str): - # TODO - pass - - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户额外信息,加入时间,等级 - :param html_text: - :return: - """ - html = etree.HTML(html_text) - if not html: - return None - - # 用户等级 - user_levels_text = html.xpath('//a[contains(@href, "usergroup")]/text()') - if user_levels_text: - self.user_level = user_levels_text[-1].strip() - - # 加入日期 - join_at_text = html.xpath('//li[em[text()="注册时间"]]/text()') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip()) - - # 分享率 - ratio_text = html.xpath('//li[contains(.//text(), "分享率")]//text()') - if ratio_text: - ratio_match = re.search(r"\(([\d,.]+)\)", ratio_text[0]) - if ratio_match and ratio_match.group(1).strip(): - self.bonus = StringUtils.str_float(ratio_match.group(1)) - - # 积分 - bouns_text = html.xpath('//li[em[text()="积分"]]/text()') - if bouns_text: - self.bonus = StringUtils.str_float(bouns_text[0].strip()) - - # 上传 - upload_text = html.xpath('//li[em[contains(text(),"上传量")]]/text()') - if upload_text: - self.upload = StringUtils.num_filesize(upload_text[0].strip().split('/')[-1]) - - # 下载 - download_text = html.xpath('//li[em[contains(text(),"下载量")]]/text()') - if download_text: - self.download = 
StringUtils.num_filesize(download_text[0].strip().split('/')[-1]) - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 3 - seeders_col = 4 - # 搜索size列 - if html.xpath('//tr[position()=1]/td[.//img[@class="size"] and .//img[@alt="size"]]'): - size_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="size"] ' - 'and .//img[@alt="size"]]/preceding-sibling::td')) + 1 - # 搜索seeders列 - if html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] and .//img[@alt="seeders"]]'): - seeders_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] ' - 'and .//img[@alt="seeders"]]/preceding-sibling::td')) + 1 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]') - seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i]) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href') - if next_page_text: - next_page = next_page_text[-1].strip() - - return next_page - - def _parse_user_traffic_info(self, html_text: str): - pass - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/file_list.py b/app/plugins/sitestatistic/siteuserinfo/file_list.py deleted file mode 100644 index 611a4dd0..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/file_list.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class FileListSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.FileList - order = SITE_BASE_ORDER + 50 - - @classmethod - def match(cls, html_text: str) -> bool: - html = etree.HTML(html_text) - if not html: - return False - - printable_text = html.xpath("string(.)") if html else "" - return 'Powered by FileList' in printable_text - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = user_detail.group(1) - - self._torrent_seeding_page = f"snatchlist.php?id={self.userid}&action=torrents&type=seeding" - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()') - if ret: - self.username = str(ret[0]) - - def _parse_user_traffic_info(self, html_text: str): - """ - 
上传/下载/分享率 [做种数/魔力值] - :param html_text: - :return: - """ - return - - def _parse_user_detail_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - upload_html = html.xpath('//table//tr/td[text()="Uploaded"]/following-sibling::td//text()') - if upload_html: - self.upload = StringUtils.num_filesize(upload_html[0]) - download_html = html.xpath('//table//tr/td[text()="Downloaded"]/following-sibling::td//text()') - if download_html: - self.download = StringUtils.num_filesize(download_html[0]) - - self.ratio = 0 if self.download == 0 else self.upload / self.download - - user_level_html = html.xpath('//table//tr/td[text()="Class"]/following-sibling::td//text()') - if user_level_html: - self.user_level = user_level_html[0].strip() - - join_at_html = html.xpath('//table//tr/td[contains(text(), "Join")]/following-sibling::td//text()') - if join_at_html: - self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip()) - - bonus_html = html.xpath('//a[contains(@href, "shop.php")]') - if bonus_html: - self.bonus = StringUtils.str_float(bonus_html[0].xpath("string(.)").strip()) - pass - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 6 - seeders_col = 7 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//table/tr[position()>1]/td[{size_col}]') - seeding_seeders = html.xpath(f'//table/tr[position()>1]/td[{seeders_col}]') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip()) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - - return next_page - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/gazelle.py b/app/plugins/sitestatistic/siteuserinfo/gazelle.py deleted file mode 100644 index ae2de5e5..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/gazelle.py +++ /dev/null @@ -1,163 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class GazelleSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.Gazelle - order = SITE_BASE_ORDER - - @classmethod - def match(cls, html_text: str) -> bool: - html = etree.HTML(html_text) - if not html: - return False - - printable_text = html.xpath("string(.)") if html else "" - - return "Powered by Gazelle" in printable_text or "DIC Music" in printable_text - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - tmps = html.xpath('//a[contains(@href, "user.php?id=")]') - if tmps: - user_id_match = re.search(r"user.php\?id=(\d+)", tmps[0].attrib['href']) - if user_id_match and user_id_match.group().strip(): 
- self.userid = user_id_match.group(1) - self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}" - self._user_detail_page = f"user.php?id={self.userid}" - self.username = tmps[0].text.strip() - - tmps = html.xpath('//*[@id="header-uploaded-value"]/@data-value') - if tmps: - self.upload = StringUtils.num_filesize(tmps[0]) - else: - tmps = html.xpath('//li[@id="stats_seeding"]/span/text()') - if tmps: - self.upload = StringUtils.num_filesize(tmps[0]) - - tmps = html.xpath('//*[@id="header-downloaded-value"]/@data-value') - if tmps: - self.download = StringUtils.num_filesize(tmps[0]) - else: - tmps = html.xpath('//li[@id="stats_leeching"]/span/text()') - if tmps: - self.download = StringUtils.num_filesize(tmps[0]) - - self.ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3) - - tmps = html.xpath('//a[contains(@href, "bonus.php")]/@data-tooltip') - if tmps: - bonus_match = re.search(r"([\d,.]+)", tmps[0]) - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1)) - else: - tmps = html.xpath('//a[contains(@href, "bonus.php")]') - if tmps: - bonus_text = tmps[0].xpath("string(.)") - bonus_match = re.search(r"([\d,.]+)", bonus_text) - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1)) - - def _parse_site_page(self, html_text: str): - # TODO - pass - - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户额外信息,加入时间,等级 - :param html_text: - :return: - """ - html = etree.HTML(html_text) - if not html: - return None - - # 用户等级 - user_levels_text = html.xpath('//*[@id="class-value"]/@data-value') - if user_levels_text: - self.user_level = user_levels_text[0].strip() - else: - user_levels_text = html.xpath('//li[contains(text(), "用户等级")]/text()') - if user_levels_text: - self.user_level = user_levels_text[0].split(':')[1].strip() - - # 加入日期 - join_at_text = html.xpath('//*[@id="join-date-value"]/@data-value') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip()) - else: - join_at_text = html.xpath( - '//div[contains(@class, "box_userinfo_stats")]//li[contains(text(), "加入时间")]/span/text()') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip()) - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 3 - # 搜索size列 - if html.xpath('//table[contains(@id, "torrent")]//tr[1]/td'): - size_col = len(html.xpath('//table[contains(@id, "torrent")]//tr[1]/td')) - 3 - # 搜索seeders列 - seeders_col = size_col + 2 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{size_col}]') - seeding_seeders = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{seeders_col}]/text()') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = int(seeding_seeders[i]) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - if multi_page: - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - else: - if not self.seeding: - self.seeding = 
page_seeding - if not self.seeding_size: - self.seeding_size = page_seeding_size - if not self.seeding_info: - self.seeding_info = page_seeding_info - - # 是否存在下页数据 - next_page = None - next_page_text = html.xpath('//a[contains(.//text(), "Next") or contains(.//text(), "下一页")]/@href') - if next_page_text: - next_page = next_page_text[-1].strip() - - return next_page - - def _parse_user_traffic_info(self, html_text: str): - # TODO - pass - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/ipt_project.py b/app/plugins/sitestatistic/siteuserinfo/ipt_project.py deleted file mode 100644 index 9eeb2178..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/ipt_project.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class IptSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.Ipt - order = SITE_BASE_ORDER + 35 - - @classmethod - def match(cls, html_text: str) -> bool: - return 'IPTorrents' in html_text - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - tmps = html.xpath('//a[contains(@href, "/u/")]//text()') - tmps_id = html.xpath('//a[contains(@href, "/u/")]/@href') - if tmps: - self.username = str(tmps[-1]) - if tmps_id: - user_id_match = re.search(r"/u/(\d+)", tmps_id[0]) - if user_id_match and user_id_match.group().strip(): - self.userid = user_id_match.group(1) - self._user_detail_page = f"user.php?u={self.userid}" - self._torrent_seeding_page = f"peers?u={self.userid}" - - tmps = html.xpath('//div[@class = "stats"]/div/div') - if tmps: - self.upload = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[1]).strip()) - self.download = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[2]).strip()) - self.seeding = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[0]) - self.leeching = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[1]) - self.ratio = StringUtils.str_float(str(tmps[0].xpath('span/text()')[0]).strip().replace('-', '0')) - self.bonus = StringUtils.str_float(tmps[0].xpath('a')[3].xpath('text()')[0]) - - def _parse_site_page(self, html_text: str): - # TODO - pass - - def _parse_user_detail_info(self, html_text: str): - html = etree.HTML(html_text) - if not html: - return - - user_levels_text = html.xpath('//tr/th[text()="Class"]/following-sibling::td[1]/text()') - if user_levels_text: - self.user_level = user_levels_text[0].strip() - - # 加入日期 - join_at_text = html.xpath('//tr/th[text()="Join date"]/following-sibling::td[1]/text()') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0]) - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - html = etree.HTML(html_text) - if not html: - return - # seeding start - seeding_end_pos = 3 - if html.xpath('//tr/td[text() = "Leechers"]'): - seeding_end_pos = len(html.xpath('//tr/td[text() = "Leechers"]/../preceding-sibling::tr')) + 1 - seeding_end_pos = seeding_end_pos - 3 - - page_seeding = 0 - page_seeding_size = 0 - seeding_torrents = html.xpath('//tr/td[text() = "Seeders"]/../following-sibling::tr/td[position()=6]/text()') - if 
seeding_torrents: - page_seeding = seeding_end_pos - for per_size in seeding_torrents[:seeding_end_pos]: - if '(' in per_size and ')' in per_size: - per_size = per_size.split('(')[-1] - per_size = per_size.split(')')[0] - - page_seeding_size += StringUtils.num_filesize(per_size) - - self.seeding = page_seeding - self.seeding_size = page_seeding_size - - def _parse_user_traffic_info(self, html_text: str): - # TODO - pass - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/nexus_hhanclub.py b/app/plugins/sitestatistic/siteuserinfo/nexus_hhanclub.py deleted file mode 100644 index c85c96d2..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/nexus_hhanclub.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -import re - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema -from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo -from app.utils.string import StringUtils - - -class NexusHhanclubSiteUserInfo(NexusPhpSiteUserInfo): - schema = SiteSchema.NexusHhanclub - order = SITE_BASE_ORDER + 20 - - @classmethod - def match(cls, html_text: str) -> bool: - return 'hhanclub.top' in html_text - - def _parse_user_traffic_info(self, html_text): - super()._parse_user_traffic_info(html_text) - - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - # 上传、下载、分享率 - upload_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", - html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[4]/text()')[0]) - download_match = re.search(r"[_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", - html.xpath('//*[@id="user-info-panel"]/div[2]/div[2]/div[5]/text()')[0]) - ratio_match = re.search(r"分享率][::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", - html.xpath('//*[@id="user-info-panel"]/div[2]/div[1]/div[1]/div/text()')[0]) - - # 计算分享率 - self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0 - self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0 - # 优先使用页面上的分享率 - calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3) - self.ratio = StringUtils.str_float(ratio_match.group(1)) if ( - ratio_match and ratio_match.group(1).strip()) else calc_ratio - - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户额外信息,加入时间,等级 - :param html_text: - :return: - """ - super()._parse_user_detail_info(html_text) - - html = etree.HTML(html_text) - if not html: - return - # 加入时间 - join_at_text = html.xpath('//*[@id="mainContent"]/div/div[2]/div[4]/div[3]/span[2]/text()[1]') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip()) - - def _get_user_level(self, html): - super()._get_user_level(html) - user_level_path = html.xpath('//*[@id="mainContent"]/div/div[2]/div[2]/div[4]/span[2]/img/@title') - if user_level_path: - self.user_level = user_level_path[0] diff --git a/app/plugins/sitestatistic/siteuserinfo/nexus_php.py b/app/plugins/sitestatistic/siteuserinfo/nexus_php.py deleted file mode 100644 index 8d5b6c96..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/nexus_php.py +++ /dev/null @@ -1,392 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.log import logger -from app.plugins.sitestatistic.siteuserinfo import 
ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class NexusPhpSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.NexusPhp - order = SITE_BASE_ORDER * 2 - - @classmethod - def match(cls, html_text: str) -> bool: - """ - 默认使用NexusPhp解析 - :param html_text: - :return: - """ - return True - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = user_detail.group(1) - self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding" - else: - user_detail = re.search(r"(userdetails)", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = None - self._torrent_seeding_page = None - - def _parse_message_unread(self, html_text): - """ - 解析未读短消息数量 - :param html_text: - :return: - """ - html = etree.HTML(html_text) - if not html: - return - - message_labels = html.xpath('//a[@href="messages.php"]/..') - message_labels.extend(html.xpath('//a[contains(@href, "messages.php")]/..')) - if message_labels: - message_text = message_labels[0].xpath("string(.)") - - logger.debug(f"{self.site_name} 消息原始信息 {message_text}") - message_unread_match = re.findall(r"[^Date](信息箱\s*|\(|你有\xa0)(\d+)", message_text) - - if message_unread_match and len(message_unread_match[-1]) == 2: - self.message_unread = StringUtils.str_int(message_unread_match[-1][1]) - elif message_text.isdigit(): - self.message_unread = StringUtils.str_int(message_text) - - def _parse_user_base_info(self, html_text: str): - # 合并解析,减少额外请求调用 - self._parse_user_traffic_info(html_text) - self._user_traffic_page = None - - self._parse_message_unread(html_text) - - html = etree.HTML(html_text) - if not html: - return - - ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//b//text()') - if ret: - self.username = str(ret[0]) - return - ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()') - if ret: - self.username = str(ret[0]) - - ret = html.xpath('//a[contains(@href, "userdetails")]//strong//text()') - if ret: - self.username = str(ret[0]) - return - - def _parse_user_traffic_info(self, html_text): - html_text = self._prepare_html_text(html_text) - upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text, - re.IGNORECASE) - self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0 - download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text, - re.IGNORECASE) - self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0 - ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text) - # 计算分享率 - calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3) - # 优先使用页面上的分享率 - self.ratio = StringUtils.str_float(ratio_match.group(1)) if ( - ratio_match and ratio_match.group(1).strip()) else calc_ratio - leeching_match = re.search(r"(Torrents leeching|下载中)[\u4E00-\u9FA5\D\s]+(\d+)[\s\S]+<", html_text) - self.leeching = StringUtils.str_int(leeching_match.group(2)) if leeching_match and leeching_match.group( - 2).strip() else 0 - html = etree.HTML(html_text) - has_ucoin, self.bonus = 
self._parse_ucoin(html) - if has_ucoin: - return - tmps = html.xpath('//a[contains(@href,"mybonus")]/text()') if html else None - if tmps: - bonus_text = str(tmps[0]).strip() - bonus_match = re.search(r"([\d,.]+)", bonus_text) - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1)) - return - bonus_match = re.search(r"mybonus.[\[\]::<>/a-zA-Z_\-=\"'\s#;.(使用魔力值豆]+\s*([\d,.]+)[<()&\s]", html_text) - try: - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1)) - return - bonus_match = re.search(r"[魔力值|\]][\[\]::<>/a-zA-Z_\-=\"'\s#;]+\s*([\d,.]+|\"[\d,.]+\")[<>()&\s]", - html_text, - flags=re.S) - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1).strip('"')) - except Exception as err: - logger.error(f"{self.site_name} 解析魔力值出错, 错误信息: {str(err)}") - - @staticmethod - def _parse_ucoin(html): - """ - 解析ucoin, 统一转换为铜币 - :param html: - :return: - """ - if html: - gold, silver, copper = None, None, None - - golds = html.xpath('//span[@class = "ucoin-symbol ucoin-gold"]//text()') - if golds: - gold = StringUtils.str_float(str(golds[-1])) - silvers = html.xpath('//span[@class = "ucoin-symbol ucoin-silver"]//text()') - if silvers: - silver = StringUtils.str_float(str(silvers[-1])) - coppers = html.xpath('//span[@class = "ucoin-symbol ucoin-copper"]//text()') - if coppers: - copper = StringUtils.str_float(str(coppers[-1])) - if gold or silver or copper: - gold = gold if gold else 0 - silver = silver if silver else 0 - copper = copper if copper else 0 - return True, gold * 100 * 100 + silver * 100 + copper - return False, 0.0 - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(str(html_text).replace(r'\/', '/')) - if not html: - return None - - # 首页存在扩展链接,使用扩展链接 - seeding_url_text = html.xpath('//a[contains(@href,"torrents.php") ' - 'and contains(@href,"seeding")]/@href') - if multi_page is False and seeding_url_text and seeding_url_text[0].strip(): - self._torrent_seeding_page = seeding_url_text[0].strip() - return self._torrent_seeding_page - - size_col = 3 - seeders_col = 4 - # 搜索size列 - size_col_xpath = '//tr[position()=1]/' \ - 'td[(img[@class="size"] and img[@alt="size"])' \ - ' or (text() = "大小")' \ - ' or (a/img[@class="size" and @alt="size"])]' - if html.xpath(size_col_xpath): - size_col = len(html.xpath(f'{size_col_xpath}/preceding-sibling::td')) + 1 - # 搜索seeders列 - seeders_col_xpath = '//tr[position()=1]/' \ - 'td[(img[@class="seeders"] and img[@alt="seeders"])' \ - ' or (text() = "在做种")' \ - ' or (a/img[@class="seeders" and @alt="seeders"])]' - if html.xpath(seeders_col_xpath): - seeders_col = len(html.xpath(f'{seeders_col_xpath}/preceding-sibling::td')) + 1 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - # 如果 table class="torrents",则增加table[@class="torrents"] - table_class = '//table[@class="torrents"]' if html.xpath('//table[@class="torrents"]') else '' - seeding_sizes = html.xpath(f'{table_class}//tr[position()>1]/td[{size_col}]') - seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]/b/a/text()') - if not seeding_seeders: - seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]//text()') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, 
len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i]) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href') - if next_page_text: - next_page = next_page_text[-1].strip() - # fix up page url - if self.userid not in next_page: - next_page = f'{next_page}&userid={self.userid}&type=seeding' - - return next_page - - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户额外信息,加入时间,等级 - :param html_text: - :return: - """ - html = etree.HTML(html_text) - if not html: - return - - self._get_user_level(html) - - self._fixup_traffic_info(html) - - # 加入日期 - join_at_text = html.xpath( - '//tr/td[text()="加入日期" or text()="注册日期" or *[text()="加入日期"]]/following-sibling::td[1]//text()' - '|//div/b[text()="加入日期"]/../text()') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip()) - - # 做种体积 & 做种数 - # seeding 页面获取不到的话,此处再获取一次 - seeding_sizes = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//' - 'table[tr[1][td[4 and text()="尺寸"]]]//tr[position()>1]/td[4]') - seeding_seeders = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//' - 'table[tr[1][td[5 and text()="做种者"]]]//tr[position()>1]/td[5]//text()') - tmp_seeding = len(seeding_sizes) - tmp_seeding_size = 0 - tmp_seeding_info = [] - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i]) - - tmp_seeding_size += size - tmp_seeding_info.append([seeders, size]) - - if not self.seeding_size: - self.seeding_size = tmp_seeding_size - if not self.seeding: - self.seeding = tmp_seeding - if not self.seeding_info: - self.seeding_info = tmp_seeding_info - - seeding_sizes = html.xpath('//tr/td[text()="做种统计"]/following-sibling::td[1]//text()') - if seeding_sizes: - seeding_match = re.search(r"总做种数:\s+(\d+)", seeding_sizes[0], re.IGNORECASE) - seeding_size_match = re.search(r"总做种体积:\s+([\d,.\s]+[KMGTPI]*B)", seeding_sizes[0], re.IGNORECASE) - tmp_seeding = StringUtils.str_int(seeding_match.group(1)) if ( - seeding_match and seeding_match.group(1)) else 0 - tmp_seeding_size = StringUtils.num_filesize( - seeding_size_match.group(1).strip()) if seeding_size_match else 0 - if not self.seeding_size: - self.seeding_size = tmp_seeding_size - if not self.seeding: - self.seeding = tmp_seeding - - self._fixup_torrent_seeding_page(html) - - def _fixup_torrent_seeding_page(self, html): - """ - 修正种子页面链接 - :param html: - :return: - """ - # 单独的种子页面 - seeding_url_text = html.xpath('//a[contains(@href,"getusertorrentlist.php") ' - 'and contains(@href,"seeding")]/@href') - if seeding_url_text: - self._torrent_seeding_page = seeding_url_text[0].strip() - # 从JS调用种获取用户ID - seeding_url_text = html.xpath('//a[contains(@href, "javascript: getusertorrentlistajax") ' - 'and contains(@href,"seeding")]/@href') - csrf_text = html.xpath('//meta[@name="x-csrf"]/@content') - if not self._torrent_seeding_page and seeding_url_text: - user_js = re.search(r"javascript: getusertorrentlistajax\(\s*'(\d+)", seeding_url_text[0]) - if user_js and user_js.group(1).strip(): - self.userid = user_js.group(1).strip() - self._torrent_seeding_page = 
f"getusertorrentlistajax.php?userid={self.userid}&type=seeding" - elif seeding_url_text and csrf_text: - if csrf_text[0].strip(): - self._torrent_seeding_page \ - = f"ajax_getusertorrentlist.php" - self._torrent_seeding_params = {'userid': self.userid, 'type': 'seeding', 'csrf': csrf_text[0].strip()} - - # 分类做种模式 - # 临时屏蔽 - # seeding_url_text = html.xpath('//tr/td[text()="当前做种"]/following-sibling::td[1]' - # '/table//td/a[contains(@href,"seeding")]/@href') - # if seeding_url_text: - # self._torrent_seeding_page = seeding_url_text - - def _get_user_level(self, html): - # 等级 获取同一行等级数据,图片格式等级,取title信息,否则取文本信息 - user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级" or *[text()="等级"]]/' - 'following-sibling::td[1]/img[1]/@title') - if user_levels_text: - self.user_level = user_levels_text[0].strip() - return - - user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/' - 'following-sibling::td[1 and not(img)]' - '|//tr/td[text()="等級" or text()="等级"]/' - 'following-sibling::td[1 and img[not(@title)]]') - if user_levels_text: - self.user_level = user_levels_text[0].xpath("string(.)").strip() - return - - user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/' - 'following-sibling::td[1]') - if user_levels_text: - self.user_level = user_levels_text[0].xpath("string(.)").strip() - return - - user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()') - if not self.user_level and user_levels_text: - for user_level_text in user_levels_text: - user_level_match = re.search(r"\[(.*)]", user_level_text) - if user_level_match and user_level_match.group(1).strip(): - self.user_level = user_level_match.group(1).strip() - break - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - html = etree.HTML(html_text) - if not html: - return None - - message_links = html.xpath('//tr[not(./td/img[@alt="Read"])]/td/a[contains(@href, "viewmessage")]/@href') - msg_links.extend(message_links) - # 是否存在下页数据 - next_page = None - next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href') - if next_page_text: - next_page = next_page_text[-1].strip() - - return next_page - - def _parse_message_content(self, html_text): - html = etree.HTML(html_text) - if not html: - return None, None, None - # 标题 - message_head_text = None - message_head = html.xpath('//h1/text()' - '|//div[@class="layui-card-header"]/span[1]/text()') - if message_head: - message_head_text = message_head[-1].strip() - - # 消息时间 - message_date_text = None - message_date = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[2]/td[2]' - '|//div[@class="layui-card-header"]/span[2]/span[2]') - if message_date: - message_date_text = message_date[0].xpath("string(.)").strip() - - # 消息内容 - message_content_text = None - message_content = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[3]/td' - '|//div[contains(@class,"layui-card-body")]') - if message_content: - message_content_text = message_content[0].xpath("string(.)").strip() - - return message_head_text, message_date_text, message_content_text - - def _fixup_traffic_info(self, html): - # fixup bonus - if not self.bonus: - bonus_text = html.xpath('//tr/td[text()="魔力值" or text()="猫粮"]/following-sibling::td[1]/text()') - if bonus_text: - self.bonus = StringUtils.str_float(bonus_text[0].strip()) diff --git a/app/plugins/sitestatistic/siteuserinfo/nexus_project.py b/app/plugins/sitestatistic/siteuserinfo/nexus_project.py deleted file mode 
100644 index d64c59d3..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/nexus_project.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -import re - -from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema -from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo - - -class NexusProjectSiteUserInfo(NexusPhpSiteUserInfo): - schema = SiteSchema.NexusProject - order = SITE_BASE_ORDER + 25 - - @classmethod - def match(cls, html_text: str) -> bool: - return 'Nexus Project' in html_text - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = user_detail.group(1) - - self._torrent_seeding_page = f"viewusertorrents.php?id={self.userid}&show=seeding" diff --git a/app/plugins/sitestatistic/siteuserinfo/nexus_rabbit.py b/app/plugins/sitestatistic/siteuserinfo/nexus_rabbit.py deleted file mode 100644 index 08c4c52d..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/nexus_rabbit.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -import json -from typing import Optional - -from lxml import etree - -from app.log import logger -from app.plugins.sitestatistic.siteuserinfo import SITE_BASE_ORDER, SiteSchema -from app.plugins.sitestatistic.siteuserinfo.nexus_php import NexusPhpSiteUserInfo - - -class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo): - schema = SiteSchema.NexusRabbit - order = SITE_BASE_ORDER + 5 - - @classmethod - def match(cls, html_text: str) -> bool: - html = etree.HTML(html_text) - if not html: - return False - - printable_text = html.xpath("string(.)") if html else "" - return 'Style by Rabbit' in printable_text - - def _parse_site_page(self, html_text: str): - super()._parse_site_page(html_text) - self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}" - self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"} - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - - try: - torrents = json.loads(html_text).get('data') - except Exception as e: - logger.error(f"解析做种信息失败: {str(e)}") - return - - page_seeding_size = 0 - page_seeding_info = [] - - page_seeding = len(torrents) - for torrent in torrents: - seeders = int(torrent.get('seeders', 0)) - size = int(torrent.get('size', 0)) - page_seeding_size += int(torrent.get('size', 0)) - - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) diff --git a/app/plugins/sitestatistic/siteuserinfo/small_horse.py b/app/plugins/sitestatistic/siteuserinfo/small_horse.py deleted file mode 100644 index d704a288..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/small_horse.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class SmallHorseSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.SmallHorse - order = SITE_BASE_ORDER + 30 - - @classmethod - def match(cls, html_text: str) -> bool: - return 
'Small Horse' in html_text - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - user_detail = re.search(r"user.php\?id=(\d+)", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = user_detail.group(1) - self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}" - self._user_traffic_page = f"user.php?id={self.userid}" - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - ret = html.xpath('//a[contains(@href, "user.php")]//text()') - if ret: - self.username = str(ret[0]) - - def _parse_user_traffic_info(self, html_text: str): - """ - 上传/下载/分享率 [做种数/魔力值] - :param html_text: - :return: - """ - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - tmps = html.xpath('//ul[@class = "stats nobullet"]') - if tmps: - if tmps[1].xpath("li") and tmps[1].xpath("li")[0].xpath("span//text()"): - self.join_at = StringUtils.unify_datetime_str(tmps[1].xpath("li")[0].xpath("span//text()")[0]) - self.upload = StringUtils.num_filesize(str(tmps[1].xpath("li")[2].xpath("text()")[0]).split(":")[1].strip()) - self.download = StringUtils.num_filesize( - str(tmps[1].xpath("li")[3].xpath("text()")[0]).split(":")[1].strip()) - if tmps[1].xpath("li")[4].xpath("span//text()"): - self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[4].xpath("span//text()")[0]).replace('∞', '0')) - else: - self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1]) - self.bonus = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1]) - self.user_level = str(tmps[3].xpath("li")[0].xpath("text()")[0]).split(":")[1].strip() - self.leeching = StringUtils.str_int( - (tmps[4].xpath("li")[6].xpath("text()")[0]).split(":")[1].replace("[", "")) - - def _parse_user_detail_info(self, html_text: str): - pass - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 6 - seeders_col = 8 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{size_col}]') - seeding_seeders = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{seeders_col}]') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip()) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li') - if next_pages and len(next_pages) > 1: - page_num = next_pages[0].xpath("string(.)").strip() - if page_num.isdigit(): - next_page = f"{self._torrent_seeding_page}&page={page_num}" - - return next_page - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, 
None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/tnode.py b/app/plugins/sitestatistic/siteuserinfo/tnode.py deleted file mode 100644 index 8f7ce7f3..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/tnode.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -import json -import re -from typing import Optional - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class TNodeSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.TNode - order = SITE_BASE_ORDER + 60 - - @classmethod - def match(cls, html_text: str) -> bool: - return 'Powered By TNode' in html_text - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - # - csrf_token = re.search(r'', html_text) - if csrf_token: - self._addition_headers = {'X-CSRF-TOKEN': csrf_token.group(1)} - self._user_detail_page = "api/user/getMainInfo" - self._torrent_seeding_page = "api/user/listTorrentActivity?id=&type=seeding&page=1&size=20000" - - def _parse_logged_in(self, html_text): - """ - 判断是否登录成功, 通过判断是否存在用户信息 - 暂时跳过检测,待后续优化 - :param html_text: - :return: - """ - return True - - def _parse_user_base_info(self, html_text: str): - self.username = self.userid - - def _parse_user_traffic_info(self, html_text: str): - pass - - def _parse_user_detail_info(self, html_text: str): - detail = json.loads(html_text) - if detail.get("status") != 200: - return - - user_info = detail.get("data", {}) - self.userid = user_info.get("id") - self.username = user_info.get("username") - self.user_level = user_info.get("class", {}).get("name") - self.join_at = user_info.get("regTime", 0) - self.join_at = StringUtils.unify_datetime_str(str(self.join_at)) - - self.upload = user_info.get("upload") - self.download = user_info.get("download") - self.ratio = 0 if self.download <= 0 else round(self.upload / self.download, 3) - self.bonus = user_info.get("bonus") - - self.message_unread = user_info.get("unreadAdmin", 0) + user_info.get("unreadInbox", 0) + user_info.get( - "unreadSystem", 0) - pass - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 解析用户做种信息 - """ - seeding_info = json.loads(html_text) - if seeding_info.get("status") != 200: - return - - torrents = seeding_info.get("data", {}).get("torrents", []) - - page_seeding_size = 0 - page_seeding_info = [] - for torrent in torrents: - size = torrent.get("size", 0) - seeders = torrent.get("seeding", 0) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += len(torrents) - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - - return next_page - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - """ - 系统信息 api/message/listSystem?page=1&size=20 - 收件箱信息 api/message/listInbox?page=1&size=20 - 管理员信息 api/message/listAdmin?page=1&size=20 - :param html_text: - :return: - """ - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/torrent_leech.py b/app/plugins/sitestatistic/siteuserinfo/torrent_leech.py deleted file mode 100644 index 96f973ae..00000000 --- a/app/plugins/sitestatistic/siteuserinfo/torrent_leech.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo 
import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class TorrentLeechSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.TorrentLeech - order = SITE_BASE_ORDER + 40 - - @classmethod - def match(cls, html_text: str) -> bool: - return 'TorrentLeech' in html_text - - def _parse_site_page(self, html_text: str): - html_text = self._prepare_html_text(html_text) - - user_detail = re.search(r"/profile/([^/]+)/", html_text) - if user_detail and user_detail.group().strip(): - self._user_detail_page = user_detail.group().strip().lstrip('/') - self.userid = user_detail.group(1) - self._user_traffic_page = f"profile/{self.userid}/view" - self._torrent_seeding_page = f"profile/{self.userid}/seeding" - - def _parse_user_base_info(self, html_text: str): - self.username = self.userid - - def _parse_user_traffic_info(self, html_text: str): - """ - 上传/下载/分享率 [做种数/魔力值] - :param html_text: - :return: - """ - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - upload_html = html.xpath('//div[contains(@class,"profile-uploaded")]//span/text()') - if upload_html: - self.upload = StringUtils.num_filesize(upload_html[0]) - download_html = html.xpath('//div[contains(@class,"profile-downloaded")]//span/text()') - if download_html: - self.download = StringUtils.num_filesize(download_html[0]) - ratio_html = html.xpath('//div[contains(@class,"profile-ratio")]//span/text()') - if ratio_html: - self.ratio = StringUtils.str_float(ratio_html[0].replace('∞', '0')) - - user_level_html = html.xpath('//table[contains(@class, "profileViewTable")]' - '//tr/td[text()="Class"]/following-sibling::td/text()') - if user_level_html: - self.user_level = user_level_html[0].strip() - - join_at_html = html.xpath('//table[contains(@class, "profileViewTable")]' - '//tr/td[text()="Registration date"]/following-sibling::td/text()') - if join_at_html: - self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip()) - - bonus_html = html.xpath('//span[contains(@class, "total-TL-points")]/text()') - if bonus_html: - self.bonus = StringUtils.str_float(bonus_html[0].strip()) - - def _parse_user_detail_info(self, html_text: str): - pass - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 2 - seeders_col = 7 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//tbody/tr/td[{size_col}]') - seeding_seeders = html.xpath(f'//tbody/tr/td[{seeders_col}]/text()') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i]) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - - return next_page - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/sitestatistic/siteuserinfo/unit3d.py b/app/plugins/sitestatistic/siteuserinfo/unit3d.py deleted file mode 100644 index a40483e4..00000000 --- 
a/app/plugins/sitestatistic/siteuserinfo/unit3d.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -import re -from typing import Optional - -from lxml import etree - -from app.plugins.sitestatistic.siteuserinfo import ISiteUserInfo, SITE_BASE_ORDER, SiteSchema -from app.utils.string import StringUtils - - -class Unit3dSiteUserInfo(ISiteUserInfo): - schema = SiteSchema.Unit3d - order = SITE_BASE_ORDER + 15 - - @classmethod - def match(cls, html_text: str) -> bool: - return "unit3d.js" in html_text - - def _parse_user_base_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - html = etree.HTML(html_text) - - tmps = html.xpath('//a[contains(@href, "/users/") and contains(@href, "settings")]/@href') - if tmps: - user_name_match = re.search(r"/users/(.+)/settings", tmps[0]) - if user_name_match and user_name_match.group().strip(): - self.username = user_name_match.group(1) - self._torrent_seeding_page = f"/users/{self.username}/active?perPage=100&client=&seeding=include" - self._user_detail_page = f"/users/{self.username}" - - tmps = html.xpath('//a[contains(@href, "bonus/earnings")]') - if tmps: - bonus_text = tmps[0].xpath("string(.)") - bonus_match = re.search(r"([\d,.]+)", bonus_text) - if bonus_match and bonus_match.group(1).strip(): - self.bonus = StringUtils.str_float(bonus_match.group(1)) - - def _parse_site_page(self, html_text: str): - # TODO - pass - - def _parse_user_detail_info(self, html_text: str): - """ - 解析用户额外信息,加入时间,等级 - :param html_text: - :return: - """ - html = etree.HTML(html_text) - if not html: - return None - - # 用户等级 - user_levels_text = html.xpath('//div[contains(@class, "content")]//span[contains(@class, "badge-user")]/text()') - if user_levels_text: - self.user_level = user_levels_text[0].strip() - - # 加入日期 - join_at_text = html.xpath('//div[contains(@class, "content")]//h4[contains(text(), "注册日期") ' - 'or contains(text(), "註冊日期") ' - 'or contains(text(), "Registration date")]/text()') - if join_at_text: - self.join_at = StringUtils.unify_datetime_str( - join_at_text[0].replace('注册日期', '').replace('註冊日期', '').replace('Registration date', '')) - - def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]: - """ - 做种相关信息 - :param html_text: - :param multi_page: 是否多页数据 - :return: 下页地址 - """ - html = etree.HTML(html_text) - if not html: - return None - - size_col = 9 - seeders_col = 2 - # 搜索size列 - if html.xpath('//thead//th[contains(@class,"size")]'): - size_col = len(html.xpath('//thead//th[contains(@class,"size")][1]/preceding-sibling::th')) + 1 - # 搜索seeders列 - if html.xpath('//thead//th[contains(@class,"seeders")]'): - seeders_col = len(html.xpath('//thead//th[contains(@class,"seeders")]/preceding-sibling::th')) + 1 - - page_seeding = 0 - page_seeding_size = 0 - page_seeding_info = [] - seeding_sizes = html.xpath(f'//tr[position()]/td[{size_col}]') - seeding_seeders = html.xpath(f'//tr[position()]/td[{seeders_col}]') - if seeding_sizes and seeding_seeders: - page_seeding = len(seeding_sizes) - - for i in range(0, len(seeding_sizes)): - size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip()) - seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip()) - - page_seeding_size += size - page_seeding_info.append([seeders, size]) - - self.seeding += page_seeding - self.seeding_size += page_seeding_size - self.seeding_info.extend(page_seeding_info) - - # 是否存在下页数据 - next_page = None - next_pages = 
html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li') - if next_pages and len(next_pages) > 1: - page_num = next_pages[0].xpath("string(.)").strip() - if page_num.isdigit(): - next_page = f"{self._torrent_seeding_page}&page={page_num}" - - return next_page - - def _parse_user_traffic_info(self, html_text: str): - html_text = self._prepare_html_text(html_text) - upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text, - re.IGNORECASE) - self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0 - download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text, - re.IGNORECASE) - self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0 - ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text) - self.ratio = StringUtils.str_float(ratio_match.group(1)) if ( - ratio_match and ratio_match.group(1).strip()) else 0.0 - - def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]: - return None - - def _parse_message_content(self, html_text): - return None, None, None diff --git a/app/plugins/speedlimiter/__init__.py b/app/plugins/speedlimiter/__init__.py deleted file mode 100644 index 344862bf..00000000 --- a/app/plugins/speedlimiter/__init__.py +++ /dev/null @@ -1,629 +0,0 @@ -import ipaddress -from typing import List, Tuple, Dict, Any - -from apscheduler.schedulers.background import BackgroundScheduler - -from app.core.config import settings -from app.core.event import eventmanager, Event -from app.log import logger -from app.modules.emby import Emby -from app.modules.jellyfin import Jellyfin -from app.modules.plex import Plex -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.schemas import NotificationType, WebhookEventInfo -from app.schemas.types import EventType -from app.utils.ip import IpUtils - - -class SpeedLimiter(_PluginBase): - # 插件名称 - plugin_name = "播放限速" - # 插件描述 - plugin_desc = "外网播放媒体库视频时,自动对下载器进行限速。" - # 插件图标 - plugin_icon = "SpeedLimiter.jpg" - # 主题色 - plugin_color = "#183883" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "Shurelol" - # 作者主页 - author_url = "https://github.com/Shurelol" - # 插件配置项ID前缀 - plugin_config_prefix = "speedlimit_" - # 加载顺序 - plugin_order = 11 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _scheduler = None - _qb = None - _tr = None - _enabled: bool = False - _notify: bool = False - _interval: int = 60 - _downloader: list = [] - _play_up_speed: float = 0 - _play_down_speed: float = 0 - _noplay_up_speed: float = 0 - _noplay_down_speed: float = 0 - _bandwidth: float = 0 - _allocation_ratio: str = "" - _auto_limit: bool = False - _limit_enabled: bool = False - # 不限速地址 - _unlimited_ips = {} - # 当前限速状态 - _current_state = "" - - def init_plugin(self, config: dict = None): - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._notify = config.get("notify") - self._play_up_speed = float(config.get("play_up_speed")) if config.get("play_up_speed") else 0 - self._play_down_speed = float(config.get("play_down_speed")) if config.get("play_down_speed") else 0 - self._noplay_up_speed = float(config.get("noplay_up_speed")) if config.get("noplay_up_speed") else 0 - self._noplay_down_speed = float(config.get("noplay_down_speed")) if config.get("noplay_down_speed") else 0 - self._current_state = 
f"U:{self._noplay_up_speed},D:{self._noplay_down_speed}" - try: - # 总带宽 - self._bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000 - # 自动限速开关 - if self._bandwidth > 0: - self._auto_limit = True - else: - self._auto_limit = False - except Exception as e: - logger.error(f"智能限速上行带宽设置错误:{str(e)}") - self._bandwidth = 0 - - # 限速服务开关 - self._limit_enabled = True if (self._play_up_speed - or self._play_down_speed - or self._auto_limit) else False - self._allocation_ratio = config.get("allocation_ratio") or "" - # 不限速地址 - self._unlimited_ips["ipv4"] = config.get("ipv4") or "" - self._unlimited_ips["ipv6"] = config.get("ipv6") or "" - - self._downloader = config.get("downloader") or [] - if self._downloader: - if 'qbittorrent' in self._downloader: - self._qb = Qbittorrent() - if 'transmission' in self._downloader: - self._tr = Transmission() - - # 移出现有任务 - self.stop_service() - - # 启动限速任务 - if self._enabled and self._limit_enabled: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - self._scheduler.add_job(func=self.check_playing_sessions, - trigger='interval', - seconds=self._interval, - name="播放限速检查") - self._scheduler.print_jobs() - self._scheduler.start() - logger.info("播放限速检查服务启动") - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'downloader', - 'label': '下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'}, - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'play_up_speed', - 'label': '播放限速(上传)', - 'placeholder': 'KB/s' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'play_down_speed', - 'label': '播放限速(下载)', - 'placeholder': 'KB/s' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'noplay_up_speed', - 'label': '未播放限速(上传)', - 'placeholder': 'KB/s' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'noplay_down_speed', - 'label': '未播放限速(下载)', - 'placeholder': 'KB/s' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'bandwidth', - 'label': '智能限速上行带宽', - 'placeholder': 'Mbps' - } - } - ] - }, - { - 
'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'allocation_ratio', - 'label': '智能限速分配比例', - 'items': [ - {'title': '平均', 'value': ''}, - {'title': '1:9', 'value': '1:9'}, - {'title': '2:8', 'value': '2:8'}, - {'title': '3:7', 'value': '3:7'}, - {'title': '4:6', 'value': '4:6'}, - {'title': '6:4', 'value': '6:4'}, - {'title': '7:3', 'value': '7:3'}, - {'title': '8:2', 'value': '8:2'}, - {'title': '9:1', 'value': '9:1'}, - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'ipv4', - 'label': '不限速地址范围(ipv4)', - 'placeholder': '留空默认不限速内网ipv4' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'ipv6', - 'label': '不限速地址范围(ipv6)', - 'placeholder': '留空默认不限速内网ipv6' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": True, - "downloader": [], - "play_up_speed": None, - "play_down_speed": None, - "noplay_up_speed": None, - "noplay_down_speed": None, - "bandwidth": None, - "allocation_ratio": "", - "ipv4": "", - "ipv6": "" - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType.WebhookMessage) - def check_playing_sessions(self, event: Event = None): - """ - 检查播放会话 - """ - if not self._qb and not self._tr: - return - if not self._enabled: - return - if event: - event_data: WebhookEventInfo = event.event_data - if event_data.event not in [ - "playback.start", - "PlaybackStart", - "media.play", - "media.stop", - "PlaybackStop", - "playback.stop" - ]: - return - # 当前播放的总比特率 - total_bit_rate = 0 - # 媒体服务器类型,多个以,分隔 - if not settings.MEDIASERVER: - return - media_servers = settings.MEDIASERVER.split(',') - # 查询所有媒体服务器状态 - for media_server in media_servers: - # 查询播放中会话 - playing_sessions = [] - if media_server == "emby": - req_url = "[HOST]emby/Sessions?api_key=[APIKEY]" - try: - res = Emby().get_data(req_url) - if res and res.status_code == 200: - sessions = res.json() - for session in sessions: - if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"): - playing_sessions.append(session) - except Exception as e: - logger.error(f"获取Emby播放会话失败:{str(e)}") - continue - # 计算有效比特率 - for session in playing_sessions: - # 设置了不限速范围则判断session ip是否在不限速范围内 - if self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]: - if not self.__allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \ - and session.get("NowPlayingItem", {}).get("MediaType") == "Video": - total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0) - # 未设置不限速范围,则默认不限速内网ip - elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \ - and session.get("NowPlayingItem", {}).get("MediaType") == "Video": - total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0) - elif media_server == "jellyfin": - req_url = "[HOST]Sessions?api_key=[APIKEY]" - try: - res = Jellyfin().get_data(req_url) - if res and res.status_code == 200: - sessions = res.json() - for session in sessions: - if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"): - playing_sessions.append(session) - except Exception as e: - logger.error(f"获取Jellyfin播放会话失败:{str(e)}") - continue - # 计算有效比特率 - for session in playing_sessions: - # 设置了不限速范围则判断session ip是否在不限速范围内 - if 
self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]: - if not self.__allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \ - and session.get("NowPlayingItem", {}).get("MediaType") == "Video": - media_streams = session.get("NowPlayingItem", {}).get("MediaStreams") or [] - for media_stream in media_streams: - total_bit_rate += int(media_stream.get("BitRate") or 0) - # 未设置不限速范围,则默认不限速内网ip - elif not IpUtils.is_private_ip(session.get("RemoteEndPoint")) \ - and session.get("NowPlayingItem", {}).get("MediaType") == "Video": - media_streams = session.get("NowPlayingItem", {}).get("MediaStreams") or [] - for media_stream in media_streams: - total_bit_rate += int(media_stream.get("BitRate") or 0) - elif media_server == "plex": - _plex = Plex().get_plex() - if _plex: - sessions = _plex.sessions() - for session in sessions: - bitrate = sum([m.bitrate or 0 for m in session.media]) - playing_sessions.append({ - "type": session.TAG, - "bitrate": bitrate, - "address": session.player.address - }) - # 计算有效比特率 - for session in playing_sessions: - # 设置了不限速范围则判断session ip是否在不限速范围内 - if self._unlimited_ips["ipv4"] or self._unlimited_ips["ipv6"]: - if not self.__allow_access(self._unlimited_ips, session.get("address")) \ - and session.get("type") == "Video": - total_bit_rate += int(session.get("bitrate") or 0) - # 未设置不限速范围,则默认不限速内网ip - elif not IpUtils.is_private_ip(session.get("address")) \ - and session.get("type") == "Video": - total_bit_rate += int(session.get("bitrate") or 0) - - if total_bit_rate: - # 开启智能限速计算上传限速 - if self._auto_limit: - play_up_speed = self.__calc_limit(total_bit_rate) - else: - play_up_speed = self._play_up_speed - - # 当前正在播放,开始限速 - self.__set_limiter(limit_type="播放", upload_limit=play_up_speed, - download_limit=self._play_down_speed) - else: - # 当前没有播放,取消限速 - self.__set_limiter(limit_type="未播放", upload_limit=self._noplay_up_speed, - download_limit=self._noplay_down_speed) - - def __calc_limit(self, total_bit_rate: float) -> float: - """ - 计算智能上传限速 - """ - if not self._bandwidth: - return 10 - return round((self._bandwidth - total_bit_rate) / 8 / 1024, 2) - - def __set_limiter(self, limit_type: str, upload_limit: float, download_limit: float): - """ - 设置限速 - """ - if not self._qb and not self._tr: - return - state = f"U:{upload_limit},D:{download_limit}" - if self._current_state == state: - # 限速状态没有改变 - return - else: - self._current_state = state - - try: - cnt = 0 - for download in self._downloader: - if self._auto_limit and limit_type == "播放": - # 开启了播放智能限速 - if len(self._downloader) == 1: - # 只有一个下载器 - upload_limit = int(upload_limit) - else: - # 多个下载器 - if not self._allocation_ratio: - # 平均 - upload_limit = int(upload_limit / len(self._downloader)) - else: - # 按比例 - allocation_count = sum([int(i) for i in self._allocation_ratio.split(":")]) - upload_limit = int(upload_limit * int(self._allocation_ratio.split(":")[cnt]) / allocation_count) - cnt += 1 - if upload_limit: - text = f"上传:{upload_limit} KB/s" - else: - text = f"上传:未限速" - if download_limit: - text = f"{text}\n下载:{download_limit} KB/s" - else: - text = f"{text}\n下载:未限速" - if str(download) == 'qbittorrent': - if self._qb: - self._qb.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit) - # 发送通知 - if self._notify: - title = "【播放限速】" - if upload_limit or download_limit: - subtitle = f"Qbittorrent 开始{limit_type}限速" - self.post_message( - mtype=NotificationType.MediaServer, - title=title, - text=f"{subtitle}\n{text}" - ) - else: - self.post_message( - mtype=NotificationType.MediaServer, - 
title=title, - text=f"Qbittorrent 已取消限速" - ) - else: - if self._tr: - self._tr.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit) - # 发送通知 - if self._notify: - title = "【播放限速】" - if upload_limit or download_limit: - subtitle = f"Transmission 开始{limit_type}限速" - self.post_message( - mtype=NotificationType.MediaServer, - title=title, - text=f"{subtitle}\n{text}" - ) - else: - self.post_message( - mtype=NotificationType.MediaServer, - title=title, - text=f"Transmission 已取消限速" - ) - except Exception as e: - logger.error(f"设置限速失败:{str(e)}") - - @staticmethod - def __allow_access(allow_ips: dict, ip: str) -> bool: - """ - 判断IP是否合法 - :param allow_ips: 充许的IP范围 {"ipv4":, "ipv6":} - :param ip: 需要检查的ip - """ - if not allow_ips: - return True - try: - ipaddr = ipaddress.ip_address(ip) - if ipaddr.version == 4: - if not allow_ips.get('ipv4'): - return True - allow_ipv4s = allow_ips.get('ipv4').split(",") - for allow_ipv4 in allow_ipv4s: - if ipaddr in ipaddress.ip_network(allow_ipv4, strict=False): - return True - elif ipaddr.ipv4_mapped: - if not allow_ips.get('ipv4'): - return True - allow_ipv4s = allow_ips.get('ipv4').split(",") - for allow_ipv4 in allow_ipv4s: - if ipaddr.ipv4_mapped in ipaddress.ip_network(allow_ipv4, strict=False): - return True - else: - if not allow_ips.get('ipv6'): - return True - allow_ipv6s = allow_ips.get('ipv6').split(",") - for allow_ipv6 in allow_ipv6s: - if ipaddr in ipaddress.ip_network(allow_ipv6, strict=False): - return True - except Exception as err: - print(str(err)) - return False - return False - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - print(str(e)) diff --git a/app/plugins/syncdownloadfiles/__init__.py b/app/plugins/syncdownloadfiles/__init__.py deleted file mode 100644 index 4b3977d0..00000000 --- a/app/plugins/syncdownloadfiles/__init__.py +++ /dev/null @@ -1,581 +0,0 @@ -import time -from datetime import datetime -from pathlib import Path -from typing import Any, List, Dict, Tuple, Optional - -from apscheduler.schedulers.background import BackgroundScheduler - -from app.core.config import settings -from app.db.downloadhistory_oper import DownloadHistoryOper -from app.db.transferhistory_oper import TransferHistoryOper -from app.log import logger -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase - - -class SyncDownloadFiles(_PluginBase): - # 插件名称 - plugin_name = "下载器文件同步" - # 插件描述 - plugin_desc = "同步下载器的文件信息到数据库,删除文件时联动删除下载任务。" - # 插件图标 - plugin_icon = "sync_file.png" - # 主题色 - plugin_color = "#4686E3" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "thsrite" - # 作者主页 - author_url = "https://github.com/thsrite" - # 插件配置项ID前缀 - plugin_config_prefix = "syncdownloadfiles_" - # 加载顺序 - plugin_order = 20 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _enabled = False - # 任务执行间隔 - _time = None - qb = None - tr = None - _onlyonce = False - _history = False - _clear = False - _downloaders = [] - _dirs = None - downloadhis = None - transferhis = None - - # 定时器 - _scheduler: Optional[BackgroundScheduler] = None - - def init_plugin(self, config: dict = None): - # 停止现有任务 - self.stop_service() - - self.qb = Qbittorrent() - self.tr = Transmission() - self.downloadhis = DownloadHistoryOper() - self.transferhis = TransferHistoryOper() - - if config: - self._enabled = 
config.get('enabled') - self._time = config.get('time') or 6 - self._history = config.get('history') - self._clear = config.get('clear') - self._onlyonce = config.get("onlyonce") - self._downloaders = config.get('downloaders') or [] - self._dirs = config.get("dirs") or "" - - if self._clear: - # 清理下载器文件记录 - self.downloadhis.truncate_files() - # 清理下载器最后处理记录 - for downloader in self._downloaders: - # 获取最后同步时间 - self.del_data(f"last_sync_time_{downloader}") - # 关闭clear - self._clear = False - self.__update_config() - - if self._onlyonce: - # 执行一次 - # 关闭onlyonce - self._onlyonce = False - self.__update_config() - - self.sync() - - if self._enabled: - # 定时服务 - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._time: - try: - self._scheduler.add_job(func=self.sync, - trigger="interval", - hours=float(str(self._time).strip()), - name="自动同步下载器文件记录") - logger.info(f"自动同步下载器文件记录服务启动,时间间隔 {self._time} 小时") - except Exception as err: - logger.error(f"定时任务配置错误:{str(err)}") - - # 启动任务 - if self._scheduler.get_jobs(): - self._scheduler.print_jobs() - self._scheduler.start() - else: - self._enabled = False - self.__update_config() - - def sync(self): - """ - 同步所选下载器种子记录 - """ - start_time = datetime.now() - logger.info("开始同步下载器任务文件记录") - - if not self._downloaders: - logger.error("未选择同步下载器,停止运行") - return - - # 遍历下载器同步记录 - for downloader in self._downloaders: - # 获取最后同步时间 - last_sync_time = self.get_data(f"last_sync_time_{downloader}") - - logger.info(f"开始扫描下载器 {downloader} ...") - downloader_obj = self.__get_downloader(downloader) - # 获取下载器中已完成的种子 - torrents = downloader_obj.get_completed_torrents() - if torrents: - logger.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}") - else: - logger.info(f"下载器 {downloader} 没有已完成种子") - continue - - # 把种子按照名称和种子大小分组,获取添加时间最早的一个,认定为是源种子,其余为辅种 - torrents = self.__get_origin_torrents(torrents, downloader) - logger.info(f"下载器 {downloader} 去除辅种,获取到源种子数:{len(torrents)}") - - for torrent in torrents: - # 返回false,标识后续种子已被同步 - sync_flag = self.__compare_time(torrent, downloader, last_sync_time) - - if not sync_flag: - logger.info(f"最后同步时间{last_sync_time}, 之前种子已被同步,结束当前下载器 {downloader} 任务") - break - - # 获取种子hash - hash_str = self.__get_hash(torrent, downloader) - - # 判断是否是mp下载,判断download_hash是否在downloadhistory表中,是则不处理 - downloadhis = self.downloadhis.get_by_hash(hash_str) - if downloadhis: - downlod_files = self.downloadhis.get_files_by_hash(hash_str) - if downlod_files: - logger.info(f"种子 {hash_str} 通过MoviePilot下载,跳过处理") - continue - - # 获取种子download_dir - download_dir = self.__get_download_dir(torrent, downloader) - - # 处理路径映射 - if self._dirs: - paths = self._dirs.split("\n") - for path in paths: - sub_paths = path.split(":") - download_dir = download_dir.replace(sub_paths[0], sub_paths[1]).replace('\\', '/') - - # 获取种子name - torrent_name = self.__get_torrent_name(torrent, downloader) - # 种子保存目录 - save_path = Path(download_dir).joinpath(torrent_name) - # 获取种子文件 - torrent_files = self.__get_torrent_files(torrent, downloader, downloader_obj) - logger.info(f"开始同步种子 {hash_str}, 文件数 {len(torrent_files)}") - - download_files = [] - for file in torrent_files: - # 过滤掉没下载的文件 - if not self.__is_download(file, downloader): - continue - # 种子文件路径 - file_path_str = self.__get_file_path(file, downloader) - file_path = Path(file_path_str) - # 只处理视频格式 - if not file_path.suffix \ - or file_path.suffix not in settings.RMT_MEDIAEXT: - continue - # 种子文件根路程 - root_path = file_path.parts[0] - # 不含种子名称的种子文件相对路径 - if root_path == torrent_name: - rel_path = 
str(file_path.relative_to(root_path)) - else: - rel_path = str(file_path) - # 完整路径 - full_path = save_path.joinpath(rel_path) - if self._history: - transferhis = self.transferhis.get_by_src(str(full_path)) - if transferhis and not transferhis.download_hash: - logger.info(f"开始补充转移记录:{transferhis.id} download_hash {hash_str}") - self.transferhis.update_download_hash(historyid=transferhis.id, - download_hash=hash_str) - - # 种子文件记录 - download_files.append( - { - "download_hash": hash_str, - "downloader": downloader, - "fullpath": str(full_path), - "savepath": str(save_path), - "filepath": rel_path, - "torrentname": torrent_name, - } - ) - - if download_files: - # 登记下载文件 - self.downloadhis.add_files(download_files) - logger.info(f"种子 {hash_str} 同步完成") - - logger.info(f"下载器种子文件同步完成!") - self.save_data(f"last_sync_time_{downloader}", - time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) - - # 计算耗时 - end_time = datetime.now() - - logger.info(f"下载器任务文件记录已同步完成。总耗时 {(end_time - start_time).seconds} 秒") - - def __update_config(self): - self.update_config({ - "enabled": self._enabled, - "time": self._time, - "history": self._history, - "clear": self._clear, - "onlyonce": self._onlyonce, - "downloaders": self._downloaders, - "dirs": self._dirs - }) - - @staticmethod - def __get_origin_torrents(torrents: Any, dl_tpe: str): - # 把种子按照名称和种子大小分组,获取添加时间最早的一个,认定为是源种子,其余为辅种 - grouped_data = {} - - # 排序种子,根据种子添加时间倒序 - if dl_tpe == "qbittorrent": - torrents = sorted(torrents, key=lambda x: x.get("added_on"), reverse=True) - # 遍历原始数组,按照size和name进行分组 - for torrent in torrents: - size = torrent.get('size') - name = torrent.get('name') - key = (size, name) # 使用元组作为字典的键 - - # 如果分组键不存在,则将当前元素作为最小元素添加到字典中 - if key not in grouped_data: - grouped_data[key] = torrent - else: - # 如果分组键已存在,则比较当前元素的time是否更小,如果更小则更新字典中的元素 - if torrent.get('added_on') < grouped_data[key].get('added_on'): - grouped_data[key] = torrent - else: - torrents = sorted(torrents, key=lambda x: x.added_date, reverse=True) - # 遍历原始数组,按照size和name进行分组 - for torrent in torrents: - size = torrent.total_size - name = torrent.name - key = (size, name) # 使用元组作为字典的键 - - # 如果分组键不存在,则将当前元素作为最小元素添加到字典中 - if key not in grouped_data: - grouped_data[key] = torrent - else: - # 如果分组键已存在,则比较当前元素的time是否更小,如果更小则更新字典中的元素 - if torrent.added_date < grouped_data[key].added_date: - grouped_data[key] = torrent - - # 新的数组 - return list(grouped_data.values()) - - @staticmethod - def __compare_time(torrent: Any, dl_tpe: str, last_sync_time: str = None): - if last_sync_time: - # 获取种子时间 - if dl_tpe == "qbittorrent": - torrent_date = time.gmtime(torrent.get("added_on")) # 将时间戳转换为时间元组 - torrent_date = time.strftime("%Y-%m-%d %H:%M:%S", torrent_date) # 格式化时间 - else: - torrent_date = torrent.added_date - - # 之后的种子已经同步了 - if last_sync_time > str(torrent_date): - return False - - return True - - @staticmethod - def __is_download(file: Any, dl_type: str): - """ - 判断文件是否被下载 - """ - try: - if dl_type == "qbittorrent": - return True - else: - return file.completed and file.completed > 0 - except Exception as e: - print(str(e)) - return True - - @staticmethod - def __get_file_path(file: Any, dl_type: str): - """ - 获取文件路径 - """ - try: - return file.get("name") if dl_type == "qbittorrent" else file.name - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __get_torrent_files(torrent: Any, dl_type: str, downloader_obj): - """ - 获取种子文件 - """ - try: - return torrent.files if dl_type == "qbittorrent" else downloader_obj.get_files(tid=torrent.id) - except Exception 
as e: - print(str(e)) - return "" - - @staticmethod - def __get_torrent_name(torrent: Any, dl_type: str): - """ - 获取种子name - """ - try: - return torrent.get("name") if dl_type == "qbittorrent" else torrent.name - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __get_download_dir(torrent: Any, dl_type: str): - """ - 获取种子download_dir - """ - try: - return torrent.get("save_path") if dl_type == "qbittorrent" else torrent.download_dir - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __get_hash(torrent: Any, dl_type: str): - """ - 获取种子hash - """ - try: - return torrent.get("hash") if dl_type == "qbittorrent" else torrent.hashString - except Exception as e: - print(str(e)) - return "" - - def __get_downloader(self, dtype: str): - """ - 根据类型返回下载器实例 - """ - if dtype == "qbittorrent": - return self.qb - elif dtype == "transmission": - return self.tr - else: - return None - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '开启插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'history', - 'label': '补充整理历史记录', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'clear', - 'label': '清理数据', - } - } - ] - }, - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'time', - 'label': '同步时间间隔' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'downloaders', - 'label': '同步下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'dirs', - 'label': '目录映射', - 'rows': 5, - 'placeholder': '每一行一个目录,下载器保存目录:MoviePilot映射目录' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '适用于非MoviePilot下载的任务;下载器种子数据较多时,同步时间将会较长,请耐心等候,可查看实时日志了解同步进度;时间间隔建议最少每6小时执行一次,防止上次任务没处理完。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "onlyonce": False, - "history": False, - "clear": False, - "time": 6, - "dirs": "", - "downloaders": [] - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - 
self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._scheduler.shutdown() - self._scheduler = None - except Exception as e: - logger.error("退出插件失败:%s" % str(e)) diff --git a/app/plugins/torrentremover/__init__.py b/app/plugins/torrentremover/__init__.py deleted file mode 100644 index 7b08ec2f..00000000 --- a/app/plugins/torrentremover/__init__.py +++ /dev/null @@ -1,766 +0,0 @@ -import re -import threading -import time -from datetime import datetime, timedelta -from typing import List, Tuple, Dict, Any, Optional - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -from app.core.config import settings -from app.log import logger -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.schemas import NotificationType -from app.utils.string import StringUtils - -lock = threading.Lock() - - -class TorrentRemover(_PluginBase): - # 插件名称 - plugin_name = "自动删种" - # 插件描述 - plugin_desc = "自动删除下载器中的下载任务。" - # 插件图标 - plugin_icon = "torrent.png" - # 主题色 - plugin_color = "#02853F" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "torrentremover_" - # 加载顺序 - plugin_order = 8 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - qb = None - tr = None - _event = threading.Event() - _scheduler = None - _enabled = False - _onlyonce = False - _notify = False - # pause/delete - _downloaders = [] - _action = "pause" - _cron = None - _samedata = False - _mponly = False - _size = None - _ratio = None - _time = None - _upspeed = None - _labels = None - _pathkeywords = None - _trackerkeywords = None - _errorkeywords = None - _torrentstates = None - _torrentcategorys = None - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._notify = config.get("notify") - self._downloaders = config.get("downloaders") or [] - self._action = config.get("action") - self._cron = config.get("cron") - self._samedata = config.get("samedata") - self._mponly = config.get("mponly") - self._size = config.get("size") or "" - self._ratio = config.get("ratio") - self._time = config.get("time") - self._upspeed = config.get("upspeed") - self._labels = config.get("labels") or "" - self._pathkeywords = config.get("pathkeywords") or "" - self._trackerkeywords = config.get("trackerkeywords") or "" - self._errorkeywords = config.get("errorkeywords") or "" - self._torrentstates = config.get("torrentstates") or "" - self._torrentcategorys = config.get("torrentcategorys") or "" - - self.stop_service() - - if self.get_state() or self._onlyonce: - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - self.qb = Qbittorrent() - self.tr = Transmission() - if self._cron: - try: - self._scheduler.add_job(func=self.delete_torrents, - trigger=CronTrigger.from_crontab(self._cron), - name="自动删种服务") - logger.info(f"自动删种服务启动,周期:{self._cron}") - except Exception as err: - logger.error(f"自动删种服务启动失败:{str(err)}") - self.systemmessage.put(f"自动删种服务启动失败:{str(err)}") - if self._onlyonce: - logger.info(f"自动删种服务启动,立即运行一次") - self._scheduler.add_job(func=self.delete_torrents, trigger='date', - run_date=datetime.now( - tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3) - ) - # 关闭一次性开关 - self._onlyonce = False - # 保存设置 - self.update_config({ - "enabled": self._enabled, - "notify": 
self._notify, - "onlyonce": self._onlyonce, - "action": self._action, - "cron": self._cron, - "downloaders": self._downloaders, - "samedata": self._samedata, - "mponly": self._mponly, - "size": self._size, - "ratio": self._ratio, - "time": self._time, - "upspeed": self._upspeed, - "labels": self._labels, - "pathkeywords": self._pathkeywords, - "trackerkeywords": self._trackerkeywords, - "errorkeywords": self._errorkeywords, - "torrentstates": self._torrentstates, - "torrentcategorys": self._torrentcategorys - - }) - if self._scheduler.get_jobs(): - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self) -> bool: - return True if self._enabled and self._cron and self._downloaders else False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '0 */12 * * *' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'action', - 'label': '动作', - 'items': [ - {'title': '暂停', 'value': 'pause'}, - {'title': '删除种子', 'value': 'delete'}, - {'title': '删除种子和文件', 'value': 'deletefile'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'chips': True, - 'multiple': True, - 'model': 'downloaders', - 'label': '下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'size', - 'label': '种子大小(GB)', - 'placeholder': '例如1-10' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'ratio', - 'label': '分享率', - 'placeholder': '' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'time', - 'label': '做种时间(小时)', - 'placeholder': '' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'upspeed', - 'label': '平均上传速度', - 'placeholder': '' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'labels', - 'label': '标签', - 'placeholder': '用,分隔多个标签' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'pathkeywords', - 'label': 
'保存路径关键词', - 'placeholder': '支持正式表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'trackerkeywords', - 'label': 'Tracker关键词', - 'placeholder': '支持正式表达式' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'errorkeywords', - 'label': '错误信息关键词(TR)', - 'placeholder': '支持正式表达式,仅适用于TR' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'torrentstates', - 'label': '任务状态(QB)', - 'placeholder': '用,分隔多个状态,仅适用于QB' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'torrentcategorys', - 'label': '任务分类', - 'placeholder': '用,分隔多个分类' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'samedata', - 'label': '处理辅种', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'mponly', - 'label': '仅MoviePilot任务', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - }, - 'content': [ - { - 'component': 'VAlert', - 'props': { - 'type': 'info', - 'variant': 'tonal', - 'text': '自动删种存在风险,如设置不当可能导致数据丢失!建议动作先选择暂停,确定条件正确后再改成删除。' - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": False, - "onlyonce": False, - "action": 'pause', - 'downloaders': [], - "cron": '0 */12 * * *', - "samedata": False, - "mponly": False, - "size": "", - "ratio": "", - "time": "", - "upspeed": "", - "labels": "", - "pathkeywords": "", - "trackerkeywords": "", - "errorkeywords": "", - "torrentstates": "", - "torrentcategorys": "" - } - - def get_page(self) -> List[dict]: - pass - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) - - def __get_downloader(self, dtype: str): - """ - 根据类型返回下载器实例 - """ - if dtype == "qbittorrent": - return self.qb - elif dtype == "transmission": - return self.tr - else: - return None - - def delete_torrents(self): - """ - 定时删除下载器中的下载任务 - """ - for downloader in self._downloaders: - try: - with lock: - # 获取需删除种子列表 - torrents = self.get_remove_torrents(downloader) - logger.info(f"自动删种任务 获取符合处理条件种子数 {len(torrents)}") - # 下载器 - downlader_obj = self.__get_downloader(downloader) - if self._action == "pause": - message_text = f"{downloader.title()} 共暂停{len(torrents)}个种子" - for torrent in torrents: - if self._event.is_set(): - logger.info(f"自动删种服务停止") - return - text_item = f"{torrent.get('name')} " \ - f"来自站点:{torrent.get('site')} " \ - f"大小:{StringUtils.str_filesize(torrent.get('size'))}" - # 暂停种子 - downlader_obj.stop_torrents(ids=[torrent.get("id")]) - logger.info(f"自动删种任务 暂停种子:{text_item}") - message_text = f"{message_text}\n{text_item}" - elif self._action == "delete": - message_text = 
f"{downloader.title()} 共删除{len(torrents)}个种子" - for torrent in torrents: - if self._event.is_set(): - logger.info(f"自动删种服务停止") - return - text_item = f"{torrent.get('name')} " \ - f"来自站点:{torrent.get('site')} " \ - f"大小:{StringUtils.str_filesize(torrent.get('size'))}" - # 删除种子 - downlader_obj.delete_torrents(delete_file=False, - ids=[torrent.get("id")]) - logger.info(f"自动删种任务 删除种子:{text_item}") - message_text = f"{message_text}\n{text_item}" - elif self._action == "deletefile": - message_text = f"{downloader.title()} 共删除{len(torrents)}个种子及文件" - for torrent in torrents: - if self._event.is_set(): - logger.info(f"自动删种服务停止") - return - text_item = f"{torrent.get('name')} " \ - f"来自站点:{torrent.get('site')} " \ - f"大小:{StringUtils.str_filesize(torrent.get('size'))}" - # 删除种子 - downlader_obj.delete_torrents(delete_file=True, - ids=[torrent.get("id")]) - logger.info(f"自动删种任务 删除种子及文件:{text_item}") - message_text = f"{message_text}\n{text_item}" - else: - continue - if torrents and message_text and self._notify: - self.post_message( - mtype=NotificationType.SiteMessage, - title=f"【自动删种任务完成】", - text=message_text - ) - except Exception as e: - logger.error(f"自动删种任务异常:{str(e)}") - - def __get_qb_torrent(self, torrent: Any) -> Optional[dict]: - """ - 检查QB下载任务是否符合条件 - """ - # 完成时间 - date_done = torrent.completion_on if torrent.completion_on > 0 else torrent.added_on - # 现在时间 - date_now = int(time.mktime(datetime.now().timetuple())) - # 做种时间 - torrent_seeding_time = date_now - date_done if date_done else 0 - # 平均上传速度 - torrent_upload_avs = torrent.uploaded / torrent_seeding_time if torrent_seeding_time else 0 - # 大小 单位:GB - sizes = self._size.split('-') if self._size else [] - minsize = sizes[0] * 1024 * 1024 * 1024 if sizes else 0 - maxsize = sizes[-1] * 1024 * 1024 * 1024 if sizes else 0 - # 分享率 - if self._ratio and torrent.ratio <= float(self._ratio): - return None - # 做种时间 单位:小时 - if self._time and torrent_seeding_time <= float(self._time) * 3600: - return None - # 文件大小 - if self._size and (torrent.size >= int(maxsize) or torrent.size <= int(minsize)): - return None - if self._upspeed and torrent_upload_avs >= float(self._upspeed) * 1024: - return None - if self._pathkeywords and not re.findall(self._pathkeywords, torrent.save_path, re.I): - return None - if self._trackerkeywords and not re.findall(self._trackerkeywords, torrent.tracker, re.I): - return None - if self._torrentstates and torrent.state not in self._torrentstates: - return None - if self._torrentcategorys and (not torrent.category or torrent.category not in self._torrentcategorys): - return None - return { - "id": torrent.hash, - "name": torrent.name, - "site": StringUtils.get_url_sld(torrent.tracker), - "size": torrent.size - } - - def __get_tr_torrent(self, torrent: Any) -> Optional[dict]: - """ - 检查TR下载任务是否符合条件 - """ - # 完成时间 - date_done = torrent.date_done or torrent.date_added - # 现在时间 - date_now = int(time.mktime(datetime.now().timetuple())) - # 做种时间 - torrent_seeding_time = date_now - int(time.mktime(date_done.timetuple())) if date_done else 0 - # 上传量 - torrent_uploaded = torrent.ratio * torrent.total_size - # 平均上传速茺 - torrent_upload_avs = torrent_uploaded / torrent_seeding_time if torrent_seeding_time else 0 - # 大小 单位:GB - sizes = self._size.split('-') if self._size else [] - minsize = sizes[0] * 1024 * 1024 * 1024 if sizes else 0 - maxsize = sizes[-1] * 1024 * 1024 * 1024 if sizes else 0 - # 分享率 - if self._ratio and torrent.ratio <= float(self._ratio): - return None - if self._time and torrent_seeding_time <= float(self._time) 
* 3600: - return None - if self._size and (torrent.total_size >= int(maxsize) or torrent.total_size <= int(minsize)): - return None - if self._upspeed and torrent_upload_avs >= float(self._upspeed) * 1024: - return None - if self._pathkeywords and not re.findall(self._pathkeywords, torrent.download_dir, re.I): - return None - if self._trackerkeywords: - if not torrent.trackers: - return None - else: - tacker_key_flag = False - for tracker in torrent.trackers: - if re.findall(self._trackerkeywords, tracker.get("announce", ""), re.I): - tacker_key_flag = True - break - if not tacker_key_flag: - return None - if self._errorkeywords and not re.findall(self._errorkeywords, torrent.error_string, re.I): - return None - return { - "id": torrent.hashString, - "name": torrent.name, - "site": torrent.trackers[0].get("sitename") if torrent.trackers else "", - "size": torrent.total_size - } - - def get_remove_torrents(self, downloader: str): - """ - 获取自动删种任务种子 - """ - remove_torrents = [] - # 下载器对象 - downloader_obj = self.__get_downloader(downloader) - # 标题 - if self._labels: - tags = self._labels.split(',') - else: - tags = [] - if self._mponly: - tags.extend(settings.TORRENT_TAG) - # 查询种子 - torrents, error_flag = downloader_obj.get_torrents(tags=tags or None) - if error_flag: - return [] - # 处理种子 - for torrent in torrents: - if downloader == "qbittorrent": - item = self.__get_qb_torrent(torrent) - else: - item = self.__get_tr_torrent(torrent) - if not item: - continue - remove_torrents.append(item) - # 处理辅种 - if self._samedata and remove_torrents: - remove_ids = [t.get("id") for t in remove_torrents] - remove_torrents_plus = [] - for remove_torrent in remove_torrents: - name = remove_torrent.get("name") - size = remove_torrent.get("size") - for torrent in torrents: - if downloader == "qbittorrent": - plus_id = torrent.hash - plus_name = torrent.name - plus_size = torrent.size - plus_site = StringUtils.get_url_sld(torrent.tracker) - else: - plus_id = torrent.hashString - plus_name = torrent.name - plus_size = torrent.total_size - plus_site = torrent.trackers[0].get("sitename") if torrent.trackers else "" - # 比对名称和大小 - if plus_name == name \ - and plus_size == size \ - and plus_id not in remove_ids: - remove_torrents_plus.append( - { - "id": plus_id, - "name": plus_name, - "site": plus_site, - "size": plus_size - } - ) - if remove_torrents_plus: - remove_torrents.extend(remove_torrents_plus) - return remove_torrents diff --git a/app/plugins/torrenttransfer/__init__.py b/app/plugins/torrenttransfer/__init__.py deleted file mode 100644 index e638b2ab..00000000 --- a/app/plugins/torrenttransfer/__init__.py +++ /dev/null @@ -1,822 +0,0 @@ -import os -from datetime import datetime, timedelta -from pathlib import Path -from threading import Event -from typing import Any, List, Dict, Tuple, Optional - -import pytz -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger -from bencode import bdecode, bencode - -from app.core.config import settings -from app.helper.torrent import TorrentHelper -from app.log import logger -from app.modules.qbittorrent import Qbittorrent -from app.modules.transmission import Transmission -from app.plugins import _PluginBase -from app.schemas import NotificationType -from app.utils.string import StringUtils - - -class TorrentTransfer(_PluginBase): - # 插件名称 - plugin_name = "自动转移做种" - # 插件描述 - plugin_desc = "定期转移下载器中的做种任务到另一个下载器。" - # 插件图标 - plugin_icon = "torrenttransfer.jpg" - # 主题色 - plugin_color = "#272636" - # 插件版本 - 
plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "torrenttransfer_" - # 加载顺序 - plugin_order = 18 - # 可使用的用户级别 - auth_level = 2 - - # 私有属性 - _scheduler = None - qb = None - tr = None - torrent = None - # 开关 - _enabled = False - _cron = None - _onlyonce = False - _fromdownloader = None - _todownloader = None - _frompath = None - _topath = None - _notify = False - _nolabels = None - _nopaths = None - _deletesource = False - _fromtorrentpath = None - _autostart = False - # 退出事件 - _event = Event() - # 待检查种子清单 - _recheck_torrents = {} - _is_recheck_running = False - # 任务标签 - _torrent_tags = ["已整理", "转移做种"] - - def init_plugin(self, config: dict = None): - self.torrent = TorrentHelper() - # 读取配置 - if config: - self._enabled = config.get("enabled") - self._onlyonce = config.get("onlyonce") - self._cron = config.get("cron") - self._notify = config.get("notify") - self._nolabels = config.get("nolabels") - self._frompath = config.get("frompath") - self._topath = config.get("topath") - self._fromdownloader = config.get("fromdownloader") - self._todownloader = config.get("todownloader") - self._deletesource = config.get("deletesource") - self._fromtorrentpath = config.get("fromtorrentpath") - self._nopaths = config.get("nopaths") - self._autostart = config.get("autostart") - - # 停止现有任务 - self.stop_service() - - # 启动定时任务 & 立即运行一次 - if self.get_state() or self._onlyonce: - self.qb = Qbittorrent() - self.tr = Transmission() - # 检查配置 - if self._fromtorrentpath and not Path(self._fromtorrentpath).exists(): - logger.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}") - self.systemmessage.put(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}") - return - if self._fromdownloader == self._todownloader: - logger.error(f"源下载器和目的下载器不能相同") - self.systemmessage.put(f"源下载器和目的下载器不能相同") - return - self._scheduler = BackgroundScheduler(timezone=settings.TZ) - if self._cron: - logger.info(f"转移做种服务启动,周期:{self._cron}") - try: - self._scheduler.add_job(self.transfer, - CronTrigger.from_crontab(self._cron)) - except Exception as e: - logger.error(f"转移做种服务启动失败:{str(e)}") - self.systemmessage.put(f"转移做种服务启动失败:{str(e)}") - return - if self._onlyonce: - logger.info(f"转移做种服务启动,立即运行一次") - self._scheduler.add_job(self.transfer, 'date', - run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta( - seconds=3)) - # 关闭一次性开关 - self._onlyonce = False - self.update_config({ - "enabled": self._enabled, - "onlyonce": self._onlyonce, - "cron": self._cron, - "notify": self._notify, - "nolabels": self._nolabels, - "frompath": self._frompath, - "topath": self._topath, - "fromdownloader": self._fromdownloader, - "todownloader": self._todownloader, - "deletesource": self._deletesource, - "fromtorrentpath": self._fromtorrentpath, - "nopaths": self._nopaths, - "autostart": self._autostart - }) - if self._scheduler.get_jobs(): - if self._autostart: - # 追加种子校验服务 - self._scheduler.add_job(self.check_recheck, 'interval', minutes=3) - # 启动服务 - self._scheduler.print_jobs() - self._scheduler.start() - - def get_state(self): - return True if self._enabled \ - and self._cron \ - and self._fromdownloader \ - and self._todownloader \ - and self._fromtorrentpath else False - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - return [ - { - 'component': 'VForm', - 'content': [ - { - 
'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'notify', - 'label': '发送通知', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'cron', - 'label': '执行周期', - 'placeholder': '0 0 0 ? *' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'nolabels', - 'label': '不转移种子标签', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'fromdownloader', - 'label': '源下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'fromtorrentpath', - 'label': '源下载器种子文件路径', - 'placeholder': 'BT_backup、torrents' - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'frompath', - 'label': '源数据文件根路径', - 'placeholder': '根路径,留空不进行路径转换' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'todownloader', - 'label': '目的下载器', - 'items': [ - {'title': 'Qbittorrent', 'value': 'qbittorrent'}, - {'title': 'Transmission', 'value': 'transmission'} - ] - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'topath', - 'label': '目的数据文件根路径', - 'placeholder': '根路径,留空不进行路径转换' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12 - }, - 'content': [ - { - 'component': 'VTextarea', - 'props': { - 'model': 'nopaths', - 'label': '不转移数据文件目录', - 'rows': 3, - 'placeholder': '每一行一个目录' - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'autostart', - 'label': '校验完成后自动开始', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'deletesource', - 'label': '删除源种子', - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'onlyonce', - 'label': '立即运行一次', - } - } - ] - } - ] - } - ] - } - ], { - "enabled": False, - "notify": False, - "onlyonce": False, - "cron": "", - "nolabels": "", - "frompath": "", - "topath": "", - "fromdownloader": "", - "todownloader": "", - "deletesource": False, - "fromtorrentpath": "", - "nopaths": "", - "autostart": True - } - - def get_page(self) -> List[dict]: - pass - - def __get_downloader(self, dtype: str): - """ - 根据类型返回下载器实例 - """ - if dtype == 
"qbittorrent": - return self.qb - elif dtype == "transmission": - return self.tr - else: - return None - - def __download(self, downloader: str, content: bytes, - save_path: str) -> Optional[str]: - """ - 添加下载任务 - """ - if downloader == "qbittorrent": - # 生成随机Tag - tag = StringUtils.generate_random_str(10) - state = self.qb.add_torrent(content=content, - download_dir=save_path, - is_paused=True, - tag=["已整理", "转移做种", tag]) - if not state: - return None - else: - # 获取种子Hash - torrent_hash = self.qb.get_torrent_id_by_tag(tags=tag) - if not torrent_hash: - logger.error(f"{downloader} 获取种子Hash失败") - return None - return torrent_hash - elif downloader == "transmission": - # 添加任务 - torrent = self.tr.add_torrent(content=content, - download_dir=save_path, - is_paused=True, - labels=["已整理", "转移做种"]) - if not torrent: - return None - else: - return torrent.hashString - - logger.error(f"不支持的下载器:{downloader}") - return None - - def transfer(self): - """ - 开始转移做种 - """ - logger.info("开始转移做种任务 ...") - - # 源下载器 - downloader = self._fromdownloader - # 目的下载器 - todownloader = self._todownloader - - # 获取下载器中已完成的种子 - downloader_obj = self.__get_downloader(downloader) - torrents = downloader_obj.get_completed_torrents() - if torrents: - logger.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}") - else: - logger.info(f"下载器 {downloader} 没有已完成种子") - return - - # 过滤种子,记录保存目录 - trans_torrents = [] - for torrent in torrents: - if self._event.is_set(): - logger.info(f"转移服务停止") - return - - # 获取种子hash - hash_str = self.__get_hash(torrent, downloader) - # 获取保存路径 - save_path = self.__get_save_path(torrent, downloader) - - if self._nopaths and save_path: - # 过滤不需要转移的路径 - nopath_skip = False - for nopath in self._nopaths.split('\n'): - if os.path.normpath(save_path).startswith(os.path.normpath(nopath)): - logger.info(f"种子 {hash_str} 保存路径 {save_path} 不需要转移,跳过 ...") - nopath_skip = True - break - if nopath_skip: - continue - - # 获取种子标签 - torrent_labels = self.__get_label(torrent, downloader) - if torrent_labels and self._nolabels: - is_skip = False - for label in self._nolabels.split(','): - if label in torrent_labels: - logger.info(f"种子 {hash_str} 含有不转移标签 {label},跳过 ...") - is_skip = True - break - if is_skip: - continue - - # 添加转移数据 - trans_torrents.append({ - "hash": hash_str, - "save_path": save_path, - "torrent": torrent - }) - - # 开始转移任务 - if trans_torrents: - logger.info(f"需要转移的种子数:{len(trans_torrents)}") - # 记数 - total = len(trans_torrents) - # 总成功数 - success = 0 - # 总失败数 - fail = 0 - # 跳过数 - skip = 0 - - for torrent_item in trans_torrents: - # 检查种子文件是否存在 - torrent_file = Path(self._fromtorrentpath) / f"{torrent_item.get('hash')}.torrent" - if not torrent_file.exists(): - logger.error(f"种子文件不存在:{torrent_file}") - # 失败计数 - fail += 1 - continue - - # 查询hash值是否已经在目的下载器中 - todownloader_obj = self.__get_downloader(todownloader) - torrent_info, _ = todownloader_obj.get_torrents(ids=[torrent_item.get('hash')]) - if torrent_info: - logger.info(f"{torrent_item.get('hash')} 已在目的下载器中,跳过 ...") - # 跳过计数 - skip += 1 - continue - - # 转换保存路径 - download_dir = self.__convert_save_path(torrent_item.get('save_path'), - self._frompath, - self._topath) - if not download_dir: - logger.error(f"转换保存路径失败:{torrent_item.get('save_path')}") - # 失败计数 - fail += 1 - continue - - # 如果源下载器是QB检查是否有Tracker,没有的话额外获取 - if downloader == "qbittorrent": - # 读取种子内容、解析种子文件 - content = torrent_file.read_bytes() - if not content: - logger.warn(f"读取种子文件失败:{torrent_file}") - fail += 1 - continue - # 读取trackers - try: - torrent_main = bdecode(content) - 
main_announce = torrent_main.get('announce') - except Exception as err: - logger.warn(f"解析种子文件 {torrent_file} 失败:{str(err)}") - fail += 1 - continue - - if not main_announce: - logger.info(f"{torrent_item.get('hash')} 未发现tracker信息,尝试补充tracker信息...") - # 读取fastresume文件 - fastresume_file = Path(self._fromtorrentpath) / f"{torrent_item.get('hash')}.fastresume" - if not fastresume_file.exists(): - logger.warn(f"fastresume文件不存在:{fastresume_file}") - fail += 1 - continue - # 尝试补充trackers - try: - # 解析fastresume文件 - fastresume = fastresume_file.read_bytes() - torrent_fastresume = bdecode(fastresume) - # 读取trackers - fastresume_trackers = torrent_fastresume.get('trackers') - if isinstance(fastresume_trackers, list) \ - and len(fastresume_trackers) > 0 \ - and fastresume_trackers[0]: - # 重新赋值 - torrent_main['announce'] = fastresume_trackers[0][0] - # 替换种子文件路径 - torrent_file = settings.TEMP_PATH / f"{torrent_item.get('hash')}.torrent" - # 编码并保存到临时文件 - torrent_file.write_bytes(bencode(torrent_main)) - except Exception as err: - logger.error(f"解析fastresume文件 {fastresume_file} 出错:{str(err)}") - fail += 1 - continue - - # 发送到另一个下载器中下载:默认暂停、传输下载路径、关闭自动管理模式 - logger.info(f"添加转移做种任务到下载器 {todownloader}:{torrent_file}") - download_id = self.__download(downloader=todownloader, - content=torrent_file.read_bytes(), - save_path=download_dir) - if not download_id: - # 下载失败 - fail += 1 - logger.error(f"添加下载任务失败:{torrent_file}") - continue - else: - # 下载成功 - logger.info(f"成功添加转移做种任务,种子文件:{torrent_file}") - - # TR会自动校验,QB需要手动校验 - if todownloader == "qbittorrent": - logger.info(f"qbittorrent 开始校验 {download_id} ...") - todownloader_obj.recheck_torrents(ids=[download_id]) - - # 追加校验任务 - logger.info(f"添加校验检查任务:{download_id} ...") - if not self._recheck_torrents.get(todownloader): - self._recheck_torrents[todownloader] = [] - self._recheck_torrents[todownloader].append(download_id) - - # 删除源种子,不能删除文件! 
- if self._deletesource: - logger.info(f"删除源下载器任务(不含文件):{torrent_item.get('hash')} ...") - downloader_obj.delete_torrents(delete_file=False, ids=[torrent_item.get('hash')]) - - # 成功计数 - success += 1 - # 插入转种记录 - history_key = "%s-%s" % (self._fromdownloader, torrent_item.get('hash')) - self.save_data(key=history_key, - value={ - "to_download": self._todownloader, - "to_download_id": download_id, - "delete_source": self._deletesource, - }) - # 触发校验任务 - if success > 0 and self._autostart: - self.check_recheck() - - # 发送通知 - if self._notify: - self.post_message( - mtype=NotificationType.SiteMessage, - title="【转移做种任务执行完成】", - text=f"总数:{total},成功:{success},失败:{fail},跳过:{skip}" - ) - else: - logger.info(f"没有需要转移的种子") - logger.info("转移做种任务执行完成") - - def check_recheck(self): - """ - 定时检查下载器中种子是否校验完成,校验完成且完整的自动开始辅种 - """ - if not self._recheck_torrents: - return - if not self._todownloader: - return - if self._is_recheck_running: - return - - # 校验下载器 - downloader = self._todownloader - - # 需要检查的种子 - recheck_torrents = self._recheck_torrents.get(downloader, []) - if not recheck_torrents: - return - - logger.info(f"开始检查下载器 {downloader} 的校验任务 ...") - - # 运行状态 - self._is_recheck_running = True - - # 获取任务 - downloader_obj = self.__get_downloader(downloader) - torrents, _ = downloader_obj.get_torrents(ids=recheck_torrents) - if torrents: - # 可做种的种子 - can_seeding_torrents = [] - for torrent in torrents: - # 获取种子hash - hash_str = self.__get_hash(torrent, downloader) - # 判断是否可做种 - if self.__can_seeding(torrent, downloader): - can_seeding_torrents.append(hash_str) - - if can_seeding_torrents: - logger.info(f"共 {len(can_seeding_torrents)} 个任务校验完成,开始做种") - # 开始做种 - downloader_obj.start_torrents(ids=can_seeding_torrents) - # 去除已经处理过的种子 - self._recheck_torrents[downloader] = list( - set(recheck_torrents).difference(set(can_seeding_torrents))) - else: - logger.info(f"没有新的任务校验完成,将在下次个周期继续检查 ...") - - elif torrents is None: - logger.info(f"下载器 {downloader} 查询校验任务失败,将在下次继续查询 ...") - else: - logger.info(f"下载器 {downloader} 中没有需要检查的校验任务,清空待处理列表") - self._recheck_torrents[downloader] = [] - - self._is_recheck_running = False - - @staticmethod - def __get_hash(torrent: Any, dl_type: str): - """ - 获取种子hash - """ - try: - return torrent.get("hash") if dl_type == "qbittorrent" else torrent.hashString - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __get_label(torrent: Any, dl_type: str): - """ - 获取种子标签 - """ - try: - return [str(tag).strip() for tag in torrent.get("tags").split(',')] \ - if dl_type == "qbittorrent" else torrent.labels or [] - except Exception as e: - print(str(e)) - return [] - - @staticmethod - def __get_save_path(torrent: Any, dl_type: str): - """ - 获取种子保存路径 - """ - try: - return torrent.get("save_path") if dl_type == "qbittorrent" else torrent.download_dir - except Exception as e: - print(str(e)) - return "" - - @staticmethod - def __can_seeding(torrent: Any, dl_type: str): - """ - 判断种子是否可以做种并处于暂停状态 - """ - try: - return (torrent.get("state") == "pausedUP") if dl_type == "qbittorrent" \ - else (torrent.status.stopped and torrent.percent_done == 1) - except Exception as e: - print(str(e)) - return False - - @staticmethod - def __convert_save_path(save_path: str, from_root: str, to_root: str): - """ - 转换保存路径 - """ - try: - # 没有保存目录,以目的根目录为准 - if not save_path: - return to_root - # 没有设置根目录时返回save_path - if not to_root or not from_root: - return save_path - # 统一目录格式 - save_path = os.path.normpath(save_path).replace("\\", "/") - from_root = 
os.path.normpath(from_root).replace("\\", "/") - to_root = os.path.normpath(to_root).replace("\\", "/") - # 替换根目录 - if save_path.startswith(from_root): - return save_path.replace(from_root, to_root, 1) - except Exception as e: - print(str(e)) - return None - - def stop_service(self): - """ - 退出插件 - """ - try: - if self._scheduler: - self._scheduler.remove_all_jobs() - if self._scheduler.running: - self._event.set() - self._scheduler.shutdown() - self._event.clear() - self._scheduler = None - except Exception as e: - print(str(e)) diff --git a/app/plugins/webhook/__init__.py b/app/plugins/webhook/__init__.py deleted file mode 100644 index c735d54f..00000000 --- a/app/plugins/webhook/__init__.py +++ /dev/null @@ -1,184 +0,0 @@ -from app.plugins import _PluginBase -from app.core.event import eventmanager -from app.schemas.types import EventType -from app.utils.http import RequestUtils -from typing import Any, List, Dict, Tuple -from app.log import logger - - -class WebHook(_PluginBase): - # 插件名称 - plugin_name = "Webhook" - # 插件描述 - plugin_desc = "事件发生时向第三方地址发送请求。" - # 插件图标 - plugin_icon = "webhook.png" - # 主题色 - plugin_color = "#C73A63" - # 插件版本 - plugin_version = "1.0" - # 插件作者 - plugin_author = "jxxghp" - # 作者主页 - author_url = "https://github.com/jxxghp" - # 插件配置项ID前缀 - plugin_config_prefix = "webhook_" - # 加载顺序 - plugin_order = 14 - # 可使用的用户级别 - auth_level = 1 - - # 私有属性 - _webhook_url = None - _method = None - _enabled = False - - def init_plugin(self, config: dict = None): - if config: - self._enabled = config.get("enabled") - self._webhook_url = config.get("webhook_url") - self._method = config.get('request_method') - - def get_state(self) -> bool: - return self._enabled - - @staticmethod - def get_command() -> List[Dict[str, Any]]: - pass - - def get_api(self) -> List[Dict[str, Any]]: - pass - - def get_form(self) -> Tuple[List[dict], Dict[str, Any]]: - """ - 拼装插件配置页面,需要返回两块数据:1、页面配置;2、数据结构 - """ - request_options = ["POST", "GET"] - return [ - { - 'component': 'VForm', - 'content': [ - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 6 - }, - 'content': [ - { - 'component': 'VSwitch', - 'props': { - 'model': 'enabled', - 'label': '启用插件', - } - } - ] - } - ] - }, - { - 'component': 'VRow', - 'content': [ - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 4 - }, - 'content': [ - { - 'component': 'VSelect', - 'props': { - 'model': 'request_method', - 'label': '请求方式', - 'items': request_options - } - } - ] - }, - { - 'component': 'VCol', - 'props': { - 'cols': 12, - 'md': 8 - }, - 'content': [ - { - 'component': 'VTextField', - 'props': { - 'model': 'webhook_url', - 'label': 'webhook地址' - } - } - ] - } - ] - }, - ] - } - ], { - "enabled": False, - "request_method": "POST", - "webhook_url": "" - } - - def get_page(self) -> List[dict]: - pass - - @eventmanager.register(EventType) - def send(self, event): - """ - 向第三方Webhook发送请求 - """ - if not self._enabled or not self._webhook_url: - return - - def __to_dict(_event): - """ - 递归将对象转换为字典 - """ - if isinstance(_event, dict): - for k, v in _event.items(): - _event[k] = __to_dict(v) - return _event - elif isinstance(_event, list): - for i in range(len(_event)): - _event[i] = __to_dict(_event[i]) - return _event - elif isinstance(_event, tuple): - return tuple(__to_dict(list(_event))) - elif isinstance(_event, set): - return set(__to_dict(list(_event))) - elif hasattr(_event, 'to_dict'): - return __to_dict(_event.to_dict()) - elif hasattr(_event, '__dict__'): - return 
__to_dict(_event.__dict__) - elif isinstance(_event, (int, float, str, bool, type(None))): - return _event - else: - return str(_event) - - event_info = { - "type": event.event_type, - "data": __to_dict(event.event_data) - } - - if self._method == 'POST': - ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info) - else: - ret = RequestUtils().get_res(self._webhook_url, params=event_info) - if ret: - logger.info("发送成功:%s" % self._webhook_url) - elif ret is not None: - logger.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}") - else: - logger.error("发送失败,未获取到返回信息") - - def stop_service(self): - """ - 退出插件 - """ - pass diff --git a/update b/update index daa5c9d3..74404070 100644 --- a/update +++ b/update @@ -20,6 +20,9 @@ if [ "${MOVIEPILOT_AUTO_UPDATE_DEV}" = "true" ]; then pip install ${PIP_OPTIONS} -r /tmp/app/requirements.txt if [ $? -eq 0 ]; then echo "安装依赖成功" + # 下载插件 + echo "正在下载插件..." + curl ${CURL_OPTIONS} "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d /tmp - # 检查前端最新版本 frontend_version=$(curl ${CURL_OPTIONS} "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) if [[ "${frontend_version}" == *v* ]]; then @@ -31,6 +34,7 @@ if [ "${MOVIEPILOT_AUTO_UPDATE_DEV}" = "true" ]; then mv /tmp/app /app rm -rf /public mv /tmp/dist /public + mv /tmp/MoviePilot-Plugins-main/plugins/* /app/app/plugins/ echo "程序更新成功,前端版本:${frontend_version}" else echo "前端程序下载失败,继续使用旧的程序来启动..."
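
Editor's note on the `update` hunk above: the added plugin step pipes the GitHub archive straight into busybox unzip without checking the result, and the later `mv /tmp/MoviePilot-Plugins-main/plugins/* /app/app/plugins/` assumes the download succeeded; if the fetch fails, that mv would simply error while the rest of the update continues. Below is a minimal sketch of a guarded variant, reusing the URL, CURL_OPTIONS and paths from the diff. The fallback echo is illustrative and not part of the original script, and the real script performs the mv later, in the same branch that installs the frontend files.

    # 下载插件
    echo "正在下载插件..."
    if curl ${CURL_OPTIONS} "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d /tmp -; then
        # only overwrite the local plugins once the archive unpacked cleanly
        mv /tmp/MoviePilot-Plugins-main/plugins/* /app/app/plugins/
    else
        # hypothetical fallback: keep the currently installed plugins and continue the update
        echo "插件下载失败,继续使用旧插件..."
    fi

Since the pipeline's exit status is that of busybox unzip, a failed or truncated download drops into the else branch instead of leaving a half-populated /tmp directory behind.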