jxxghp
2024-06-21 11:49:07 +08:00
parent bfbeae7fa7
commit 5deb0089bb
4 changed files with 84 additions and 12 deletions

View File

@@ -98,27 +98,30 @@ def search(title: str,
 @router.get("/scrape", summary="刮削媒体信息", response_model=schemas.Response)
-def scrape(path: str, storage: str = "local",
+def scrape(fileitem: schemas.FileItem,
+           storage: str = "local",
            _: schemas.TokenPayload = Depends(verify_token)) -> Any:
     """
     Scrape media metadata
     """
-    if not path:
+    if not fileitem:
         return schemas.Response(success=False, message="刮削路径无效")
     chain = MediaChain()
     # Recognize media info
-    meta = MetaInfoPath(path)
+    meta = MetaInfoPath(fileitem.path)
     mediainfo = chain.recognize_media(meta)
     if not mediainfo:
         return schemas.Response(success=False, message="刮削失败,无法识别媒体信息")
     if storage == "local":
-        scrape_path = Path(path)
+        scrape_path = Path(fileitem.path)
         if not scrape_path.exists():
             return schemas.Response(success=False, message="刮削路径不存在")
         # Scrape
         chain.scrape_metadata(path=scrape_path, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE)
     else:
-        chain.scrape_metadata_online(storage=storage, path=path, mediainfo=mediainfo)
+        if not fileitem.fileid:
+            return schemas.Response(success=False, message="刮削文件ID无效")
+        chain.scrape_metadata_online(storage=storage, fileitem=fileitem, meta=meta, mediainfo=mediainfo)
     return schemas.Response(success=True, message="刮削完成")
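
For reference, the endpoint now reads a schemas.FileItem from the request body instead of a path query parameter. A minimal client-side sketch follows; the /api/v1 prefix, the token query parameter, and the exact FileItem field set (path, type, fileid, parent_fileid, inferred from their usage in this diff) are assumptions, not confirmed by the commit:

import requests

# Placeholder values; fileid/parent_fileid only matter for non-local storages
payload = {
    "path": "/Movies/Dune (2021)/Dune.2021.mkv",
    "type": "file",
    "fileid": "file-id-123",
    "parent_fileid": "folder-id-456",
}
res = requests.request(
    "GET",
    "http://localhost:3001/api/v1/scrape",                  # assumed host and API prefix
    params={"storage": "aliyun", "token": "<api-token>"},    # assumed auth style
    json=payload,                                            # FastAPI binds the FileItem model from the body
)
print(res.json())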

View File

@@ -4,11 +4,15 @@ from pathlib import Path
 from threading import Lock
 from typing import Optional, List, Tuple
 
+from app import schemas
 from app.chain import ChainBase
+from app.core.config import settings
 from app.core.context import Context, MediaInfo
 from app.core.event import eventmanager, Event
 from app.core.meta import MetaBase
 from app.core.metainfo import MetaInfo, MetaInfoPath
+from app.helper.aliyun import AliyunHelper
+from app.helper.u115 import U115Helper
 from app.log import logger
 from app.schemas.types import EventType, MediaType
 from app.utils.singleton import Singleton
@@ -327,9 +331,73 @@ class MediaChain(ChainBase, metaclass=Singleton):
             )
         return None
 
-    def scrape_metadata_online(self, storage: str, path: str, mediainfo: MediaInfo):
+    def scrape_metadata_online(self, storage: str, fileitem: schemas.FileItem,
+                               meta: MetaBase, mediainfo: MediaInfo):
         """
-        远程刮削媒体信息(网盘等)
+        Scrape media metadata remotely (cloud drives, etc.)
         """
-        # TODO: scrape media metadata remotely
-        pass
+
+        def __list_files(s: str, f: str):
+            if s == "aliyun":
+                return AliyunHelper().list(parent_file_id=f)
+            if s == "u115":
+                return U115Helper().list(parent_file_id=f)
+            return []
+
+        def __upload_file(s: str, p: str, f: Path):
+            if s == "aliyun":
+                return AliyunHelper().upload(parent_file_id=p, file_path=f)
+            if s == "u115":
+                return U115Helper().upload(parent_file_id=p, file_path=f)
+
+        if storage not in ["aliyun", "u115"]:
+            logger.warn(f"不支持的存储类型:{storage}")
+            return
+        filepath = Path(fileitem.path)
+        if mediainfo.type == MediaType.MOVIE:
+            if fileitem.type == "file":
+                # Movie file
+                movie_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo)
+                if not movie_nfo:
+                    logger.warn(f"无法生成电影NFO文件{meta.name}")
+                    return
+                # Write the NFO to the temp directory
+                nfo_path = settings.TEMP_PATH / f"{filepath.stem}.nfo"
+                nfo_path.write_bytes(movie_nfo)
+                # Upload the NFO file
+                __upload_file(storage, fileitem.parent_fileid, nfo_path)
+            else:
+                # Movie directory
+                files = __list_files(storage, fileitem.fileid)
+                for file in files:
+                    self.scrape_metadata_online(storage=storage, fileitem=schemas.FileItem(**file),
+                                                meta=meta, mediainfo=mediainfo)
+        else:
+            # TV series
+            if fileitem.type == "file":
+                # Episode file
+                tv_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo, season=meta.begin_season, episode=meta.begin_episode)
+                if not tv_nfo:
+                    logger.warn(f"无法生成电视剧NFO文件{meta.name}")
+                    return
+                # Write the NFO to the temp directory
+                nfo_path = settings.TEMP_PATH / f"{filepath.stem}.nfo"
+                nfo_path.write_bytes(tv_nfo)
+                # Upload the NFO file
+                __upload_file(storage, fileitem.parent_fileid, nfo_path)
+            else:
+                # Show root directory
+                tv_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo)
+                if not tv_nfo:
+                    logger.warn(f"无法生成电视剧NFO文件{meta.name}")
+                    return
+                # Write tvshow.nfo to the root directory
+                nfo_path = settings.TEMP_PATH / f"tvshow.nfo"
+                nfo_path.write_bytes(tv_nfo)
+                # Upload the NFO file
+                __upload_file(storage, fileitem.fileid, nfo_path)
+                # Recursively scrape files and subdirectories
+                files = __list_files(storage, fileitem.fileid)
+                for file in files:
+                    self.scrape_metadata_online(storage=storage, fileitem=schemas.FileItem(**file),
+                                                meta=meta, mediainfo=mediainfo)
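
Since scrape_metadata_online wraps each listed child in a new FileItem and calls itself, one call on a top-level cloud-drive directory walks the whole tree. A minimal usage sketch, assuming MediaChain lives in app.chain.media and that directories carry a non-"file" type value; the ids are placeholders:

from app import schemas
from app.chain.media import MediaChain
from app.core.metainfo import MetaInfoPath

chain = MediaChain()
# Placeholder item pointing at a movie folder on an Aliyun drive
fileitem = schemas.FileItem(
    path="/Movies/Dune (2021)",
    type="dir",                      # assumed value for directories; the code only checks for "file"
    fileid="folder-id-456",
    parent_fileid="parent-id-789",
)
meta = MetaInfoPath(fileitem.path)   # same recognition flow as the /scrape endpoint
mediainfo = chain.recognize_media(meta)
if mediainfo:
    chain.scrape_metadata_online(storage="aliyun", fileitem=fileitem,
                                 meta=meta, mediainfo=mediainfo)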

View File

@@ -515,7 +515,7 @@ class AliyunHelper:
             self.__handle_error(res, "移动文件")
         return False
 
-    def upload(self, parent_file_id: str, name: str, filepath: Path) -> Optional[dict]:
+    def upload(self, parent_file_id: str, file_path: Path) -> Optional[dict]:
         """
         Upload a file and mark it complete
         """
@@ -526,7 +526,7 @@
         res = RequestUtils(headers=headers, timeout=10).post_res(self.create_file_url, json={
             "drive_id": params.get("resourceDriveId"),
             "parent_file_id": parent_file_id,
-            "name": name,
+            "name": file_path.name,
             "type": "file",
             "check_name_mode": "refuse"
         })
@@ -543,7 +543,7 @@
         # Upload URL
         upload_url = part_info_list[0].get("upload_url")
         # Upload the file
-        res = RequestUtils(headers=headers).put_res(upload_url, data=filepath.read_bytes())
+        res = RequestUtils(headers=headers).put_res(upload_url, data=file_path.read_bytes())
         if not res:
             self.__handle_error(res, "上传文件")
             return None
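
Folding the separate name argument into file_path gives AliyunHelper.upload the same upload(parent_file_id, file_path) shape as U115Helper.upload below, which is what lets the __upload_file closure in MediaChain dispatch to either backend uniformly. A small sketch with placeholder values:

from pathlib import Path

from app.helper.aliyun import AliyunHelper

# The file name sent to the drive is now derived from file_path.name
nfo_path = Path("/tmp/Dune.2021.nfo")    # placeholder local file
result = AliyunHelper().upload(parent_file_id="folder-id-456", file_path=nfo_path)
if result is None:
    print("upload failed")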

View File

@@ -1,4 +1,5 @@
 import base64
+from pathlib import Path
 from typing import Optional, Tuple, Generator
 
 import oss2
@@ -211,7 +212,7 @@ class U115Helper(metaclass=Singleton):
             logger.error(f"移动115文件失败{str(e)}")
             return False
 
-    def upload(self, file_path: str, parent_file_id: str) -> Optional[dict]:
+    def upload(self, parent_file_id: str, file_path: Path) -> Optional[dict]:
         """
         Upload a file
         """