This commit is contained in:
jxxghp
2024-06-21 21:28:48 +08:00
parent e0a251b339
commit 37985eba25
8 changed files with 170 additions and 129 deletions

View File

@ -2,7 +2,7 @@ import copy
import time
from pathlib import Path
from threading import Lock
from typing import Optional, List, Tuple
from typing import Optional, List, Tuple, Union
from app import schemas
from app.chain import ChainBase
@ -18,6 +18,7 @@ from app.schemas.types import EventType, MediaType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
recognize_lock = Lock()
@ -31,8 +32,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 临时识别结果 {title, name, year, season, episode}
recognize_temp: Optional[dict] = None
def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
                 season: Optional[int] = None, episode: Optional[int] = None) -> Optional[str]:
    """
    Get the NFO file content text for the given media.

    :param meta: parsed metadata of the file/folder name
    :param mediainfo: recognized media information
    :param season: season number, optional
    :param episode: episode number, optional
    :return: NFO content text, or None when no module produced a result
    """
    # Delegate to the module chain; the first module implementing
    # "metadata_nfo" supplies the result.
    return self.run_module("metadata_nfo", meta=meta, mediainfo=mediainfo, season=season, episode=episode)
def recognize_by_meta(self, metainfo: MetaBase) -> Optional[MediaInfo]:
"""
@ -332,42 +333,66 @@ class MediaChain(ChainBase, metaclass=Singleton):
)
return None
def scrape_metadata_online(self, storage: str, fileitem: schemas.FileItem,
meta: MetaBase, mediainfo: MediaInfo, init_folder: bool = True):
def manual_scrape(self, storage: str, fileitem: schemas.FileItem,
meta: MetaBase, mediainfo: MediaInfo, init_folder: bool = True):
"""
远程刮削媒体信息(网盘等)
手动刮削媒体信息
"""
def __list_files(_storage: str, _fileid: str, _path: str = None, _drive_id: str = None):
    """
    List the immediate children of a directory on the given storage.

    :param _storage: storage type ("aliyun", "u115", anything else = local filesystem)
    :param _fileid: remote directory file id (unused for local storage)
    :param _path: directory path
    :param _drive_id: Aliyun drive id (only used for "aliyun")
    :return: list of FileItem objects (remote helpers return their own item lists)
    """
    if _storage == "aliyun":
        return AliyunHelper().list(drive_id=_drive_id, parent_file_id=_fileid, path=_path)
    elif _storage == "u115":
        return U115Helper().list(parent_file_id=_fileid, path=_path)
    else:
        # Local filesystem: wrap directory entries as FileItem objects.
        result = []
        for item in SystemUtils.list_sub_all(Path(_path)):
            stat = item.stat()  # single stat() per entry instead of two
            result.append(schemas.FileItem(
                type="file" if item.is_file() else "dir",
                path=str(item),
                name=item.name,
                basename=item.stem,
                extension=item.suffix[1:],
                size=stat.st_size,
                modify_time=stat.st_mtime
            ))
        return result
def __upload_file(_storage: str, _fileid: str, _path: Path):
if _storage == "aliyun":
return AliyunHelper().upload(parent_file_id=_fileid, file_path=_path)
if _storage == "u115":
return U115Helper().upload(parent_file_id=_fileid, file_path=_path)
def __save_file(_storage: str, _drive_id: str, _fileid: str, _path: Path, _content: Union[bytes, str]):
    """
    Save file content locally or upload it to a remote storage.

    :param _storage: storage type ("local" saves in place; "aliyun"/"u115" upload)
    :param _drive_id: Aliyun drive id (only used for "aliyun")
    :param _fileid: remote parent directory file id
    :param _path: target file path (its name is reused for the remote file)
    :param _content: file payload; NFO text may arrive as str, images as bytes
    """
    if _content is None:
        # Upstream download may have failed (e.g. __save_image returned None)
        logger.warn(f"{_path.name} 内容为空,跳过保存")
        return
    # write_bytes() requires bytes; encode str payloads (e.g. NFO XML text).
    data = _content if isinstance(_content, bytes) else _content.encode("utf-8")
    if _storage != "local":
        # Stage the payload in the temp directory, then upload it.
        temp_path = settings.TEMP_PATH / _path.name
        temp_path.write_bytes(data)
        logger.info(f"正在上传 {_path.name} ...")
        if _storage == "aliyun":
            AliyunHelper().upload(drive_id=_drive_id, parent_file_id=_fileid, file_path=temp_path)
        elif _storage == "u115":
            U115Helper().upload(parent_file_id=_fileid, file_path=temp_path)
        logger.info(f"{_path.name} 上传完成")
    else:
        # Save directly to the local filesystem.
        logger.info(f"正在保存 {_path.name} ...")
        _path.write_bytes(data)
        logger.info(f"{_path} 已保存")
def __save_image(u: str, f: Path):
def __save_image(_url: str) -> Optional[bytes]:
    """
    Download an image and return its raw content.

    :param _url: HTTP(S) URL of the image
    :return: image content bytes, or None when the download fails
    """
    try:
        logger.info(f"正在下载图片:{_url} ...")
        r = RequestUtils(proxies=settings.PROXY).get_res(url=_url)
        if r:
            return r.content
        logger.info(f"{_url} 图片下载失败,请检查网络连通性!")
    except Exception as err:
        logger.error(f"{_url} 图片下载失败:{str(err)}")
    # Explicit None on every failure path so callers can guard on it.
    return None
# 当前文件路径
filepath = Path(fileitem.path)
@ -380,27 +405,24 @@ class MediaChain(ChainBase, metaclass=Singleton):
if fileitem.type == "file":
# 电影文件
logger.info(f"正在生成电影nfo{mediainfo.title_year} - {filepath.name}")
movie_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo)
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if not movie_nfo:
logger.warn(f"{filepath.name} nfo文件生成失败")
return
# 写入到临时目录
nfo_path = settings.TEMP_PATH / f"{filepath.stem}.nfo"
nfo_path.write_bytes(movie_nfo)
# 上传NFO文件
logger.info(f"上传NFO文件{nfo_path.name} ...")
__upload_file(storage, fileitem.parent_fileid, nfo_path)
logger.info(f"{nfo_path.name} 上传成功")
# 保存或上传nfo文件
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.parent_fileid,
_path=filepath.with_suffix(".nfo"), _content=movie_nfo)
else:
# 电影目录
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
_drive_id=fileitem.drive_id, _path=fileitem.path)
for file in files:
self.scrape_metadata_online(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
# 生成图片文件和上传
self.manual_scrape(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
# 生成目录内图片文件
if init_folder:
# 图片
for attr_name, attr_value in vars(mediainfo).items():
if attr_value \
and attr_name.endswith("_path") \
@ -408,13 +430,12 @@ class MediaChain(ChainBase, metaclass=Singleton):
and isinstance(attr_value, str) \
and attr_value.startswith("http"):
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
image_path = filepath / image_name
# 下载图片
content = __save_image(_url=attr_value)
# 写入nfo到根目录
image_path = settings.TEMP_PATH / image_name
__save_image(attr_value, image_path)
# 上传图片文件到当前目录
logger.info(f"上传图片文件:{image_path.name} ...")
__upload_file(storage, fileitem.fileid, image_path)
logger.info(f"{image_path.name} 上传成功")
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=image_path, _content=content)
else:
# 电视剧
if fileitem.type == "file":
@ -428,94 +449,83 @@ class MediaChain(ChainBase, metaclass=Singleton):
logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
return
# 获取集的nfo文件
episode_nfo = self.meta_nfo(meta=file_meta, mediainfo=file_mediainfo,
season=file_meta.begin_season, episode=file_meta.begin_episode)
episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo,
season=file_meta.begin_season, episode=file_meta.begin_episode)
if not episode_nfo:
logger.warn(f"{filepath.name} nfo生成失败")
return
# 写入到临时目录
nfo_path = settings.TEMP_PATH / f"{filepath.stem}.nfo"
nfo_path.write_bytes(episode_nfo)
# 上传NFO文件到文件当前目录下
logger.info(f"上传NFO文件{nfo_path.name} ...")
__upload_file(storage, fileitem.parent_fileid, nfo_path)
logger.info(f"{nfo_path.name} 上传成功")
# 保存或上传nfo文件
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.parent_fileid,
_path=filepath.with_suffix(".nfo"), _content=episode_nfo)
elif meta.begin_season:
# 当前为季的目录,处理目录内的文件
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
_drive_id=fileitem.drive_id, _path=fileitem.path)
for file in files:
self.scrape_metadata_online(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
self.manual_scrape(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
# 生成季的nfo和图片
if init_folder:
# 季nfo
season_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo, season=meta.begin_season)
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo, season=meta.begin_season)
if not season_nfo:
logger.warn(f"无法生成电视剧季nfo文件{meta.name}")
return
# 写入nfo到根目录
nfo_path = settings.TEMP_PATH / "season.nfo"
nfo_path.write_bytes(season_nfo)
# 上传NFO文件
logger.info(f"上传NFO文件{nfo_path.name} ...")
__upload_file(storage, fileitem.fileid, nfo_path)
logger.info(f"{nfo_path.name} 上传成功")
nfo_path = filepath / "season.nfo"
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=nfo_path, _content=season_nfo)
# TMDB季poster图片
sea_seq = str(meta.begin_season).rjust(2, '0')
# 查询季剧详情
seasoninfo = self.tmdb_info(tmdbid=mediainfo.tmdb_id, mtype=MediaType.TV,
season=meta.begin_season)
if not seasoninfo:
logger.warn(f"无法获取 {mediainfo.title_year}{meta.begin_season}季 的媒体信息!")
return
if seasoninfo.get("poster_path"):
# 下载图片
ext = Path(seasoninfo.get('poster_path')).suffix
url = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{seasoninfo.get('poster_path')}"
image_path = filepath.parent.with_name(f"season{sea_seq}-poster{ext}")
__save_image(url, image_path)
# 上传图片文件到当前目录
logger.info(f"上传图片文件:{image_path.name} ...")
__upload_file(storage, fileitem.fileid, image_path)
logger.info(f"{image_path.name} 上传成功")
# 季的其它图片
for attr_name, attr_value in vars(mediainfo).items():
if attr_value \
and attr_name.startswith("season") \
and not attr_name.endswith("poster_path") \
and attr_value \
and isinstance(attr_value, str) \
and attr_value.startswith("http"):
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
image_path = filepath.parent.with_name(image_name)
__save_image(attr_value, image_path)
# 上传图片文件到当前目录
logger.info(f"上传图片文件:{image_path.name} ...")
__upload_file(storage, fileitem.fileid, image_path)
logger.info(f"{image_path.name} 上传成功")
if settings.SCRAP_SOURCE == "themoviedb":
sea_seq = str(meta.begin_season).rjust(2, '0')
# 查询季剧详情
seasoninfo = self.tmdb_info(tmdbid=mediainfo.tmdb_id, mtype=MediaType.TV,
season=meta.begin_season)
if not seasoninfo:
logger.warn(f"无法获取 {mediainfo.title_year}{meta.begin_season}季 的媒体信息!")
return
if seasoninfo.get("poster_path"):
# 下载图片
content = __save_image(f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original"
f"{seasoninfo.get('poster_path')}")
image_path = filepath.with_name(f"season{sea_seq}"
f"-poster{Path(seasoninfo.get('poster_path')).suffix}")
# 保存图片文件到当前目录
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=image_path, _content=content)
# 季的其它图片
for attr_name, attr_value in vars(mediainfo).items():
if attr_value \
and attr_name.startswith("season") \
and not attr_name.endswith("poster_path") \
and attr_value \
and isinstance(attr_value, str) \
and attr_value.startswith("http"):
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
image_path = filepath.parent.with_name(image_name)
content = __save_image(attr_value)
# 保存图片文件到当前目录
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=image_path, _content=content)
else:
# 当前为根目录,处理目录内的文件
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
_drive_id=fileitem.drive_id, _path=fileitem.path)
for file in files:
self.scrape_metadata_online(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
self.manual_scrape(storage=storage, fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False)
# 生成根目录的nfo和图片
if init_folder:
tv_nfo = self.meta_nfo(meta=meta, mediainfo=mediainfo)
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if not tv_nfo:
logger.warn(f"无法生成电视剧nfo文件{meta.name}")
return
# 写入nfo到根目录
nfo_path = settings.TEMP_PATH / "tvshow.nfo"
nfo_path.write_bytes(tv_nfo)
# 上传NFO文件
logger.info(f"上传NFO文件{nfo_path.name} ...")
__upload_file(storage, fileitem.fileid, nfo_path)
logger.info(f"{nfo_path.name} 上传成功")
nfo_path = filepath / "tvshow.nfo"
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=nfo_path, _content=tv_nfo)
# 生成根目录图片
for attr_name, attr_value in vars(mediainfo).items():
if attr_name \
@ -526,10 +536,9 @@ class MediaChain(ChainBase, metaclass=Singleton):
and attr_value.startswith("http"):
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
image_path = filepath.parent.with_name(image_name)
__save_image(attr_value, image_path)
# 上传图片文件到当前目录
logger.info(f"上传图片文件:{image_path.name} ...")
__upload_file(storage, fileitem.fileid, image_path)
logger.info(f"{image_path.name} 上传成功")
content = __save_image(attr_value)
# 保存图片文件到当前目录
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
_path=image_path, _content=content)
logger.info(f"{filepath.name} 刮削完成")