feat: share the torrents cache globally

jxxghp
2023-09-09 17:42:31 +08:00
parent 27238ac467
commit 5bcbacf3a5
7 changed files with 28 additions and 42 deletions
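The change boils down to two mechanisms: TorrentsChain becomes a process-wide singleton, and its browse() results are memoized with a cachetools TTLCache, so subscriptions, brushing, and any other consumer share one cached torrent list per site instead of re-fetching the homepage. Below is a minimal, self-contained sketch of that combination; the class name TorrentsChainSketch and the stubbed fetch are hypothetical, and only the Singleton/TTLCache wiring mirrors the diff that follows.

from cachetools import TTLCache, cached


class Singleton(type):
    # Assumed to mirror app.utils.singleton.Singleton: one shared instance per class
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class TorrentsChainSketch(metaclass=Singleton):

    @cached(cache=TTLCache(maxsize=128, ttl=600))
    def browse(self, domain: str) -> list:
        # Hypothetical fetch; the real chain delegates to self.refresh_torrents(site=...)
        print(f"fetching latest torrents from {domain} ...")
        return [f"torrent from {domain}"]


a, b = TorrentsChainSketch(), TorrentsChainSketch()
assert a is b                  # every caller gets the same chain instance
a.browse("example.org")        # real fetch
b.browse("example.org")        # served from the 10-minute TTL cache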

View File

@@ -197,7 +197,7 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("search_medias", meta=meta)
def search_torrents(self, site: CommentedMap,
mediainfo: Optional[MediaInfo] = None,
mediainfo: MediaInfo,
keyword: str = None,
page: int = 0,
area: str = "title") -> List[TorrentInfo]:

View File

@@ -76,22 +76,6 @@ class SearchChain(ChainBase):
print(str(e))
return []
def browse(self, domain: str, keyword: str = None) -> List[TorrentInfo]:
"""
Browse the site homepage content
:param domain: site domain
:param keyword: keyword; when set, a search is performed instead
"""
if not keyword:
logger.info(f'Start browsing site homepage content, site: {domain} ...')
else:
logger.info(f'Start searching resources, keyword: {keyword}, site: {domain} ...')
site = self.siteshelper.get_indexer(domain)
if not site:
logger.error(f'Site {domain} does not exist!')
return []
return self.search_torrents(site=site, keyword=keyword)
def process(self, mediainfo: MediaInfo,
keyword: str = None,
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,

View File

@@ -375,14 +375,14 @@ class SubscribeChain(ChainBase):
def refresh(self):
"""
Refresh subscriptions
Subscription refresh
"""
# Query all subscriptions
subscribes = self.subscribeoper.list('R')
if not subscribes:
# Do not run when there are no subscriptions
return
# Refresh site resources and match subscriptions from the cache
# Trigger a refresh of site resources and match subscriptions from the cache
self.match(
self.torrentschain.refresh()
)
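In other words, SubscribeChain no longer searches sites itself; it asks the shared TorrentsChain for its cached contexts and matches subscriptions against them. A rough sketch of that consumption pattern, using stand-in functions and fabricated data rather than the project's real signatures:

from typing import Dict, List


def refresh_shared_cache() -> Dict[str, List[str]]:
    # Stand-in for TorrentsChain.refresh(): cached torrent contexts keyed by site domain
    return {"example.org": ["torrent A", "torrent B"]}


def match(cache: Dict[str, List[str]]) -> None:
    # Stand-in for SubscribeChain.match(): walk the shared cache instead of querying sites
    for domain, torrents in cache.items():
        for torrent in torrents:
            print(f"matching {torrent} from {domain} against active subscriptions")


match(refresh_shared_cache())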

View File

@@ -1,6 +1,6 @@
from datetime import datetime
from typing import Dict, List, Union
from cachetools import cached, TTLCache
from requests import Session
from app.chain import ChainBase
@@ -12,17 +12,16 @@ from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import Notification
from app.schemas.types import SystemConfigKey, MessageChannel
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
from app.utils.timer import TimerUtils
class TorrentsChain(ChainBase):
class TorrentsChain(ChainBase, metaclass=Singleton):
"""
Torrent refresh processing chain
Site homepage torrent processing chain, serving subscriptions, brushing, etc.
"""
_cache_file = "__torrents_cache__"
_last_refresh_time = None
def __init__(self, db: Session = None):
super().__init__(db)
@@ -46,17 +45,23 @@ class TorrentsChain(ChainBase):
# Read the cache
return self.load_cache(self._cache_file) or {}
@cached(cache=TTLCache(maxsize=128, ttl=600))
def browse(self, domain: str) -> List[TorrentInfo]:
"""
Browse the site homepage content and return the torrent list; results are TTL-cached for 10 minutes
:param domain: site domain
"""
logger.info(f'Start fetching the latest torrents from site {domain} ...')
site = self.siteshelper.get_indexer(domain)
if not site:
logger.error(f'Site {domain} does not exist!')
return []
return self.refresh_torrents(site=site)
def refresh(self) -> Dict[str, List[Context]]:
"""
Refresh the latest resources from sites
Refresh the latest resources from sites, recognize and cache them
"""
# Throttle refreshes: the interval must not be less than 10 minutes
if self._last_refresh_time and TimerUtils.diff_minutes(self._last_refresh_time) < 10:
logger.warn(f'Torrent refresh triggered too frequently, skipping this refresh')
return self.get_torrents()
# Record the refresh time
self._last_refresh_time = datetime.now()
# Read the cache
torrents_cache = self.get_torrents()
@@ -70,9 +75,8 @@ class TorrentsChain(ChainBase):
# Do not search sites that are not enabled
if config_indexers and str(indexer.get("id")) not in config_indexers:
continue
logger.info(f'Start refreshing the latest torrents from {indexer.get("name")} ...')
domain = StringUtils.get_url_domain(indexer.get("domain"))
torrents: List[TorrentInfo] = self.refresh_torrents(site=indexer)
torrents: List[TorrentInfo] = self.browse(domain=domain)
# Sort by pubdate in descending order
torrents.sort(key=lambda x: x.pubdate or '', reverse=True)
# Take the top N entries
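Because each indexer URL is reduced to its bare domain before calling browse(), the TTL cache is keyed per domain: two indexers resolving to the same domain, or repeated refreshes within ten minutes, reuse one cached list rather than hitting the site again. A small standalone illustration of that keying, assuming cachetools' default behavior of building the cache key from the call arguments (the browse stub is not the project's code):

from cachetools import TTLCache, cached

calls = []


@cached(cache=TTLCache(maxsize=128, ttl=600))
def browse(domain: str) -> list:
    calls.append(domain)              # record only real fetches
    return [f"latest torrents from {domain}"]


browse("example.org")
browse("example.org")                 # same domain within the TTL: cache hit
assert calls == ["example.org"]       # only one real fetch was performed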