fix the issue where subscriptions are not refreshed when subscription sites are specified in RSS mode

jxxghp 2023-09-18 17:05:08 +08:00
parent f104c903ec
commit 03a07ac7bf
3 changed files with 11 additions and 10 deletions
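
Judging from the hunks below, the root cause appears to be a type mismatch: the old code stringified the configured site IDs, but a subscription that specifies its own sites passes them in as integers, so the membership check never matched and every site was skipped. A minimal, self-contained sketch of the before/after comparison (the IDs and the indexer dict here are made up for illustration):

# Site IDs as a subscription passes them in: plain integers
sites = [1, 2, 3]
indexer = {"id": 2, "domain": "example.org"}

# Old check: a string is never equal to an int, so every site was skipped
print(str(indexer.get("id")) not in sites)   # True -> 'continue', nothing gets refreshed

# New check: compare the raw integer IDs
print(indexer.get("id") not in sites)        # False -> the site is refreshed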

View File

@@ -234,14 +234,14 @@ def read_rss_sites(db: Session = Depends(get_db)) -> List[dict]:
     Get the list of sites
     """
     # Selected RSS sites
-    rss_sites = SystemConfigOper().get(SystemConfigKey.RssSites)
+    selected_sites = SystemConfigOper().get(SystemConfigKey.RssSites) or []
     # All sites
     all_site = Site.list_order_by_pri(db)
-    if not rss_sites or not all_site:
+    if not selected_sites or not all_site:
         return []
     # Selected RSS sites
-    rss_sites = [site for site in all_site if site and site.id in rss_sites]
+    rss_sites = [site for site in all_site if site and site.id in selected_sites]
     return rss_sites
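
For the API endpoint above, the rename keeps the configured ID list (selected_sites) separate from the returned site objects, and the `or []` default normalises an unset SystemConfigKey.RssSites to an empty list. A rough sketch of the same filter with stand-in objects (SimpleNamespace replaces the real Site model here):

from types import SimpleNamespace

all_site = [SimpleNamespace(id=1), SimpleNamespace(id=2), SimpleNamespace(id=3)]
selected_sites = [2, 3]   # what SystemConfigKey.RssSites might hold

# Same comprehension as the fixed endpoint: keep only the selected sites
rss_sites = [site for site in all_site if site and site.id in selected_sites]
print([s.id for s in rss_sites])   # [2, 3]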

View File

@@ -247,14 +247,14 @@ class SearchChain(ChainBase):
         """
         # Sites that are not enabled are not searched
         indexer_sites = []
         # Configured indexer sites
-        if sites:
-            config_indexers = [str(sid) for sid in sites]
-        else:
-            config_indexers = [str(sid) for sid in self.systemconfig.get(SystemConfigKey.IndexerSites) or []]
+        if not sites:
+            sites = self.systemconfig.get(SystemConfigKey.IndexerSites) or []
         for indexer in self.siteshelper.get_indexers():
             # Check the site's indexer switch
-            if not config_indexers or str(indexer.get("id")) in config_indexers:
+            if not sites or indexer.get("id") in sites:
                 # Site rate limiting
                 state, msg = self.siteshelper.check(indexer.get("domain"))
                 if state:
@@ -264,6 +264,7 @@ class SearchChain(ChainBase):
         if not indexer_sites:
             logger.warn('未开启任何有效站点,无法搜索资源')
             return []
+        # Start progress
         self.progress.start(ProgressKey.Search)
         # Start timing

View File

@@ -129,7 +129,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
         # Sites to refresh
         if not sites:
-            sites = [str(sid) for sid in (self.systemconfig.get(SystemConfigKey.RssSites) or [])]
+            sites = self.systemconfig.get(SystemConfigKey.RssSites) or []
         # Read the cache
         torrents_cache = self.get_torrents()
@@ -139,7 +139,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
         # Iterate over the cached resources of each site
         for indexer in indexers:
             # Sites that are not enabled are not refreshed
-            if sites and str(indexer.get("id")) not in sites:
+            if sites and indexer.get("id") not in sites:
                 continue
             domain = StringUtils.get_url_domain(indexer.get("domain"))
             if stype == "spider":
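
This last hunk is the one that addresses the reported behaviour: whether the site list comes from the subscription or falls back to SystemConfigKey.RssSites, it now stays a list of integers, so the per-indexer check matches again. A hedged sketch of the corrected flow (the helper name, indexer list and IDs are invented for illustration):

def sites_to_refresh(indexers, sites, configured_rss_sites):
    # Fall back to the globally configured RSS sites when the caller passes none
    if not sites:
        sites = configured_rss_sites or []
    # Skip only the sites that are not selected; an empty selection means all sites
    return [idx["domain"] for idx in indexers if not sites or idx["id"] in sites]

indexers = [{"id": 10, "domain": "x.example"}, {"id": 11, "domain": "y.example"}]
# A subscription that names site 11 now refreshes exactly that site again
print(sites_to_refresh(indexers, sites=[11], configured_rss_sites=[10, 11]))   # ['y.example']
# Before the fix, sites held strings ("10", "11"), the integer IDs never matched, and nothing was refreshed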