id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,288,900 | batch_spider.py | demigody_nas-tools/third_party/feapder/feapder/core/spiders/batch_spider.py | # -*- coding: utf-8 -*-
"""
Created on 2020/4/22 12:06 AM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import datetime
import os
import time
import warnings
from collections.abc import Iterable
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.core.base_parser import BatchParser
from feapder.core.scheduler import Scheduler
from feapder.db.mysqldb import MysqlDB
from feapder.db.redisdb import RedisDB
from feapder.network.item import Item
from feapder.network.item import UpdateItem
from feapder.network.request import Request
from feapder.utils.log import log
from feapder.utils.perfect_dict import PerfectDict
from feapder.utils.redis_lock import RedisLock
CONSOLE_PIPELINE_PATH = "feapder.pipelines.console_pipeline.ConsolePipeline"
class BatchSpider(BatchParser, Scheduler):
def __init__(
self,
task_table,
batch_record_table,
batch_name,
batch_interval,
task_keys,
task_state="state",
min_task_count=10000,
check_task_interval=5,
task_limit=10000,
related_redis_key=None,
related_batch_record=None,
task_condition="",
task_order_by="",
redis_key=None,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
keep_alive=None,
auto_start_next_batch=True,
**kwargs,
):
"""
@summary: 批次爬虫
必要条件
1、需有任务表
任务表中必须有id 及 任务状态字段 如 state。如指定parser_name字段,则任务会自动下发到对应的parser下, 否则会下发到所有的parser下。其他字段可根据爬虫需要的参数自行扩充
参考建表语句如下:
CREATE TABLE `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`param` varchar(1000) DEFAULT NULL COMMENT '爬虫需要的抓取数据需要的参数',
`state` int(11) DEFAULT NULL COMMENT '任务状态',
`parser_name` varchar(255) DEFAULT NULL COMMENT '任务解析器的脚本类名',
PRIMARY KEY (`id`),
UNIQUE KEY `nui` (`param`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
2、需有批次记录表 不存在自动创建
---------
@param task_table: mysql中的任务表
@param batch_record_table: mysql 中的批次记录表
@param batch_name: 批次采集程序名称
@param batch_interval: 批次间隔 天为单位。 如想一小时一批次,可写成1/24
@param task_keys: 需要获取的任务字段 列表 [] 如需指定解析的parser,则需将parser_name字段取出来。
@param task_state: mysql中任务表的任务状态字段
@param min_task_count: redis 中最少任务数, 少于这个数量会从mysql的任务表取任务
@param check_task_interval: 检查是否还有任务的时间间隔;
@param task_limit: 从数据库中取任务的数量
@param redis_key: 任务等数据存放在redis中的key前缀
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param keep_alive: 爬虫是否常驻,默认否
@param auto_start_next_batch: 本批次结束后,且下一批次时间已到达时,是否自动启动下一批次,默认是
@param related_redis_key: 有关联的其他爬虫任务表(redis)注意:要避免环路 如 A -> B & B -> A 。
@param related_batch_record: 有关联的其他爬虫批次表(mysql)注意:要避免环路 如 A -> B & B -> A 。
related_redis_key 与 related_batch_record 选其一配置即可;用于相关联的爬虫没结束时,本爬虫也不结束
若相关连的爬虫为批次爬虫,推荐以related_batch_record配置,
若相关连的爬虫为普通爬虫,无批次表,可以以related_redis_key配置
@param task_condition: 任务条件 用于从一个大任务表中挑选出数据自己爬虫的任务,即where后的条件语句
@param task_order_by: 取任务时的排序条件 如 id desc
---------
@result:
"""
Scheduler.__init__(
self,
redis_key=redis_key,
thread_count=thread_count,
begin_callback=begin_callback,
end_callback=end_callback,
delete_keys=delete_keys,
keep_alive=keep_alive,
auto_start_requests=False,
batch_interval=batch_interval,
task_table=task_table,
**kwargs,
)
self._redisdb = RedisDB()
self._mysqldb = MysqlDB()
self._task_table = task_table # mysql中的任务表
self._batch_record_table = batch_record_table # mysql 中的批次记录表
self._batch_name = batch_name # 批次采集程序名称
self._task_keys = task_keys # 需要获取的任务字段
self._task_state = task_state # mysql中任务表的state字段名
self._min_task_count = min_task_count # redis 中最少任务数
self._check_task_interval = check_task_interval
self._task_limit = task_limit # mysql中一次取的任务数量
self._related_task_tables = [
setting.TAB_REQUESTS.format(redis_key=redis_key)
] # 自己的task表也需要检查是否有任务
if related_redis_key:
self._related_task_tables.append(
setting.TAB_REQUESTS.format(redis_key=related_redis_key)
)
self._related_batch_record = related_batch_record
self._task_condition = task_condition
self._task_condition_prefix_and = task_condition and " and {}".format(
task_condition
)
self._task_condition_prefix_where = task_condition and " where {}".format(
task_condition
)
self._task_order_by = task_order_by and " order by {}".format(task_order_by)
self._auto_start_next_batch = auto_start_next_batch
self._batch_date_cache = None
if self._batch_interval >= 1:
self._date_format = "%Y-%m-%d"
elif self._batch_interval < 1 and self._batch_interval >= 1 / 24:
self._date_format = "%Y-%m-%d %H"
else:
self._date_format = "%Y-%m-%d %H:%M"
self._is_more_parsers = True # 多模版类爬虫
# 初始化每个配置的属性
self._spider_last_done_time = None # 爬虫最近已做任务数量时间
self._spider_last_done_count = None # 爬虫最近已做任务数量
self._spider_deal_speed_cached = None
self._batch_timeout = False # 批次是否超时或将要超时
# 重置任务
self.reset_task()
def init_batch_property(self):
"""
每个批次开始时需要重置的属性
@return:
"""
self._spider_deal_speed_cached = None
self._spider_last_done_time = None
self._spider_last_done_count = None # 爬虫刚开始启动时已做任务数量
self._batch_timeout = False
def add_parser(self, parser, **kwargs):
parser = parser(
self._task_table,
self._batch_record_table,
self._task_state,
self._date_format,
self._mysqldb,
**kwargs,
) # parser 实例化
self._parsers.append(parser)
    def start_monitor_task(self):
        """
        @summary: Master entry point — monitor task state and keep feeding
        tasks from mysql into redis until the batch is finished.
        ---------
        ---------
        @result:
        """
        if not self._parsers:  # not multi-template mode: register self as the only parser
            self._is_more_parsers = False
            self._parsers.append(self)

        elif len(self._parsers) <= 1:
            self._is_more_parsers = False

        self.create_batch_record_table()

        # let every parser add its own tasks
        for parser in self._parsers:
            parser.add_task()

        is_first_check = True
        while True:
            try:
                if self.check_batch(is_first_check):  # this batch is finished
                    if self._keep_alive:
                        is_first_check = True
                        log.info("爬虫所有任务已做完,不自动结束,等待新任务...")
                        time.sleep(self._check_task_interval)
                        continue
                    else:
                        break

                is_first_check = False

                # check the redis queue; below _min_task_count, pull from mysql
                tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
                todo_task_count = self._redisdb.zget_count(tab_requests)

                tasks = []
                if todo_task_count < self._min_task_count:  # pull tasks from mysql
                    # refresh the progress counters in the batch record table
                    self.update_task_done_count()

                    log.info("redis 中剩余任务%s 数量过小 从mysql中取任务追加" % todo_task_count)
                    tasks = self.get_todo_task_from_mysql()
                    if not tasks:  # state-0 tasks are done; check whether state-2 tasks were lost
                        if (
                            todo_task_count == 0
                        ):  # redis is empty, so state-2 rows in mysql are lost tasks and must be redone
                            lose_task_count = self.get_lose_task_count()
                            if not lose_task_count:
                                time.sleep(self._check_task_interval)
                                continue
                            elif (
                                lose_task_count > self._task_limit * 5
                            ):  # too many lost tasks: reset all at once, otherwise draining redis batch by batch is too slow
                                log.info("正在重置丢失任务为待做 共 {} 条".format(lose_task_count))
                                # flip in-progress tasks back to pending
                                if self.reset_lose_task_from_mysql():
                                    log.info("重置丢失任务成功")
                                else:
                                    log.info("重置丢失任务失败")

                                continue
                            else:  # few lost tasks: fetch them directly
                                log.info(
                                    "正在取丢失任务 共 {} 条, 取 {} 条".format(
                                        lose_task_count,
                                        self._task_limit
                                        if self._task_limit <= lose_task_count
                                        else lose_task_count,
                                    )
                                )
                                tasks = self.get_doing_task_from_mysql()

                    else:
                        log.info("mysql 中取到待做任务 %s 条" % len(tasks))

                else:
                    log.info("redis 中尚有%s条积压任务,暂时不派发新任务" % todo_task_count)

                if not tasks:
                    if todo_task_count >= self._min_task_count:
                        # log.info('任务正在进行 redis中剩余任务 %s' % todo_task_count)
                        pass
                    else:
                        log.info("mysql 中无待做任务 redis中剩余任务 %s" % todo_task_count)
                else:
                    # make start requests
                    self.distribute_task(tasks)
                    log.info("添加任务到redis成功")

            except Exception as e:
                log.exception(e)

            time.sleep(self._check_task_interval)
    def create_batch_record_table(self):
        """
        Create the batch record table in mysql if it does not exist yet.
        """
        # probe existence via information_schema
        sql = (
            "select table_name from information_schema.tables where table_name like '%s'"
            % self._batch_record_table
        )
        tables_name = self._mysqldb.find(sql)
        if not tables_name:
            sql = """
                CREATE TABLE `{table_name}` (
                      `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT,
                      `batch_date` {batch_date} DEFAULT NULL COMMENT '批次时间',
                      `total_count` int(11) DEFAULT NULL COMMENT '任务总数',
                      `done_count` int(11) DEFAULT NULL COMMENT '完成数 (1,-1)',
                      `fail_count` int(11) DEFAULT NULL COMMENT '失败任务数 (-1)',
                      `interval` float(11) DEFAULT NULL COMMENT '批次间隔',
                      `interval_unit` varchar(20) DEFAULT NULL COMMENT '批次间隔单位 day, hour',
                      `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '批次开始时间',
                      `update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '本条记录更新时间',
                      `is_done` int(11) DEFAULT '0' COMMENT '批次是否完成 0 未完成 1 完成',
                      PRIMARY KEY (`id`)
                ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
                """.format(
                table_name=self._batch_record_table,
                batch_date="datetime",
            )

            self._mysqldb.execute(sql)
def distribute_task(self, tasks):
"""
@summary: 分发任务
---------
@param tasks:
---------
@result:
"""
if self._is_more_parsers: # 为多模版类爬虫,需要下发指定的parser
for task in tasks:
for parser in self._parsers: # 寻找task对应的parser
if parser.name in task:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
elif callable(request): # callbale的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, bug get type: {}".format(
type(requests)
)
)
break
else: # task没对应的parser 则将task下发到所有的parser
for task in tasks:
for parser in self._parsers:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
elif callable(request): # callbale的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
self._request_buffer.flush()
self._item_buffer.flush()
def __get_task_state_count(self):
sql = "select {state}, count(1) from {task_table}{task_condition} group by {state}".format(
state=self._task_state,
task_table=self._task_table,
task_condition=self._task_condition_prefix_where,
)
task_state_count = self._mysqldb.find(sql)
task_state = {
"total_count": sum(count for state, count in task_state_count),
"done_count": sum(
count for state, count in task_state_count if state in (1, -1)
),
"failed_count": sum(
count for state, count in task_state_count if state == -1
),
}
return task_state
    def update_task_done_count(self):
        """
        @summary: Refresh the total/done/fail counters of the current batch
        record from the live task table.
        ---------
        ---------
        @result:
        """
        task_count = self.__get_task_state_count()

        # log.info('《%s》 批次进度 %s/%s' % (self._batch_name, done_task_count, total_task_count))

        # update the batch record row for the current batch date
        sql = "update {} set done_count = {}, total_count = {}, fail_count = {}, update_time = CURRENT_TIME, is_done=0, `interval` = {}, interval_unit = '{}' where batch_date = '{}'".format(
            self._batch_record_table,
            task_count.get("done_count"),
            task_count.get("total_count"),
            task_count.get("failed_count"),
            self._batch_interval
            if self._batch_interval >= 1
            else self._batch_interval * 24,
            "day" if self._batch_interval >= 1 else "hour",
            self.batch_date,
        )
        self._mysqldb.update(sql)
def update_is_done(self):
sql = "update {} set is_done = 1, update_time = CURRENT_TIME where batch_date = '{}' and is_done = 0".format(
self._batch_record_table, self.batch_date
)
self._mysqldb.update(sql)
    def get_todo_task_from_mysql(self):
        """
        @summary: Fetch pending tasks (state 0) from mysql and mark them as
        in progress (state 2).
        ---------
        ---------
        @result: list of task tuples
        """
        # TODO fetch in chunks of at most 1000000 to avoid excessive memory use
        # query tasks
        task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
        sql = "select %s from %s where %s = 0%s%s limit %s" % (
            task_keys,
            self._task_table,
            self._task_state,
            self._task_condition_prefix_and,
            self._task_order_by,
            self._task_limit,
        )
        tasks = self._mysqldb.find(sql)

        if tasks:
            # mark fetched tasks as state 2, 10000 rows per update statement
            for i in range(0, len(tasks), 10000):
                task_ids = str(
                    tuple([task[0] for task in tasks[i : i + 10000]])
                ).replace(",)", ")")  # drop the trailing comma of 1-tuples
                sql = "update %s set %s = 2 where id in %s" % (
                    self._task_table,
                    self._task_state,
                    task_ids,
                )
                self._mysqldb.update(sql)

        return tasks
def get_doing_task_from_mysql(self):
"""
@summary: 取正在做的任务
---------
---------
@result:
"""
# 查询任务
task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
sql = "select %s from %s where %s = 2%s%s limit %s" % (
task_keys,
self._task_table,
self._task_state,
self._task_condition_prefix_and,
self._task_order_by,
self._task_limit,
)
tasks = self._mysqldb.find(sql)
return tasks
def get_lose_task_count(self):
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql) # (('2018-08-19', 49686, 0),)
batch_date, total_count, done_count = batch_info[0]
return total_count - done_count
def reset_lose_task_from_mysql(self):
"""
@summary: 重置丢失任务为待做
---------
---------
@result:
"""
sql = "update {table} set {state} = 0 where {state} = 2{task_condition}".format(
table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
    def get_deal_speed(self, total_count, done_count, last_batch_date):
        """
        Compute the current task-processing speed.
        @param total_count: total number of tasks
        @param done_count: number of finished tasks
        @param last_batch_date: batch start time (datetime)
        @return:
            (deal_speed (items/hour), need_time (seconds), overflow_time (seconds;
            negative means finishing that many seconds early), calculate_speed_time)
            or None when no speed has been measured yet
        """
        now_date = datetime.datetime.now()
        if self._spider_last_done_count is None:
            # first call: only take a snapshot, speed needs two data points
            self._spider_last_done_count = done_count
            self._spider_last_done_time = now_date
        elif done_count > self._spider_last_done_count:
            time_interval = (now_date - self._spider_last_done_time).total_seconds()
            deal_speed = (
                done_count - self._spider_last_done_count
            ) / time_interval  # items/second
            need_time = (total_count - done_count) / deal_speed  # seconds
            overflow_time = (
                (now_date - last_batch_date).total_seconds()
                + need_time
                - datetime.timedelta(days=self._batch_interval).total_seconds()
            )  # seconds beyond the batch interval
            calculate_speed_time = now_date.strftime("%Y-%m-%d %H:%M:%S")  # time of this measurement
            deal_speed = int(deal_speed * 3600)  # items/hour

            # remember the latest snapshot
            self._spider_last_done_count = done_count
            self._spider_last_done_time = now_date

            self._spider_deal_speed_cached = (
                deal_speed,
                need_time,
                overflow_time,
                calculate_speed_time,
            )

        return self._spider_deal_speed_cached
def init_task(self):
"""
@summary: 初始化任务表中的任务, 新一个批次开始时调用。 可能会重写
---------
---------
@result:
"""
sql = "update {task_table} set {state} = 0 where {state} != -1{task_condition}".format(
task_table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
    def check_batch(self, is_first_check=False):
        """
        @summary: Check whether the current batch is finished.
        ---------
        @param: is_first_check: whether this is the first check; if so and the
                batch turns out finished, no "batch done" message is sent
                (it was already sent before)
        ---------
        @result: True when the batch is finished, otherwise False
        """
        sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
            date_format=self._date_format.replace(":%M", ":%i"),
            batch_record_table=self._batch_record_table,
        )
        batch_info = self._mysqldb.find(sql)  # (('batch date', total, done, is_done),)

        if batch_info:
            batch_date, total_count, done_count, is_done = batch_info[0]

            now_date = datetime.datetime.now()
            last_batch_date = datetime.datetime.strptime(batch_date, self._date_format)
            time_difference = now_date - last_batch_date

            if total_count == done_count and time_difference < datetime.timedelta(
                days=self._batch_interval
            ):  # still within this batch: re-check the task table for newly added tasks
                # query the task table directly, because the counters in the
                # batch record table may not have been refreshed yet
                task_count = self.__get_task_state_count()

                total_count = task_count.get("total_count")
                done_count = task_count.get("done_count")

            if total_count == done_count:
                if not is_done:
                    # check whether the related spiders are finished
                    related_spider_is_done = self.related_spider_is_done()
                    if related_spider_is_done is False:
                        msg = "《{}》本批次未完成, 正在等待依赖爬虫 {} 结束. 批次时间 {} 批次进度 {}/{}".format(
                            self._batch_name,
                            self._related_batch_record or self._related_task_tables,
                            batch_date,
                            done_count,
                            total_count,
                        )
                        log.info(msg)
                        # alert when the wait already exceeds the batch interval
                        if time_difference >= datetime.timedelta(
                            days=self._batch_interval
                        ):  # timed out
                            self.send_msg(
                                msg,
                                level="error",
                                message_prefix="《{}》本批次未完成, 正在等待依赖爬虫 {} 结束".format(
                                    self._batch_name,
                                    self._related_batch_record
                                    or self._related_task_tables,
                                ),
                            )
                            self._batch_timeout = True

                        return False

                    else:
                        self.update_is_done()

                    msg = "《{}》本批次完成 批次时间 {} 共处理 {} 条任务".format(
                        self._batch_name, batch_date, done_count
                    )
                    log.info(msg)
                    if not is_first_check:
                        if self._batch_timeout:  # alerted a timeout before; now done, send a recovery message
                            self._batch_timeout = False
                            self.send_msg(msg, level="error")
                        else:
                            self.send_msg(msg)

                # has the next batch's time arrived?
                if time_difference >= datetime.timedelta(days=self._batch_interval):
                    if not is_first_check and not self._auto_start_next_batch:
                        return True  # do not start the next batch: auto start is disabled

                    msg = "《{}》下一批次开始".format(self._batch_name)
                    log.info(msg)
                    self.send_msg(msg)

                    # reset the task table for the new batch
                    if self.init_task() != False:  # returns False on update failure, otherwise True/None
                        # reset per-batch attributes
                        self.init_batch_property()

                        is_success = (
                            self.record_batch()
                        )  # the insert may fail even though the task table is already reset; since "now" is already in the next batch window the task table is not re-checked, so the reset simply happens again next time

                        if is_success:
                            # if workers are waiting for tasks, delay dispatch so their cached batch date can refresh first
                            if self.have_alive_spider():
                                log.info(
                                    f"插入新批次记录成功,检测到有爬虫进程在等待任务,本批任务1分钟后开始下发, 防止爬虫端缓存的批次时间没来得及更新"
                                )
                                tools.delay_time(60)
                            else:
                                log.info("插入新批次记录成功")

                            return False  # next batch starts

                        else:
                            return True  # next batch does not start: inserting the new batch record failed; dispatch only after it succeeds

                else:
                    log.info("《{}》下次批次时间未到".format(self._batch_name))
                    if not is_first_check:
                        self.send_msg("《{}》下次批次时间未到".format(self._batch_name))
                    return True

            else:
                if time_difference >= datetime.timedelta(
                    days=self._batch_interval
                ):  # batch timed out
                    time_out = time_difference - datetime.timedelta(
                        days=self._batch_interval
                    )
                    time_out_pretty = tools.format_seconds(time_out.total_seconds())

                    msg = "《{}》本批次已超时{} 批次时间 {}, 批次进度 {}/{}".format(
                        self._batch_name,
                        time_out_pretty,
                        batch_date,
                        done_count,
                        total_count,
                    )
                    if self._batch_interval >= 1:
                        msg += ", 期望时间{}天".format(self._batch_interval)
                    else:
                        msg += ", 期望时间{}小时".format(self._batch_interval * 24)

                    result = self.get_deal_speed(
                        total_count=total_count,
                        done_count=done_count,
                        last_batch_date=last_batch_date,
                    )
                    if result:
                        (
                            deal_speed,
                            need_time,
                            overflow_time,
                            calculate_speed_time,
                        ) = result
                        msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
                            calculate_speed_time,
                            deal_speed,
                            tools.format_seconds(need_time),
                        )

                        if overflow_time > 0:
                            msg += ", 该批次预计总超时 {}, 请及时处理".format(
                                tools.format_seconds(overflow_time)
                            )

                    log.info(msg)
                    self.send_msg(
                        msg,
                        level="error",
                        message_prefix="《{}》批次超时".format(self._batch_name),
                    )
                    self._batch_timeout = True

                else:  # not timed out yet
                    remaining_time = (
                        datetime.timedelta(days=self._batch_interval) - time_difference
                    )
                    remaining_time_pretty = tools.format_seconds(
                        remaining_time.total_seconds()
                    )

                    if self._batch_interval >= 1:
                        msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}天, 剩余{}".format(
                            self._batch_name,
                            batch_date,
                            done_count,
                            total_count,
                            self._batch_interval,
                            remaining_time_pretty,
                        )
                    else:
                        msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}小时, 剩余{}".format(
                            self._batch_name,
                            batch_date,
                            done_count,
                            total_count,
                            self._batch_interval * 24,
                            remaining_time_pretty,
                        )

                    result = self.get_deal_speed(
                        total_count=total_count,
                        done_count=done_count,
                        last_batch_date=last_batch_date,
                    )
                    if result:
                        (
                            deal_speed,
                            need_time,
                            overflow_time,
                            calculate_speed_time,
                        ) = result
                        msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
                            calculate_speed_time,
                            deal_speed,
                            tools.format_seconds(need_time),
                        )

                        if overflow_time > 0:
                            msg += ", 该批次可能会超时 {}, 请及时处理".format(
                                tools.format_seconds(overflow_time)
                            )
                            # send the early-warning alert
                            self.send_msg(
                                msg,
                                level="error",
                                message_prefix="《{}》批次可能超时".format(self._batch_name),
                            )
                            self._batch_timeout = True

                        elif overflow_time < 0:
                            msg += ", 该批次预计提前 {} 完成".format(
                                tools.format_seconds(-overflow_time)
                            )

                    log.info(msg)

        else:
            # no batch record yet: insert the first batch_date
            self.record_batch()

            # initialize the task table state; may contain task-generating code
            self.init_task()

        return False
def related_spider_is_done(self):
"""
相关连的爬虫是否跑完
@return: True / False / None 表示无相关的爬虫 可由自身的total_count 和 done_count 来判断
"""
for related_redis_task_table in self._related_task_tables:
if self._redisdb.exists_key(related_redis_task_table):
return False
if self._related_batch_record:
sql = "select is_done from {} order by id desc limit 1".format(
self._related_batch_record
)
is_done = self._mysqldb.find(sql)
is_done = is_done[0][0] if is_done else None
if is_done is None:
log.warning("相关联的批次表不存在或无批次信息")
return True
if not is_done:
return False
return True
    def record_batch(self):
        """
        @summary: Insert a new batch record (batch initialization).
        ---------
        ---------
        @result: affected row count from the insert; truthy means success
        """
        # total task count for the new batch
        sql = "select count(1) from %s%s" % (
            self._task_table,
            self._task_condition_prefix_where,
        )
        total_task_count = self._mysqldb.find(sql)[0][0]

        batch_date = tools.get_current_date(self._date_format)

        sql = "insert into %s (batch_date, done_count, total_count, `interval`, interval_unit, create_time) values ('%s', %s, %s, %s, '%s', CURRENT_TIME)" % (
            self._batch_record_table,
            batch_date,
            0,
            total_task_count,
            self._batch_interval
            if self._batch_interval >= 1
            else self._batch_interval * 24,
            "day" if self._batch_interval >= 1 else "hour",
        )

        affect_count = self._mysqldb.add(sql)  # None / 0 / 1 (1 means success)
        if affect_count:
            # cache the new batch date
            self._batch_date_cache = batch_date
            # refresh os.environ['batch_date'] (read via self.batch_date),
            # otherwise the date would still point at the previous batch
            os.environ["batch_date"] = self._batch_date_cache

            # spider begins
            self.spider_begin()
        else:
            log.error("插入新批次失败")

        return affect_count
    # -------- batch-end logic ------------
    def task_is_done(self):
        """
        @summary: Check whether all tasks are done, refreshing the cached batch
        date on the way (this method must not crash, otherwise the batch date
        stops being updated).
        ---------
        ---------
        @result: True / False (done / not done)
        """
        is_done = False

        # read the latest batch record
        sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
            date_format=self._date_format.replace(":%M", ":%i"),
            batch_record_table=self._batch_record_table,
        )
        batch_info = self._mysqldb.find(sql)
        if batch_info is None:
            raise Exception("查询批次信息失败")

        if batch_info:
            self._batch_date_cache, total_count, done_count, is_done = batch_info[
                0
            ]  # refresh _batch_date_cache: a new batch may have started while the cache still held the old date

            log.info(
                "《%s》 批次时间%s 批次进度 %s/%s 完成状态 %d"
                % (
                    self._batch_name,
                    self._batch_date_cache,
                    done_count,
                    total_count,
                    is_done,
                )
            )
            os.environ["batch_date"] = self._batch_date_cache  # refresh the batch date used by BatchParser

        if is_done:  # the batch record says done; verify there is no pending task in the task table
            # the verification query is expensive, so guard it with a lock
            # against concurrent processes
            with RedisLock(key=self._spider_name) as lock:
                if lock.locked:
                    log.info("批次表标记已完成,正在检查任务表是否有未完成的任务")

                    sql = "select 1 from %s where (%s = 0 or %s=2)%s limit 1" % (
                        self._task_table,
                        self._task_state,
                        self._task_state,
                        self._task_condition_prefix_and,
                    )
                    tasks = self._mysqldb.find(sql)  # [(1,)] / []
                    if tasks:
                        log.info("检测到任务表中有未完成任务,等待任务下发")
                        is_done = False

                        # flip is_done back to 0 so future checks skip the expensive task-table query
                        sql = 'update {batch_record_table} set is_done = 0 where batch_date = "{batch_date}"'.format(
                            batch_record_table=self._batch_record_table,
                            batch_date=self._batch_date_cache,
                        )
                        self._mysqldb.update(sql)
                    else:
                        log.info("任务表中任务均已完成,爬虫结束")
                else:
                    log.info("批次表标记已完成,其他爬虫进程正在检查任务表是否有未完成的任务,本进程跳过检查,继续等待")
                    is_done = False

        return is_done
def run(self):
"""
@summary: 重写run方法 检查mysql中的任务是否做完, 做完停止
---------
---------
@result:
"""
try:
self.create_batch_record_table()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
try:
if self._stop_spider or (
self.task_is_done() and self.all_thread_is_done()
): # redis全部的任务已经做完 并且mysql中的任务已经做完(检查各个线程all_thread_is_done,防止任务没做完,就更新任务状态,导致程序结束的情况)
if not self._is_notify_end:
self.spider_end()
self._is_notify_end = True
if not self._keep_alive:
self._stop_all_thread()
break
else:
self._is_notify_end = False
self.check_task_status()
except Exception as e:
log.exception(e)
tools.delay_time(10) # 10秒钟检查一次爬虫状态
except Exception as e:
msg = "《%s》主线程异常 爬虫结束 exception: %s" % (self._batch_name, e)
log.error(msg)
self.send_msg(
msg, level="error", message_prefix="《%s》爬虫异常结束".format(self._batch_name)
)
os._exit(137) # 使退出码为35072 方便爬虫管理器重启
    @classmethod
    def to_DebugBatchSpider(cls, *args, **kwargs):
        """
        Build a DebugBatchSpider whose base class is this spider class, so the
        debug spider inherits the user's parser logic.
        """
        # make DebugBatchSpider inherit from cls
        DebugBatchSpider.__bases__ = (cls,)
        DebugBatchSpider.__name__ = cls.__name__
        return DebugBatchSpider(*args, **kwargs)
class DebugBatchSpider(BatchSpider):
    """
    Debug batch spider: runs a single task locally without touching task
    state or the production pipelines.
    """

    # settings forced in debug mode: single-threaded, no proxy/filter/retry persistence
    __debug_custom_setting__ = dict(
        COLLECTOR_TASK_COUNT=1,
        # SPIDER
        SPIDER_THREAD_COUNT=1,
        SPIDER_SLEEP_TIME=0,
        SPIDER_MAX_RETRY_TIMES=10,
        REQUEST_LOST_TIMEOUT=600,  # 10 minutes
        PROXY_ENABLE=False,
        RETRY_FAILED_REQUESTS=False,
        # do not save failed requests
        SAVE_FAILED_REQUEST=False,
        # filtering disabled
        ITEM_FILTER_ENABLE=False,
        REQUEST_FILTER_ENABLE=False,
        OSS_UPLOAD_TABLES=(),
        DELETE_KEYS=True,
    )
    def __init__(
        self,
        task_id=None,
        task=None,
        save_to_db=False,
        update_task=False,
        *args,
        **kwargs,
    ):
        """
        @param task_id: task id (looked up in the task table)
        @param task: the task itself; provide either task or task_id
        @param save_to_db: whether scraped data is written to the db; default False
        @param update_task: whether the task state is updated; default False
        @param args:
        @param kwargs: must include redis_key; forwarded to BatchSpider
        """
        warnings.warn(
            "您正处于debug模式下,该模式下不会更新任务状态及数据入库,仅用于调试。正式发布前请更改为正常模式", category=Warning
        )

        if not task and not task_id:
            raise Exception("task_id 与 task 不能同时为null")

        kwargs["redis_key"] = kwargs["redis_key"] + "_debug"
        if not save_to_db:
            # route items to the console pipeline instead of the real pipelines
            self.__class__.__debug_custom_setting__["ITEM_PIPELINES"] = [
                CONSOLE_PIPELINE_PATH
            ]
        self.__class__.__custom_setting__.update(
            self.__class__.__debug_custom_setting__
        )

        super(DebugBatchSpider, self).__init__(*args, **kwargs)

        self._task_id = task_id
        self._task = task
        self._update_task = update_task
    def start_monitor_task(self):
        """
        @summary: Debug-mode task monitor — dispatch the given task (or the one
        looked up by task_id) once, then return.
        ---------
        ---------
        @result:
        """
        if not self._parsers:  # not multi-template mode: register self as the parser
            self._is_more_parsers = False
            self._parsers.append(self)

        elif len(self._parsers) <= 1:
            self._is_more_parsers = False

        if self._task:
            self.distribute_task([self._task])
        else:
            tasks = self.get_todo_task_from_mysql()
            if not tasks:
                raise Exception("未获取到任务 请检查 task_id: {} 是否存在".format(self._task_id))
            self.distribute_task(tasks)

        os.environ.setdefault("batch_date", "1970-00-00")  # placeholder batch date for debug runs
        log.debug("下发任务完毕")
def get_todo_task_from_mysql(self):
"""
@summary: 取待做的任务
---------
---------
@result:
"""
# 查询任务
task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
sql = "select %s from %s where id=%s" % (
task_keys,
self._task_table,
self._task_id,
)
tasks = self._mysqldb.find(sql)
return tasks
    def save_cached(self, request, response, table):
        """
        Debug mode: response caching is disabled, so this override is a no-op.
        """
        pass
def update_task_state(self, task_id, state=1, *args, **kwargs):
"""
@summary: 更新任务表中任务状态,做完每个任务时代码逻辑中要主动调用。可能会重写
调用方法为 yield lambda : self.update_task_state(task_id, state)
---------
@param task_id:
@param state:
---------
@result:
"""
if self._update_task:
kwargs["id"] = task_id
kwargs[self._task_state] = state
sql = tools.make_update_sql(
self._task_table,
kwargs,
condition="id = {task_id}".format(task_id=task_id),
)
if self._mysqldb.update(sql):
log.debug("置任务%s状态成功" % task_id)
else:
log.error("置任务%s状态失败 sql=%s" % (task_id, sql))
def update_task_batch(self, task_id, state=1, *args, **kwargs):
"""
批量更新任务 多处调用,更新的字段必须一致
注意:需要 写成 yield update_task_batch(...) 否则不会更新
@param task_id:
@param state:
@param kwargs:
@return:
"""
if self._update_task:
kwargs["id"] = task_id
kwargs[self._task_state] = state
update_item = UpdateItem(**kwargs)
update_item.table_name = self._task_table
update_item.name_underline = self._task_table + "_item"
return update_item
def run(self):
self.start_monitor_task()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
try:
if self.all_thread_is_done():
self._stop_all_thread()
break
except Exception as e:
log.exception(e)
tools.delay_time(1) # 1秒钟检查一次爬虫状态
self.delete_tables([self._redis_key + "*"])
| 49,433 | Python | .py | 1,040 | 26.165385 | 190 | 0.474069 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,901 | spider.py | demigody_nas-tools/third_party/feapder/feapder/core/spiders/spider.py | # -*- coding: utf-8 -*-
"""
Created on 2020/4/22 12:05 AM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import time
import warnings
from collections.abc import Iterable
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.core.base_parser import BaseParser
from feapder.core.scheduler import Scheduler
from feapder.db.redisdb import RedisDB
from feapder.network.item import Item
from feapder.network.request import Request
from feapder.utils.log import log
CONSOLE_PIPELINE_PATH = "feapder.pipelines.console_pipeline.ConsolePipeline"
class Spider(
    BaseParser, Scheduler
):  # threading has a name property, so BaseParser must be inherited first, otherwise its name would be shadowed by Scheduler's threading.Thread base
    """
    @summary: Simplifies building a spider.
    ---------
    """
    def __init__(
        self,
        redis_key=None,
        min_task_count=1,
        check_task_interval=5,
        thread_count=None,
        begin_callback=None,
        end_callback=None,
        delete_keys=(),
        keep_alive=None,
        auto_start_requests=None,
        batch_interval=0,
        wait_lock=True,
        **kwargs
    ):
        """
        @summary: Spider
        ---------
        @param redis_key: key prefix under which tasks etc. are stored in redis
        @param min_task_count: minimum task-queue size; tasks are added only below this value (default 1); effective in start_monitor_task mode
        @param check_task_interval: seconds between task checks; default 5
        @param thread_count: thread count; defaults to the setting-file value
        @param begin_callback: callback invoked when the spider starts
        @param end_callback: callback invoked when the spider ends
        @param delete_keys: redis keys deleted at startup (tuple/bool/str, regex supported); commonly used to clear the task queue, otherwise a restart resumes from the breakpoint
        @param keep_alive: whether the spider stays resident
        @param auto_start_requests: whether the spider adds its start requests automatically
        @param batch_interval: crawl interval in days (default 0); on re-launch the spider starts only when the time since the previous finished crawl exceeds this interval
        @param wait_lock: whether to wait for the lock when distributing tasks; without waiting, multiple processes may distribute identical tasks, so set True in distributed setups
        ---------
        @result:
        """
        super(Spider, self).__init__(
            redis_key=redis_key,
            thread_count=thread_count,
            begin_callback=begin_callback,
            end_callback=end_callback,
            delete_keys=delete_keys,
            keep_alive=keep_alive,
            auto_start_requests=auto_start_requests,
            batch_interval=batch_interval,
            wait_lock=wait_lock,
            **kwargs
        )

        self._min_task_count = min_task_count
        self._check_task_interval = check_task_interval

        self._is_distributed_task = False  # whether any task was distributed this round
        self._is_show_not_task = False  # whether the "no task" notice was already shown
def start_monitor_task(self, *args, **kws):
if not self.is_reach_next_spider_time():
return
self._auto_start_requests = False
redisdb = RedisDB()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
while True:
try:
# 检查redis中是否有任务
tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
todo_task_count = redisdb.zget_count(tab_requests)
if todo_task_count < self._min_task_count: # 添加任务
# make start requests
self.distribute_task(*args, **kws)
else:
log.info("redis 中尚有%s条积压任务,暂时不派发新任务" % todo_task_count)
except Exception as e:
log.exception(e)
if not self._keep_alive:
break
time.sleep(self._check_task_interval)
    def distribute_task(self, *args, **kws):
        """
        @summary: Distribute tasks — collect start_requests from every parser,
        push the yielded requests/items into the buffers and flush them.
        ---------
        @param tasks:
        ---------
        @result:
        """
        self._is_distributed_task = False

        for parser in self._parsers:
            requests = parser.start_requests(*args, **kws)
            if requests and not isinstance(requests, Iterable):
                raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))

            result_type = 1
            for request in requests or []:
                if isinstance(request, Request):
                    request.parser_name = request.parser_name or parser.name
                    self._request_buffer.put_request(request)

                    self._is_distributed_task = True
                    result_type = 1

                elif isinstance(request, Item):
                    self._item_buffer.put_item(request)
                    result_type = 2

                elif callable(request):  # a callable is usually a db-update closure
                    if result_type == 1:
                        self._request_buffer.put_request(request)
                    else:
                        self._item_buffer.put_item(request)
                else:
                    raise TypeError(
                        "start_requests yield result type error, expect Request、Item、callback func, bug get type: {}".format(
                            type(request)
                        )
                    )

        self._request_buffer.flush()
        self._item_buffer.flush()

        if self._is_distributed_task:  # announce the spider start only when there are tasks
            # begin
            self.spider_begin()

            # reset the "no task" notice flag
            self._is_show_not_task = False

        elif not self._is_show_not_task:  # no task, and the notice has not been sent yet
            # send the "no task" message
            msg = "《%s》start_requests无任务添加" % (self._spider_name)
            log.info(msg)
            # self.send_msg(msg)

            self._is_show_not_task = True
    def run(self):
        """
        Consumer entry point: start the worker threads and poll once per
        second until every stage is idle (or the spider is asked to stop).
        """
        if not self.is_reach_next_spider_time():
            return
        if not self._parsers:  # not in add_parser mode: this spider is its own parser
            self._parsers.append(self)
        self._start()
        while True:
            try:
                if self._stop_spider or self.all_thread_is_done():
                    if not self._is_notify_end:
                        self.spider_end()  # one crawl round finished
                        self._is_notify_end = True
                    if not self._keep_alive:
                        self._stop_all_thread()
                        break
                else:
                    self._is_notify_end = False
                self.check_task_status()
            except Exception as e:
                log.exception(e)
            tools.delay_time(1)  # poll spider state once per second
    @classmethod
    def to_DebugSpider(cls, *args, **kwargs):
        """
        Return a debug-mode instance of this spider.

        Re-parents ``DebugSpider`` at runtime so it inherits from *cls*,
        letting the debug wrapper reuse the concrete spider's parse methods.
        """
        # Make DebugSpider inherit from cls (runtime re-parenting).
        DebugSpider.__bases__ = (cls,)
        DebugSpider.__name__ = cls.__name__
        return DebugSpider(*args, **kwargs)
class DebugSpider(Spider):
    """
    Debug spider: feeds a single request through the parse pipeline for
    local debugging. Task state is not updated and, by default, items go to
    the console pipeline instead of the database.
    """
    # Settings forced on top of the user's settings while debugging:
    # single thread, no proxies, no filters; redis keys are wiped on start.
    __debug_custom_setting__ = dict(
        COLLECTOR_TASK_COUNT=1,
        # SPIDER
        SPIDER_THREAD_COUNT=1,
        SPIDER_SLEEP_TIME=0,
        SPIDER_MAX_RETRY_TIMES=10,
        REQUEST_LOST_TIMEOUT=600,  # 10 minutes
        PROXY_ENABLE=False,
        RETRY_FAILED_REQUESTS=False,
        # do not persist failed requests
        SAVE_FAILED_REQUEST=False,
        # disable item/request filtering
        ITEM_FILTER_ENABLE=False,
        REQUEST_FILTER_ENABLE=False,
        OSS_UPLOAD_TABLES=(),
        DELETE_KEYS=True,
    )
    def __init__(
        self, request=None, request_dict=None, save_to_db=False, *args, **kwargs
    ):
        """
        :param request: a Request instance to debug
        :param request_dict: the request as a dict; pass either ``request``
            or ``request_dict``
        :param save_to_db: whether items are written to the DB (default
            False: items are printed via the console pipeline)
        :param kwargs: forwarded to Spider; must contain ``redis_key``
        """
        warnings.warn(
            "您正处于debug模式下,该模式下不会更新任务状态及数据入库,仅用于调试。正式发布前请更改为正常模式", category=Warning
        )
        if not request and not request_dict:
            raise Exception("request 与 request_dict 不能同时为null")
        # Use a separate redis namespace so debugging never touches real tasks.
        kwargs["redis_key"] = kwargs["redis_key"] + "_debug"
        if not save_to_db:
            self.__class__.__debug_custom_setting__["ITEM_PIPELINES"] = [
                CONSOLE_PIPELINE_PATH
            ]
        self.__class__.__custom_setting__.update(
            self.__class__.__debug_custom_setting__
        )
        super(DebugSpider, self).__init__(*args, **kwargs)
        self._request = request or Request.from_dict(request_dict)
    def save_cached(self, request, response, table):
        # Response caching is a deliberate no-op while debugging.
        pass
    def __start_requests(self):
        # Name-mangled to _DebugSpider__start_requests; yields the single
        # request under debug instead of the spider's real start_requests.
        yield self._request
    def distribute_task(self):
        """
        Dispatch the debug request into the request/item buffers and flush
        (same routing contract as ``Spider.distribute_task``).
        """
        self._is_distributed_task = False
        for parser in self._parsers:
            requests = parser.__start_requests()
            if requests and not isinstance(requests, Iterable):
                raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))
            result_type = 1
            for request in requests or []:
                if isinstance(request, Request):
                    request.parser_name = request.parser_name or parser.name
                    self._request_buffer.put_request(request)
                    self._is_distributed_task = True
                    result_type = 1
                elif isinstance(request, Item):
                    self._item_buffer.put_item(request)
                    result_type = 2
                elif callable(request):  # callables are typically DB-update functions
                    # Follow whichever buffer received the previous yield so
                    # the callable keeps its relative order.
                    if result_type == 1:
                        self._request_buffer.put_request(request)
                    else:
                        self._item_buffer.put_item(request)
        self._request_buffer.flush()
        self._item_buffer.flush()
        if self._is_distributed_task:  # only announce spider start when tasks exist
            # begin
            self.spider_begin()
            # reset the "no task" notification flag
            self._is_show_not_task = False
        elif not self._is_show_not_task:  # no tasks and not yet notified
            # emit the "no task" message once
            msg = "《%s》start_requests无任务添加" % (self._spider_name)
            log.info(msg)
            # self.send_msg(msg)
            self._is_show_not_task = True
    def _start(self):
        """Start the debug pipeline: seed the request, then start all workers."""
        # run the parsers' start requests
        self.spider_begin()  # for non-auto-ending spiders this can only run once here
        for parser in self._parsers:
            results = parser.__start_requests()
            # push requests to the request queue; the queue persists them in one place
            if results and not isinstance(results, Iterable):
                raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))
            result_type = 1
            for result in results or []:
                if isinstance(result, Request):
                    result.parser_name = result.parser_name or parser.name
                    self._request_buffer.put_request(result)
                    result_type = 1
                elif isinstance(result, Item):
                    self._item_buffer.put_item(result)
                    result_type = 2
                elif callable(result):  # callables are typically DB-update functions
                    if result_type == 1:
                        self._request_buffer.put_request(result)
                    else:
                        self._item_buffer.put_item(result)
            self._request_buffer.flush()
            self._item_buffer.flush()
        # start the collector
        self._collector.start()
        # start the parser controls (worker threads)
        for i in range(self._thread_count):
            parser_control = self._parser_control_obj(
                self._collector,
                self._redis_key,
                self._request_buffer,
                self._item_buffer,
            )
            for parser in self._parsers:
                parser_control.add_parser(parser)
            parser_control.start()
            self._parser_controls.append(parser_control)
        # start the request buffer
        self._request_buffer.start()
        # start the item buffer
        self._item_buffer.start()
    def run(self):
        """Run until all stages are idle, then clean up the debug redis keys."""
        if not self._parsers:  # not in add_parser mode: this spider is its own parser
            self._parsers.append(self)
        self._start()
        while True:
            try:
                if self.all_thread_is_done():
                    self._stop_all_thread()
                    break
            except Exception as e:
                log.exception(e)
            tools.delay_time(1)  # poll spider state once per second
        self.delete_tables([self._redis_key + "*"])
| 13,420 | Python | .py | 316 | 26.25 | 125 | 0.549328 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,902 | __init__.py | demigody_nas-tools/third_party/feapder/feapder/core/spiders/__init__.py | # -*- coding: utf-8 -*-
"""
Created on 2020/4/22 12:08 AM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
__all__ = ["AirSpider", "TaskSpider", "Spider", "BatchSpider"]
from feapder.core.spiders.air_spider import AirSpider
from feapder.core.spiders.spider import Spider
from feapder.core.spiders.task_spider import TaskSpider
from feapder.core.spiders.batch_spider import BatchSpider
| 417 | Python | .py | 14 | 28.642857 | 62 | 0.738155 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,903 | air_spider.py | demigody_nas-tools/third_party/feapder/feapder/core/spiders/air_spider.py | # -*- coding: utf-8 -*-
"""
Created on 2020/4/22 12:05 AM
---------
@summary: 基于内存队列的爬虫,不支持分布式
---------
@author: Boris
@email: [email protected]
"""
from threading import Thread
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.buffer.item_buffer import ItemBuffer
from feapder.buffer.request_buffer import AirSpiderRequestBuffer
from feapder.core.base_parser import BaseParser
from feapder.core.parser_control import AirSpiderParserControl
from feapder.db.memorydb import MemoryDB
from feapder.network.request import Request
from feapder.utils import metrics
from feapder.utils.log import log
class AirSpider(BaseParser, Thread):
    """
    Lightweight spider backed by an in-memory queue (no redis), therefore
    with no distributed mode. Runs N parser-control threads over MemoryDB.
    """
    # Per-subclass setting overrides, applied onto the global setting module.
    __custom_setting__ = {}
    def __init__(self, thread_count=None):
        """
        In-memory-queue spider; distributed mode is not supported.

        :param thread_count: number of parser threads; falls back to
            ``setting.SPIDER_THREAD_COUNT`` when not given.
        """
        super(AirSpider, self).__init__()
        # Apply class-level overrides first so the buffers/controls created
        # below observe them.
        for key, value in self.__class__.__custom_setting__.items():
            setattr(setting, key, value)
        if thread_count:
            setattr(setting, "SPIDER_THREAD_COUNT", thread_count)
        self._thread_count = setting.SPIDER_THREAD_COUNT
        self._memory_db = MemoryDB()
        self._parser_controls = []
        self._item_buffer = ItemBuffer(redis_key=self.name)
        self._request_buffer = AirSpiderRequestBuffer(
            db=self._memory_db, dedup_name=self.name
        )
        self._stop_spider = False
        metrics.init(**setting.METRICS_OTHER_ARGS)
    def distribute_task(self):
        # Seed the in-memory queue with the spider's start requests.
        for request in self.start_requests():
            if not isinstance(request, Request):
                raise ValueError("仅支持 yield Request")
            request.parser_name = request.parser_name or self.name
            self._request_buffer.put_request(request, ignore_max_size=False)
    def all_thread_is_done(self):
        """
        Return True when every pipeline stage is idle.

        The check runs 3 times with a 1s pause: the stages are not sampled
        atomically, so a single snapshot could transiently look idle.
        """
        for i in range(3):  # repeat to reduce false positives; stages are not sampled atomically
            # parser controls idle?
            for parser_control in self._parser_controls:
                if not parser_control.is_not_task():
                    return False
            # task queue drained?
            if not self._memory_db.empty():
                return False
            # item buffer flushed?
            if (
                self._item_buffer.get_items_count() > 0
                or self._item_buffer.is_adding_to_db()
            ):
                return False
            tools.delay_time(1)
        return True
    def run(self):
        """Thread entry point: start workers, seed tasks, wait for completion."""
        self.start_callback()
        for i in range(self._thread_count):
            parser_control = AirSpiderParserControl(
                memory_db=self._memory_db,
                request_buffer=self._request_buffer,
                item_buffer=self._item_buffer,
            )
            parser_control.add_parser(self)
            parser_control.start()
            self._parser_controls.append(parser_control)
        self._item_buffer.start()
        self.distribute_task()
        while True:
            try:
                if self._stop_spider or self.all_thread_is_done():
                    # stop the parser controls
                    for parser_control in self._parser_controls:
                        parser_control.stop()
                    # shut down the item buffer
                    self._item_buffer.stop()
                    # close any rendering webdrivers
                    Request.render_downloader and Request.render_downloader.close_all()
                    if self._stop_spider:
                        log.info("爬虫被终止")
                    else:
                        log.info("无任务,爬虫结束")
                    break
            except Exception as e:
                log.exception(e)
            tools.delay_time(1)  # poll spider state once per second
        self.end_callback()
        # allow this Thread object to be start()ed again
        self._started.clear()
        # close metrics reporting
        metrics.close()
    def join(self, timeout=None):
        """
        Override ``Thread.join``: a no-op when the thread was never started,
        so joining an unstarted spider does not raise.
        """
        if not self._started.is_set():
            return
        super().join()
    def stop_spider(self):
        # Cooperative stop flag, observed by run()'s main loop.
        self._stop_spider = True
| 4,372 | Python | .py | 109 | 26.59633 | 88 | 0.580933 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,904 | cmdline.py | demigody_nas-tools/third_party/feapder/feapder/commands/cmdline.py | # -*- coding: utf-8 -*-
"""
Created on 2020/5/8 2:24 PM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import re
import sys
from os.path import dirname, join
import os
import requests
from feapder.commands import create_builder
from feapder.commands import retry
from feapder.commands import shell
from feapder.commands import zip
# CLI banner + usage text printed by `feapder` when run without arguments.
HELP = """
███████╗███████╗ █████╗ ██████╗ ██████╗ ███████╗██████╗
██╔════╝██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝██╔══██╗
█████╗ █████╗ ███████║██████╔╝██║ ██║█████╗ ██████╔╝
██╔══╝ ██╔══╝ ██╔══██║██╔═══╝ ██║ ██║██╔══╝ ██╔══██╗
██║ ███████╗██║ ██║██║ ██████╔╝███████╗██║ ██║
╚═╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝
Version: {version}
Document: https://feapder.com
Usage:
  feapder <command> [options] [args]
Available commands:
"""
# Template for the colored "new version available" hint (ANSI color escapes).
NEW_VERSION_TIP = """
──────────────────────────────────────────────────────
New version available \033[31m{version}\033[0m → \033[32m{new_version}\033[0m
Run \033[33mpip install --upgrade feapder\033[0m to update!
"""
# Package version, read from the VERSION file shipped next to the package.
with open(join(dirname(dirname(__file__)), "VERSION"), "rb") as f:
    VERSION = f.read().decode("ascii").strip()
def _print_commands():
    """Print the CLI banner and the table of available sub-commands."""
    print(HELP.rstrip().format(version=VERSION))

    # Pre-sorted alphabetically, matching the original sorted(dict) output.
    commands = (
        ("create", "create project、spider、item and so on"),
        ("retry", "retry failed request or item"),
        ("shell", "debug response"),
        ("zip", "zip project"),
    )
    for name, summary in commands:
        print("  %-13s %s" % (name, summary))

    print('\nUse "feapder <command> -h" to see more info about a command')
def check_new_version():
    """
    Check PyPI for a newer feapder release and print an upgrade tip.

    Best-effort: any failure (no network, parse error, empty release list)
    is silently swallowed so the CLI never breaks because of this check.
    """

    def version_key(version_str):
        # Numeric tuple for comparison: "1.10.2" -> (1, 10, 2).
        # Non-numeric parts (defensive) are ignored.
        return tuple(int(part) for part in version_str.split(".") if part.isdigit())

    try:
        url = "https://pypi.org/simple/feapder/"
        # verify=False is deliberate tolerance of broken local cert stores;
        # this request only fetches public version information.
        resp = requests.get(url, timeout=3, verify=False)
        html = resp.text

        last_stable_version = re.findall(r"feapder-([\d.]*?).tar.gz", html)[-1]
        now_version = VERSION
        now_stable_version = re.sub("-beta.*", "", VERSION)

        # Compare numerically, not lexicographically: "1.9" < "1.10" must
        # hold, whereas string comparison says the opposite.
        if version_key(now_stable_version) < version_key(last_stable_version) or (
            now_stable_version == last_stable_version and "beta" in now_version
        ):
            new_version = f"feapder=={last_stable_version}"
            version = f"feapder=={VERSION.replace('-beta', 'b')}"
            tip = NEW_VERSION_TIP.format(version=version, new_version=new_version)
            # Enable ANSI colors in the Windows console: os.system("")
            # switches the console into VT processing mode.
            if os.name == "nt":
                os.system("")
            print(tip)
    except Exception:
        pass
def execute():
    """Entry point of the `feapder` command line tool."""
    handlers = {
        "create": create_builder.main,
        "shell": shell.main,
        "zip": zip.main,
        "retry": retry.main,
    }
    try:
        argv = sys.argv
        if len(argv) < 2:
            _print_commands()
            check_new_version()
            return

        command = argv.pop(1)
        # Unknown commands fall back to printing the command list.
        handlers.get(command, _print_commands)()
    except KeyboardInterrupt:
        pass

    check_new_version()
if __name__ == "__main__":
execute()
| 3,824 | Python | .py | 93 | 27.172043 | 86 | 0.517875 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,905 | zip.py | demigody_nas-tools/third_party/feapder/feapder/commands/zip.py | # -*- coding: utf-8 -*-
"""
Created on 2022/2/13 12:59 上午
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import argparse
import os
import re
import zipfile
def is_ignore_file(ignore_files: list, filename):
    """Return True if *filename* matches any regex in *ignore_files*."""
    return any(re.search(pattern, filename) for pattern in ignore_files)
def zip(dir_path, zip_name, ignore_dirs: list = None, ignore_files: list = None):
    """
    Recursively compress *dir_path* into the archive *zip_name*.

    :param dir_path: directory to compress; archived paths are relative to
        its parent, so the archive contains one top-level folder.
    :param zip_name: output archive path; the archive file itself is always
        excluded from the archive.
    :param ignore_dirs: directory names to skip entirely (optional).
    :param ignore_files: filename regexes to skip (optional; see
        ``is_ignore_file``).

    Note: changes the process working directory to the parent of *dir_path*.
    """
    print(f"正在压缩 {dir_path} >> {zip_name}")
    # Work on a copy so the caller's list is not mutated, and tolerate the
    # default ignore_files=None (the original crashed on .append then).
    ignore_files = list(ignore_files or [])
    # Never include the archive we are writing into itself.
    ignore_files.append(os.path.basename(zip_name))
    with zipfile.ZipFile(zip_name, "w") as file:
        dir_name = os.path.basename(dir_path)
        parent_dir = os.path.dirname(dir_path)
        if parent_dir:
            os.chdir(parent_dir)
        for path, dirs, filenames in os.walk(dir_name):
            # Mutate dirs in place so os.walk skips ignored directories.
            if ignore_dirs:
                dirs[:] = [d for d in dirs if d not in ignore_dirs]
            for filename in filenames:
                if ignore_files and is_ignore_file(ignore_files, filename):
                    continue
                filepath = os.path.join(path, filename)
                print(f"  adding {filepath}")
                file.write(filepath)
    print(f"压缩成功 {dir_path} >> {zip_name}")
def parse_args():
    """Parse command line arguments for `feapder zip`."""
    cli = argparse.ArgumentParser(
        description="压缩文件夹, 默认排除以下文件夹及文件 .git,__pycache__,.idea,venv,.DS_Store",
        usage="feapder zip dir_path [zip_name]",
    )
    cli.add_argument("dir_path", type=str, help="文件夹路径")
    cli.add_argument("zip_name", type=str, nargs="?", help="压缩后的文件名,默认为文件夹名.zip")
    cli.add_argument("-i", help="忽略文件,逗号分隔,支持正则", metavar="")
    cli.add_argument("-I", help="忽略文件夹,逗号分隔,支持正则 ", metavar="")
    cli.add_argument("-o", help="输出路径,默认为当前目录", metavar="")
    return cli.parse_args()
def main():
    """CLI entry for `feapder zip`: build the ignore lists and run the zipper."""
    skip_dirs = [".git", "__pycache__", ".idea", "venv", "env"]
    skip_files = [".DS_Store"]

    args = parse_args()
    if args.i:
        skip_files += args.i.split(",")
    if args.I:
        skip_dirs += args.I.split(",")

    source_dir = args.dir_path
    target = args.zip_name if args.zip_name else os.path.basename(source_dir) + ".zip"
    if args.o:
        target = os.path.join(args.o, os.path.basename(target))

    zip(source_dir, target, ignore_dirs=skip_dirs, ignore_files=skip_files)
| 2,572 | Python | .py | 62 | 31.467742 | 84 | 0.616528 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,906 | create_builder.py | demigody_nas-tools/third_party/feapder/feapder/commands/create_builder.py | # -*- coding: utf-8 -*-
"""
Created on 2021/2/8 11:21 上午
---------
@summary: 生成器
---------
@author: Boris
@email: [email protected]
"""
import argparse
from terminal_layout import Fore
from terminal_layout.extensions.choice import Choice, StringStyle
import feapder.setting as setting
from feapder.commands.create import *
def main():
    """
    CLI entry for `feapder create`: parse the flags, optionally override the
    MySQL connection settings, then dispatch to the matching generator
    (project / spider / item / table / __init__ / json / cookies / setting /
    params). Spider and item creation show an interactive template chooser.
    """
    spider = argparse.ArgumentParser(description="生成器")
    spider.add_argument(
        "-p", "--project", help="创建项目 如 feapder create -p <project_name>", metavar=""
    )
    spider.add_argument(
        "-s",
        "--spider",
        help="创建爬虫 如 feapder create -s <spider_name>",
        metavar="",
    )
    spider.add_argument(
        "-i",
        "--item",
        help="创建item 如 feapder create -i <table_name> 支持模糊匹配 如 feapder create -i %%table_name%%",
        metavar="",
    )
    spider.add_argument(
        "-t", "--table", help="根据json创建表 如 feapder create -t <table_name>", metavar=""
    )
    spider.add_argument(
        "-init", help="创建__init__.py 如 feapder create -init", action="store_true"
    )
    spider.add_argument("-j", "--json", help="创建json", action="store_true")
    spider.add_argument("-sj", "--sort_json", help="创建有序json", action="store_true")
    spider.add_argument("-c", "--cookies", help="创建cookie", action="store_true")
    spider.add_argument("--params", help="解析地址中的参数", action="store_true")
    # NOTE(review): the two adjacent literals below are implicitly
    # concatenated with no separator — the help text reads as one run-on
    # string; probably a missing space or "如". Confirm before changing.
    spider.add_argument(
        "--setting", help="创建全局配置文件" "feapder create --setting", action="store_true"
    )
    # Optional MySQL connection overrides (applied to the global settings).
    spider.add_argument("--host", type=str, help="mysql 连接地址", metavar="")
    spider.add_argument("--port", type=str, help="mysql 端口", metavar="")
    spider.add_argument("--username", type=str, help="mysql 用户名", metavar="")
    spider.add_argument("--password", type=str, help="mysql 密码", metavar="")
    spider.add_argument("--db", type=str, help="mysql 数据库名", metavar="")
    args = spider.parse_args()
    if args.host:
        setting.MYSQL_IP = args.host
    if args.port:
        setting.MYSQL_PORT = int(args.port)
    if args.username:
        setting.MYSQL_USER_NAME = args.username
    if args.password:
        setting.MYSQL_USER_PASS = args.password
    if args.db:
        setting.MYSQL_DB = args.db
    if args.item:
        # Interactive chooser: index 0/1 -> Item, 2/3 -> UpdateItem;
        # odd indexes enable dict-style assignment support.
        c = Choice(
            "请选择Item类型",
            ["Item", "Item 支持字典赋值", "UpdateItem", "UpdateItem 支持字典赋值"],
            icon_style=StringStyle(fore=Fore.green),
            selected_style=StringStyle(fore=Fore.green),
        )
        choice = c.get_choice()
        if choice:
            index, value = choice
            item_name = args.item
            item_type = "Item" if index <= 1 else "UpdateItem"
            support_dict = index in (1, 3)
            CreateItem().create(item_name, item_type, support_dict)
    elif args.spider:
        # Interactive chooser for the spider base-class template.
        c = Choice(
            "请选择爬虫模板",
            ["AirSpider", "Spider", "TaskSpider", "BatchSpider"],
            icon_style=StringStyle(fore=Fore.green),
            selected_style=StringStyle(fore=Fore.green),
        )
        choice = c.get_choice()
        if choice:
            index, spider_type = choice
            spider_name = args.spider
            CreateSpider().create(spider_name, spider_type)
    elif args.project:
        CreateProject().create(args.project)
    elif args.table:
        CreateTable().create(args.table)
    elif args.init:
        CreateInit().create()
    elif args.json:
        CreateJson().create()
    elif args.sort_json:
        CreateJson().create(sort_keys=True)
    elif args.cookies:
        CreateCookies().create()
    elif args.setting:
        CreateSetting().create()
    elif args.params:
        CreateParams().create()
    else:
        spider.print_help()
spider.print_help()
if __name__ == "__main__":
main()
| 3,984 | Python | .py | 107 | 28.186916 | 97 | 0.606476 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,907 | retry.py | demigody_nas-tools/third_party/feapder/feapder/commands/retry.py | # -*- coding: utf-8 -*-
"""
Created on 2022/11/18 12:33 PM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import argparse
from feapder.core.handle_failed_items import HandleFailedItems
from feapder.core.handle_failed_requests import HandleFailedRequests
def retry_failed_requests(redis_key):
    """Re-queue the failed requests recorded under *redis_key*."""
    HandleFailedRequests(redis_key).reput_failed_requests_to_requests()
def retry_failed_items(redis_key):
    """Re-submit items under *redis_key* that failed to be written to the DB."""
    handler = HandleFailedItems(redis_key)
    handler.reput_failed_items_to_db()
    handler.close()
def parse_args():
    """Parse command line arguments for `feapder retry`."""
    cli = argparse.ArgumentParser(
        description="重试失败的请求或入库失败的item",
        usage="usage: feapder retry [options] [args]",
    )
    cli.add_argument(
        "-r",
        "--request",
        help="重试失败的request 如 feapder retry --request <redis_key>",
        metavar="",
    )
    cli.add_argument(
        "-i", "--item", help="重试失败的item 如 feapder retry --item <redis_key>", metavar=""
    )
    return cli.parse_args()
def main():
    """CLI entry for `feapder retry`: dispatch to request and/or item retry."""
    options = parse_args()
    if options.request:
        retry_failed_requests(options.request)
    if options.item:
        retry_failed_items(options.item)
if __name__ == "__main__":
main()
| 1,354 | Python | .py | 43 | 25.813953 | 87 | 0.6744 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,908 | shell.py | demigody_nas-tools/third_party/feapder/feapder/commands/shell.py | # -*- coding: utf-8 -*-
"""
Created on 2020/5/9 12:37 AM
---------
@summary:
---------
@author: Boris
@email: [email protected]
"""
import argparse
import re
import shlex
import sys
import IPython
import pyperclip
from feapder import Request
from feapder.utils import tools
def parse_curl(curl_str):
    """
    Parse a "copy as cURL (bash)" command string into keyword arguments for
    :func:`request` (url, method, headers, cookies, params, data, auth, ...).

    :param curl_str: full curl command line, starting with "curl"
    :raises ValueError: when the arguments cannot be parsed
    """

    def _form_to_dict(form_data):
        # "a=1&b=2" -> {"a": "1", "b": "2"}; fragments without "=" are
        # skipped (non-form payloads such as raw JSON carry no pairs).
        return dict(
            pair.split("=", 1) for pair in form_data.split("&") if "=" in pair
        )

    parser = argparse.ArgumentParser(description="")
    parser.add_argument("target_url", type=str, nargs="?")
    parser.add_argument("-X", "--request", type=str, nargs=1, default="")
    parser.add_argument("-H", "--header", nargs=1, action="append", default=[])
    parser.add_argument("-d", "--data", nargs=1, action="append", default=[])
    parser.add_argument("--data-ascii", nargs=1, action="append", default=[])
    parser.add_argument("--data-binary", nargs=1, action="append", default=[])
    parser.add_argument("--data-urlencode", nargs=1, action="append", default=[])
    parser.add_argument("--data-raw", nargs=1, action="append", default=[])
    parser.add_argument("-F", "--form", nargs=1, action="append", default=[])
    parser.add_argument("--digest", action="store_true")
    parser.add_argument("--ntlm", action="store_true")
    parser.add_argument("--anyauth", action="store_true")
    parser.add_argument("-e", "--referer", type=str)
    parser.add_argument("-G", "--get", action="store_true", default=False)
    parser.add_argument("-I", "--head", action="store_true")
    parser.add_argument("-k", "--insecure", action="store_true")
    parser.add_argument("-o", "--output", type=str)
    parser.add_argument("-O", "--remote_name", action="store_true")
    parser.add_argument("-r", "--range", type=str)
    parser.add_argument("-u", "--user", type=str)
    parser.add_argument("--url", type=str)
    parser.add_argument("-A", "--user-agent", type=str)
    parser.add_argument("--compressed", action="store_true", default=False)

    curl_split = shlex.split(curl_str)
    try:
        args = parser.parse_known_args(curl_split[1:])[0]
    # BaseException on purpose: argparse signals failure with SystemExit,
    # which "except Exception" would let through and kill the process.
    except BaseException:
        raise ValueError("Could not parse arguments.")

    # target url
    url = args.target_url

    # # request method (historical variant, kept for reference)
    # try:
    #     method = args.request.lower()
    # except AttributeError:
    #     method = args.request[0].lower()

    # headers: each -H value is "Name: value"
    headers = {
        h[0].split(":", 1)[0]: ("".join(h[0].split(":", 1)[1]).strip())
        for h in args.header
    }
    if args.user_agent:
        headers["User-Agent"] = args.user_agent
    if args.referer:
        headers["Referer"] = args.referer
    if args.range:
        headers["Range"] = args.range

    # Cookie: lifted out of the headers into a separate dict
    cookie_str = headers.pop("Cookie", "") or headers.pop("cookie", "")
    cookies = tools.get_cookies_from_str(cookie_str) if cookie_str else {}

    # query params embedded in the url
    url, params = tools.parse_url_params(url)

    # request body: all -d/--data-*/-F payloads concatenated
    data = "".join(
        [
            "".join(d)
            for d in args.data
            + args.data_ascii
            + args.data_binary
            + args.data_raw
            + args.form
        ]
    )
    if data:
        data = re.sub(r"^\$", "", data)

    # method; with -G (or no data/-X) the body is folded into the query
    # params. Decode the form pairs first — dict.update on a raw non-empty
    # string raises ValueError.
    if args.head:
        method = "head"
    elif args.get:
        method = "get"
        if data:
            params.update(_form_to_dict(data))
    elif args.request:
        method = (
            args.request[0].lower()
            if isinstance(args.request, list)
            else args.request.lower()
        )
    elif data:
        method = "post"
    else:
        method = "get"
        if data:
            params.update(_form_to_dict(data))

    username = None
    password = None
    if args.user:
        u = args.user
        if ":" in u:
            # Split on the first ":" only so passwords may contain colons.
            username, password = u.split(":", 1)
        else:
            username = u
            password = input(f"请输入用户{username}的密码")

    auth = None
    if args.digest:
        auth = "digest"
    elif args.ntlm:
        auth = "ntlm"
    elif username:
        auth = "basic"

    insecure = args.insecure

    return dict(
        url=url,
        method=method,
        cookies=cookies,
        headers=headers,
        params=params,
        data=data,
        insecure=insecure,
        username=username,
        password=password,
        auth=auth,
    )
def request(**kwargs):
    """
    Fetch a response with the given Request kwargs, print it, then drop
    into an IPython shell with ``response`` available for inspection.
    """
    # Explicitly disable proxies unless the caller asked for them.
    kwargs.setdefault("proxies", None)
    response = Request(**kwargs).get_response()
    print(response)
    IPython.embed(header="now you can use response")
def fetch_url(url):
    """Fetch *url* and open the interactive response shell (see ``request``)."""
    request(url=url)
def fetch_curl():
    """
    Read a "copy as cURL (bash)" command from the clipboard (after a
    confirmation prompt), parse it, and open the interactive response shell.
    """
    input("请复制请求为cURL (bash),复制后按任意键读取剪切板内容\n")
    curl = pyperclip.paste()
    if curl:
        kwargs = parse_curl(curl)
        request(**kwargs)
def usage():
    """
    下载调试器
    usage: feapder shell [options] [args]
    optional arguments:
    -u, --url 抓取指定url
    -c, --curl 抓取curl格式的请求
    """
    # NOTE: the docstring above doubles as the user-facing help text printed
    # below, so it must stay as-is; do not rewrite it as an API docstring.
    print(usage.__doc__)
    sys.exit()
def parse_args():
    """Parse command line arguments for `feapder shell`; returns (parser, args)."""
    cli = argparse.ArgumentParser(
        description="测试请求",
        usage="usage: feapder shell [options] [args]",
    )
    cli.add_argument(
        "-u",
        "--url",
        help="请求指定地址, 如 feapder shell --url http://www.spidertools.cn/",
        metavar="",
    )
    cli.add_argument("-c", "--curl", help="执行curl,调试响应", action="store_true")
    return cli, cli.parse_args()
def main():
    """CLI entry for `feapder shell`: fetch a URL, a curl command, or show help."""
    arg_parser, options = parse_args()
    if options.url:
        fetch_url(options.url)
    elif options.curl:
        fetch_curl()
    else:
        arg_parser.print_help()
if __name__ == "__main__":
main()
| 5,514 | Python | .py | 176 | 24.261364 | 81 | 0.591191 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,909 | create_project.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_project.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 创建项目
---------
@author: Boris
@email: [email protected]
"""
import getpass
import os
import shutil
import feapder.utils.tools as tools
def deal_file_info(file):
    """Fill the {DATE} and {USER} template placeholders of *file* and return it."""
    user = os.getenv("FEAPDER_USER") or getpass.getuser()
    return file.replace("{DATE}", tools.get_current_date()).replace("{USER}", user)
class CreateProject:
    """Scaffold a new feapder project from the bundled project template."""
    def copy_callback(self, src, dst, *, follow_symlinks=True):
        """
        copy_function for shutil.copytree: .py files are copied through
        ``deal_file_info`` (template placeholder substitution); everything
        else is copied verbatim with metadata preserved.
        """
        if src.endswith(".py"):
            with open(src, "r", encoding="utf-8") as src_file, open(
                dst, "w", encoding="utf8"
            ) as dst_file:
                content = src_file.read()
                content = deal_file_info(content)
                dst_file.write(content)
        else:
            shutil.copy2(src, dst, follow_symlinks=follow_symlinks)
    def create(self, project_name):
        """
        Create directory *project_name* (relative to cwd) from the project
        template; refuses to overwrite an existing directory.
        """
        if os.path.exists(project_name):
            print("%s 项目已经存在" % project_name)
        else:
            # Template ships inside the installed feapder package.
            template_path = os.path.abspath(
                os.path.join(__file__, "../../../templates/project_template")
            )
            shutil.copytree(
                template_path, project_name, copy_function=self.copy_callback
            )
            print("\n%s 项目生成成功" % project_name)
| 1,360 | Python | .py | 39 | 26.153846 | 81 | 0.579937 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,910 | create_params.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_params.py | # -*- coding: utf-8 -*-
"""
Created on 2021/4/25 10:22 上午
---------
@summary: 将浏览器的cookie转为request的cookie
---------
@author: Boris
@email: [email protected]
"""
import sys
from feapder.utils.tools import dumps_json
class CreateParams:
    """Split a pasted URL into ready-to-use `url` and `params` code snippets."""

    def get_data(self):
        """
        Read a (possibly multi-line) URL from stdin; stops at the first
        empty line and returns the lines joined together.
        """
        print("请输入请求地址")
        data = []
        while True:
            line = sys.stdin.readline().strip()
            if not line:
                break
            data.append(line)
        return "".join(data)

    def get_params(self, url):
        """
        Return the query string of *url* as a dict.

        Robust where the original crashed: a URL without "?" yields {}, and
        a parameter without "=" (e.g. "?flag") yields an empty value.
        """
        if "?" not in url:
            return {}
        params_json = {}
        # Split on the FIRST "?" (consistent with create(), which takes the
        # part before the first "?" as the url).
        for param in url.split("?", 1)[-1].split("&"):
            if not param:
                continue
            key, _, value = param.partition("=")
            params_json[key] = value
        return params_json

    def create(self):
        """Read a URL from stdin and print it as `url` / `params` snippets."""
        data = self.get_data()
        params = self.get_params(data)
        url = data.split("?")[0]
        print(f'url = "{url}"')
        print(f"params = {dumps_json(params)}")
| 1,106 | Python | .py | 40 | 19.125 | 52 | 0.517413 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,911 | create_init.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_init.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 创建__init__.py
---------
@author: Boris
@email: [email protected]
"""
from feapder.utils.tools import dumps_json
class CreateInit:
    """Generate an __init__.py exporting every sibling module via __all__."""

    def create(self):
        """
        Scan the current working directory for .py files (excluding
        __init__*) and write an __init__.py whose __all__ lists them.
        """
        import os

        modules = [
            filename.split(".")[0]
            for filename in os.listdir(os.getcwd())
            if filename.endswith(".py") and not filename.startswith("__init__")
        ]
        with open("__init__.py", "w", encoding="utf-8") as file:
            file.write("__all__ = %s" % dumps_json(modules))
| 670 | Python | .py | 23 | 22.434783 | 72 | 0.52044 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,912 | create_spider.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_spider.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 创建spider
---------
@author: Boris
@email: [email protected]
"""
import getpass
import os
import re
import feapder.utils.tools as tools
from .create_init import CreateInit
def deal_file_info(file):
    """Fill the {DATE} and {USER} template placeholders of *file* and return it."""
    user = os.getenv("FEAPDER_USER") or getpass.getuser()
    return file.replace("{DATE}", tools.get_current_date()).replace("{USER}", user)
class CreateSpider:
    """Generate a spider source file from one of the bundled templates."""
    def __init__(self):
        # Used to regenerate spiders/__init__.py after creating a spider there.
        self._create_init = CreateInit()
    def cover_to_underline(self, key):
        """
        Convert a camel/Pascal-case name to snake_case,
        e.g. "SpiderName" -> "spider_name".

        Implemented as an ordered chain of str.replace(..., 1) calls, one
        per run of capitals. A run of several capitals (e.g. "HTTPServer")
        is treated as one word, which may not split the way you expect.
        """
        regex = "[A-Z]*"
        capitals = re.findall(regex, key)
        if capitals:
            for pos, capital in enumerate(capitals):
                if not capital:
                    continue
                if pos == 0:
                    # Leading capitals are lowered without a leading "_".
                    if len(capital) > 1:
                        key = key.replace(capital, capital.lower() + "_", 1)
                    else:
                        key = key.replace(capital, capital.lower(), 1)
                else:
                    if len(capital) > 1:
                        key = key.replace(capital, "_" + capital.lower() + "_", 1)
                    else:
                        key = key.replace(capital, "_" + capital.lower(), 1)
        return key
    def get_spider_template(self, spider_type):
        """
        Return the template source for *spider_type* (one of AirSpider,
        Spider, TaskSpider, BatchSpider); raises ValueError otherwise.
        """
        if spider_type == "AirSpider":
            template_path = "air_spider_template.tmpl"
        elif spider_type == "Spider":
            template_path = "spider_template.tmpl"
        elif spider_type == "TaskSpider":
            template_path = "task_spider_template.tmpl"
        elif spider_type == "BatchSpider":
            template_path = "batch_spider_template.tmpl"
        else:
            raise ValueError("spider type error, only support AirSpider、 Spider、TaskSpider、BatchSpider")
        # Templates ship inside the installed feapder package.
        template_path = os.path.abspath(
            os.path.join(__file__, "../../../templates", template_path)
        )
        with open(template_path, "r", encoding="utf-8") as file:
            spider_template = file.read()
        return spider_template
    def create_spider(self, spider_template, spider_name, file_name):
        """Substitute the template placeholders and return the final source."""
        spider_template = spider_template.replace("${spider_name}", spider_name)
        spider_template = spider_template.replace("${file_name}", file_name)
        spider_template = deal_file_info(spider_template)
        return spider_template
    def save_spider_to_file(self, spider, spider_name, file_name):
        """
        Write the spider source to *file_name* (asking before overwriting);
        regenerates __init__.py when created inside a "spiders" directory.
        """
        if os.path.exists(file_name):
            confirm = input("%s 文件已存在 是否覆盖 (y/n). " % file_name)
            if confirm != "y":
                print("取消覆盖 退出")
                return
        with open(file_name, "w", encoding="utf-8") as file:
            file.write(spider)
        print("\n%s 生成成功" % spider_name)
        if os.path.basename(os.path.dirname(os.path.abspath(file_name))) == "spiders":
            self._create_init.create()
    def create(self, spider_name, spider_type):
        """
        Create a new spider named *spider_name* from the *spider_type*
        template. The class name is normalized to CamelCase and the file
        name to snake_case.
        """
        # validate spider_name (must start with a letter; snake or camel case)
        if not re.search("^[a-zA-Z][a-zA-Z0-9_]*$", spider_name):
            print("爬虫命名不符合规范,请用蛇形或驼峰命名方式")
            return
        underline_format = self.cover_to_underline(spider_name)
        spider_name = tools.key2hump(underline_format)
        file_name = underline_format + ".py"
        print(spider_name, file_name)
        spider_template = self.get_spider_template(spider_type)
        spider = self.create_spider(spider_template, spider_name, file_name)
        self.save_spider_to_file(spider, spider_name, file_name)
| 3,648 | Python | .py | 84 | 32.345238 | 104 | 0.580748 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,913 | create_table.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_table.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 根据json生成表
---------
@author: Boris
@email: [email protected]
"""
import time
import pyperclip
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.db.mysqldb import MysqlDB
from feapder.utils.tools import key2underline
class CreateTable:
    """Interactively build and execute a CREATE TABLE statement from a JSON sample."""
    def __init__(self):
        self._db = MysqlDB()
    def is_valid_date(self, date):
        """
        Return True if *date* parses as "%Y-%m-%d %H:%M:%S" (when it
        contains ":") or as "%Y-%m-%d" otherwise.
        """
        try:
            if ":" in date:
                time.strptime(date, "%Y-%m-%d %H:%M:%S")
            else:
                time.strptime(date, "%Y-%m-%d")
            return True
        except:
            return False
    def get_key_type(self, value):
        """Map a sample JSON value to a MySQL column type."""
        if isinstance(value, int):
            key_type = "int"
        elif isinstance(value, float):
            key_type = "double"
        elif isinstance(value, str):
            if self.is_valid_date(value):
                if ":" in value:
                    key_type = "datetime"
                else:
                    key_type = "date"
            elif len(value) > 50:
                # long sample strings get an unbounded column
                key_type = "text"
            else:
                key_type = "varchar(255)"
        elif isinstance(value, (dict, list)):
            key_type = "longtext"
        else:
            key_type = "varchar(255)"
        return key_type
    def get_data(self):
        """
        Prompt the user, read a JSON sample from the clipboard, echo it,
        and return it parsed.
        """
        input("请复制json格式数据, 复制后按任意键读取剪切板内容\n")
        text = pyperclip.paste()
        print(text + "\n")
        return tools.get_json(text)
    def create(self, table_name):
        """
        Build the CREATE TABLE statement for *table_name* from the pasted
        JSON sample (with interactive prompts for a batch_date column and a
        unique index) and execute it against the configured MySQL database.
        """
        # read the sample fields
        data = self.get_data()
        if not isinstance(data, dict):
            raise Exception("表数据格式不正确")
        # table skeleton: auto-increment id PK + crawl_time are always added
        sql = """
            CREATE TABLE `{db}`.`{table_name}` (
            `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id主键',
            {other_key}
            `crawl_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '采集时间',
            {unique}
            PRIMARY KEY (`id`)
            ) COMMENT='';
        """
        # print("请设置注释 回车跳过")
        other_key = ""
        for key, value in data.items():
            key = key2underline(key)
            comment = ""
            if key == "id":
                # "id" is reserved for the auto-increment PK; keep the
                # source id in a separate column.
                key = "data_id"
                comment = "原始数据id"
            key_type = self.get_key_type(value)
            # comment = input("%s : %s -> comment:" % (key, key_type))
            other_key += (
                "`{key}` {key_type} COMMENT '{comment}',\n            ".format(
                    key=key, key_type=key_type, comment=comment
                )
            )
        print("\n")
        while True:
            yes = input("是否添加批次字段 batch_date(y/n):")
            if yes == "y":
                other_key += (
                    "`{key}` {key_type} COMMENT '{comment}',\n            ".format(
                        key="batch_date", key_type="date", comment="批次时间"
                    )
                )
                break
            elif yes == "n":
                break
        print("\n")
        while True:
            yes = input("是否设置唯一索引(y/n):")
            if yes == "y":
                # Full-width commas are normalized to ASCII before splitting.
                unique = input("请设置唯一索引, 多个逗号间隔\n等待输入:\n").replace(",", ",")
                if unique:
                    unique = "UNIQUE `idx` USING BTREE (`%s`) comment ''," % "`,`".join(
                        unique.split(",")
                    )
                    break
            elif yes == "n":
                unique = ""
                break
        sql = sql.format(
            db=setting.MYSQL_DB,
            table_name=table_name,
            other_key=other_key.strip(),
            unique=unique,
        )
        print(sql)
        if self._db.execute(sql):
            print("\n%s 创建成功" % table_name)
            print("注意手动检查下字段类型,确保无误!!!")
        else:
            print("\n%s 创建失败" % table_name)
| 4,288 | Python | .py | 125 | 20.44 | 88 | 0.450843 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,914 | __init__.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/__init__.py | __all__ = [
"CreateProject",
"CreateSpider",
"CreateItem",
"CreateInit",
"CreateJson",
"CreateTable",
"CreateCookies",
"CreateSetting",
"CreateParams",
]
from .create_table import CreateTable
from .create_json import CreateJson
from .create_spider import CreateSpider
from .create_init import CreateInit
from .create_item import CreateItem
from .create_project import CreateProject
from .create_cookies import CreateCookies
from .create_setting import CreateSetting
from .create_params import CreateParams
| 543 | Python | .py | 20 | 24.3 | 41 | 0.775862 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,915 | create_json.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_json.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 字符串转json
---------
@author: Boris
@email: [email protected]
"""
import pyperclip
import feapder.utils.tools as tools
class CreateJson:
def get_data(self):
"""
@summary: 从控制台读取多行
---------
---------
@result:
"""
input("请复制需要转换的内容(xxx:xxx格式,支持多行),复制后按任意键读取剪切板内容\n")
text = pyperclip.paste()
print(text + "\n")
data = []
for line in text.split("\n"):
line = line.strip().replace("\t", " " * 4)
if not line:
break
data.append(line)
return data
def create(self, sort_keys=False):
contents = self.get_data()
json = {}
for content in contents:
content = content.strip()
if not content or content.startswith(":"):
continue
regex = "([^:\s]*)[:|\s]*(.*)"
result = tools.get_info(content, regex, fetch_one=True)
if result[0] in json:
json[result[0]] = json[result[0]] + "&" + result[1]
else:
json[result[0]] = result[1].strip()
print(tools.dumps_json(json, sort_keys=sort_keys))
| 1,366 | Python | .py | 43 | 21.162791 | 67 | 0.505747 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,916 | create_item.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_item.py | # -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: 创建item
---------
@author: Boris
@email: [email protected]
"""
import getpass
import os
import feapder.utils.tools as tools
from feapder import setting
from feapder.db.mysqldb import MysqlDB
from .create_init import CreateInit
def deal_file_info(file):
file = file.replace("{DATE}", tools.get_current_date())
file = file.replace("{USER}", os.getenv("FEAPDER_USER") or getpass.getuser())
return file
class CreateItem:
def __init__(self):
self._db = MysqlDB()
self._create_init = CreateInit()
def select_columns(self, table_name):
# sql = 'SHOW COLUMNS FROM ' + table_name
sql = f"SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_DEFAULT, EXTRA, COLUMN_KEY, COLUMN_COMMENT FROM INFORMATION_SCHEMA.Columns WHERE table_name = '{table_name}' and table_schema = '{setting.MYSQL_DB}'"
columns = self._db.find(sql)
return columns
def select_tables_name(self, tables_name):
"""
@summary:
---------
@param tables_name: 一类tables 如 qidian*
---------
@result:
"""
sql = f"select table_name from information_schema.tables where table_name like '{tables_name}' and table_schema = '{setting.MYSQL_DB}'"
tables_name = self._db.find(sql)
return tables_name
def convert_table_name_to_hump(self, table_name):
"""
@summary: 格式化表明为驼峰格式
---------
@param table:
---------
@result:
"""
table_hump_format = ""
words = table_name.split("_")
for word in words:
table_hump_format += word.capitalize() # 首字母大写
return table_hump_format
def get_item_template(self, item_type):
if item_type == "Item":
template_path = os.path.abspath(
os.path.join(__file__, "../../../templates/item_template.tmpl")
)
else:
template_path = os.path.abspath(
os.path.join(__file__, "../../../templates/update_item_template.tmpl")
)
with open(template_path, "r", encoding="utf-8") as file:
item_template = file.read()
return item_template
def create_item(self, item_template, columns, table_name, support_dict):
table_name_hump_format = self.convert_table_name_to_hump(table_name)
# 组装 类名
item_template = item_template.replace("${item_name}", table_name_hump_format)
if support_dict:
item_template = item_template.replace("${command}", table_name + " 1")
else:
item_template = item_template.replace("${command}", table_name)
item_template = item_template.replace("${table_name}", table_name)
# 组装 属性
propertys = ""
for column in columns:
column_name = column[0]
column_type = column[1]
is_nullable = column[2]
column_default = column[3]
column_extra = column[4]
column_key = column[5]
column_comment = column[6]
try:
column_default = None if column_default == "NULL" else column_default
value = (
"kwargs.get('{column_name}')".format(column_name=column_name)
if support_dict
else (
column_default != "CURRENT_TIMESTAMP" and column_default or None
)
and eval(column_default)
)
except:
value = (
"kwargs.get('{column_name}')".format(column_name=column_name)
if support_dict
else (
column_default != "CURRENT_TIMESTAMP" and column_default or None
)
and column_default
)
if column_extra == "auto_increment" or column_default is not None:
propertys += f"# self.{column_name} = {value}"
else:
if value is None or isinstance(value, (float, int)) or support_dict:
propertys += f"self.{column_name} = {value}"
else:
propertys += f"self.{column_name} = '{value}'"
if column_comment:
propertys += f" # {column_comment}"
propertys += "\n" + " " * 8
item_template = item_template.replace("${propertys}", propertys.strip())
item_template = deal_file_info(item_template)
return item_template
def save_template_to_file(self, item_template, table_name):
item_file = table_name + "_item.py"
if os.path.exists(item_file):
confirm = input("%s 文件已存在 是否覆盖 (y/n). " % item_file)
if confirm != "y":
print("取消覆盖 退出")
return
with open(item_file, "w", encoding="utf-8") as file:
file.write(item_template)
print("\n%s 生成成功" % item_file)
if os.path.basename(os.path.dirname(os.path.abspath(item_file))) == "items":
self._create_init.create()
def create(self, tables_name, item_type, support_dict):
input_tables_name = tables_name
tables_name = self.select_tables_name(tables_name)
if not tables_name:
print(tables_name)
tip = "mysql数据库中无 %s 表 " % input_tables_name
raise KeyError(tip)
for table_name in tables_name:
table_name = table_name[0]
columns = self.select_columns(table_name)
item_template = self.get_item_template(item_type)
item_template = self.create_item(
item_template, columns, table_name, support_dict
)
self.save_template_to_file(item_template, table_name)
| 6,004 | Python | .py | 142 | 30.570423 | 218 | 0.555808 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,917 | create_setting.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_setting.py | # -*- coding: utf-8 -*-
"""
Created on 2021/4/23 13:20
---------
@summary: 生成配置文件
---------
@author: mkdir700
@email: [email protected]
"""
import os
import shutil
class CreateSetting:
def create(self):
if os.path.exists("setting.py"):
confirm = input("配置文件已存在 是否覆盖 (y/n). ")
if confirm != "y":
print("取消覆盖 退出")
return
template_file_path = os.path.abspath(
os.path.join(__file__, "../../../templates/project_template/setting.py")
)
shutil.copy(template_file_path, "./", follow_symlinks=False)
print("配置文件生成成功")
| 693 | Python | .py | 23 | 21.391304 | 84 | 0.559603 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,918 | create_cookies.py | demigody_nas-tools/third_party/feapder/feapder/commands/create/create_cookies.py | # -*- coding: utf-8 -*-
"""
Created on 2021/4/25 10:22 上午
---------
@summary: 将浏览器的cookie转为request的cookie
---------
@author: Boris
@email: [email protected]
"""
import json
import pyperclip
from feapder.utils.tools import get_cookies_from_str, print_pretty
class CreateCookies:
def get_data(self):
"""
@summary: 从剪切板中读取内容
---------
---------
@result:
"""
input("请复制浏览器cookie (列表或字符串格式), 复制后按任意键读取剪切板内容\n")
text = pyperclip.paste()
print(text + "\n")
return text
def create(self):
data = self.get_data()
cookies = {}
try:
data_json = json.loads(data)
for data in data_json:
cookies[data.get("name")] = data.get("value")
except:
cookies = get_cookies_from_str(data)
print_pretty(cookies)
| 975 | Python | .py | 34 | 19.294118 | 66 | 0.562201 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,919 | run.py | demigody_nas-tools/tests/run.py | import unittest
from tests.test_metainfo import MetaInfoTest
if __name__ == '__main__':
suite = unittest.TestSuite()
# 测试名称识别
suite.addTest(MetaInfoTest('test_metainfo'))
# 运行测试
runner = unittest.TextTestRunner()
runner.run(suite)
| 278 | Python | .py | 9 | 24.666667 | 48 | 0.703252 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,920 | tests_utils.py | demigody_nas-tools/tests/tests_utils.py | from lxml import etree
import re
from guessit.jsonutils import GuessitEncoder
from guessit.api import guessit, properties, suggested_expected, GuessitException, default_api
import json
class TestUtils:
@staticmethod
def clear_file_name(name):
if not name:
return None
replacement_dict = {
r"[*?\\/\"<>~|,,?]": "",
r"[\s]+": " ",
}
cleaned_name = name
for pattern, replacement in replacement_dict.items():
cleaned_name = re.sub(pattern, replacement, cleaned_name, flags=re.IGNORECASE).strip()
cleaned_name = cleaned_name.replace(":", "-").replace(":", "-")
return cleaned_name
@staticmethod
def find_matching_tables_with_title(html):
tree = etree.HTML(html)
table_elements = tree.xpath('//table[contains(@class, "torrentname") and @width="100%"]')
matching_tables = []
for table_element in table_elements:
title_element = table_element.xpath('.//a[contains(@title, "")]/@title')
if title_element:
matching_tables.append(title_element[0])
return matching_tables
@staticmethod
def find_matching_tables_with_free(html):
tree = etree.HTML(html)
table_elements = tree.xpath('//img[contains(@class, "pro_free") and contains(translate(@alt, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"), "free")]')
matching_tables = []
for table_element in table_elements:
matching_tables.append(table_element.get('class'))
return matching_tables
@staticmethod
def find_matching_tables_with_2xfree(html):
tree = etree.HTML(html)
table_elements = tree.xpath('//img[contains(translate(@class, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"), "pro_2xfree") and contains(translate(@alt, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"), "2xfree")]')
matching_tables = []
for table_element in table_elements:
matching_tables.append(table_element.get('class'))
return matching_tables
@staticmethod
def clean_all_sites_free(html):
# 匹配字符串 "全站 [Free] 生效中",不区分大小写
pattern = re.compile(r'<h1.*?>.*?全站\s+\[Free\]\s+生效中.*?</h1>', re.IGNORECASE)
# 使用 re.sub 进行替换
cleaned_html = re.sub(pattern, '', html)
return cleaned_html
@staticmethod
def guess_movie_info(filename):
if filename:
guess = default_api.guessit(filename)
return guess
else:
return "" | 2,689 | Python | .py | 59 | 35.440678 | 247 | 0.635967 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,921 | playground.py | demigody_nas-tools/tests/playground.py | import os
import sys
# # # 获取当前文件所在目录的上层目录,即项目根目录
# project_root = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
# # 将项目根目录添加到模块搜索路径中
# sys.path.append(project_root)
# from app.helper import OcrHelper
# from app.media.meta import MediaItem
from tests_utils import TestUtils
def main():
print("hello world")
# html = """
# <img class="pro_2Xfree" src="pic/trans.gif" alt="2XFree" title="免費">
# """
# matching_titles = TestUtils.find_matching_tables_with_title(html)
# if matching_titles:
# for title in matching_titles:
# print("match title - ", title)
# else:
# print("No match found.")
# matching_frees = TestUtils.find_matching_tables_with_free(html)
# if matching_frees:
# for free in matching_frees:
# print("match free success")
# else:
# print("No free")
# matching_2xfrees = TestUtils.find_matching_tables_with_2xfree(html)
# if matching_2xfrees:
# for free in matching_2xfrees:
# print("match 2xfree success")
# else:
# print("No 2xfree")
# name = "Super.?Mario.Bros.Movie,.:The.2023.4K.UHD.Blu-ray.REMUX.H265.10bit.Dolby.Vision.TrueHD.Atmos.mkv"
# print(TestUtils.clear_file_name(name))
# html1 = """
# <h1 style="margin-top:15px;margin-buttom:-10px;color:#f29d38;font-size:20px;text-align:center;font-weight:bold;font-family:'Microsoft YaHei'">全站 [Free] 生效中!时间:2023-08-18 00:00:00 ~ 2023-08-24 23:59:59</h1>
# <h1 style="margin-top:15px;margin-buttom:-10px;color:#f29d38;font-size:20px;text-align:center;font-weight:bold;font-family:'Microsoft YaHei'">全站生效中!时间:2023-08-18 00:00:00 ~ 2023-08-24 23:59:59</h1>
# """
# print(TestUtils.clean_all_sites_free(html1))
# video1 = "Super.Mario.Bros.Movie.2023.4K.UHD.Blu-ray.REMUX.H265.10bit.Dolby.Vision.HDR10.TrueHD.Atmos.mkv"
# video1_dict = TestUtils.guess_movie_info(video1)
# video1_item = MediaItem(datas=video1_dict)
# print(video1_item.to_dict_str())
# video2 = "Teenage Mutant Ninja Turtles Mutant Mayhem 2023 2160p iTunes WEB-DL DDP5.1 Atmos DV H 265-HHWEB.mkv"
# video2_dict = TestUtils.guess_movie_info(video2)
# video2_item = MediaItem(datas=video2_dict)
# print(video2_item.to_dict_str())
# video3 = "I AM Nobody S01 2023 1080p WEB-DL H264 AAC-HHWEB"
# video3_dict = TestUtils.guess_movie_info(video3)
# video3_item = MediaItem(datas=video3_dict)
# print(video3_item.to_dict_str())
# video4 = "一人之下 - S01E1001-S01E1110 - 阿威十八式.1024x576.WEB-DL.H264.FLAC.AAC-HHWEB"
# video4_dict = TestUtils.guess_movie_info(video4)
# video4_item = MediaItem(datas=video4_dict)
# print(video4_item.to_dict_str())
# video5 = "夺宝奇兵5.Indiana Jones and the Dial of Destiny.2023.2160p.HDR.H265.内嵌中英字幕.mp4"
# video5_dict = TestUtils.guess_movie_info(video5)
# video5_item = MediaItem(datas=video5_dict)
# print(video5_item.to_dict_str())
# video6 = "[HorribleSubs] 牙狼 -VANISHING LINE - 01 [1080p].mkv"
# video6_dict = TestUtils.guess_movie_info(video6)
# video6_item = MediaItem(datas=video6_dict)
# print(video6_item.to_dict_str())
# video7 = "Spider-Man.Across.the.Spider-Verse.2023.2160p.WEB-DL.DDP5.1.H.265.Part.1-yiiha"
# video7_dict = TestUtils.guess_movie_info(video7)
# video7_item = MediaItem(datas=video7_dict)
# print(video7_item.to_dict_str())
# ocr_result = OcrHelper().get_captcha_text(image_url="https://www.yht7.com/upload/image/20191109/1735560-20191109220533186-1855679599.jpg")
# print(ocr_result)
if __name__ == "__main__":
main() | 3,773 | Python | .py | 71 | 47.774648 | 211 | 0.68608 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,922 | test_metainfo.py | demigody_nas-tools/tests/test_metainfo.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from app.media.meta import MetaInfo
from tests.cases.meta_cases import meta_cases
class MetaInfoTest(TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_metainfo(self):
for info in meta_cases:
if not info.get("title"):
continue
meta_info = MetaInfo(title=info.get("title"), subtitle=info.get("subtitle"))
target = {
"type": meta_info.type.value,
"cn_name": meta_info.cn_name or "",
"en_name": meta_info.en_name or "",
"year": meta_info.year or "",
"part": meta_info.part or "",
"season": meta_info.get_season_string(),
"episode": meta_info.get_episode_string(),
"restype": meta_info.get_edtion_string(),
"pix": meta_info.resource_pix or "",
"video_codec": meta_info.video_encode or "",
"audio_codec": meta_info.audio_encode or ""
}
self.assertEqual(target, info.get("target"))
| 1,156 | Python | .py | 28 | 29.785714 | 88 | 0.540998 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,923 | meta_cases.py | demigody_nas-tools/tests/cases/meta_cases.py | meta_cases = [{
"title": "The Long Season 2017 2160p WEB-DL H265 AAC-XXX",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "The Long Season",
"year": "2017",
"part": "",
"season": "",
"episode": "",
"restype": "WEB-DL",
"pix": "2160p",
"video_codec": "H265",
"audio_codec": "AAC"
}
}, {
"title": "Cherry Season S01 2014 2160p WEB-DL H265 AAC-XXX",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Cherry Season",
"year": "2014",
"part": "",
"season": "S01",
"episode": "",
"restype": "WEB-DL",
"pix": "2160p",
"video_codec": "H265",
"audio_codec": "AAC"
}
}, {
"title": "【爪爪字幕组】★7月新番[欢迎来到实力至上主义的教室 第二季/Youkoso Jitsuryoku Shijou Shugi no Kyoushitsu e S2][11][1080p][HEVC][GB][MP4][招募翻译校对]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Youkoso Jitsuryoku Shijou Shugi No Kyoushitsu E",
"year": "",
"part": "",
"season": "S02",
"episode": "E11",
"restype": "",
"pix": "1080p",
"video_codec": "HEVC",
"audio_codec": ""
}
}, {
"title": "National.Parks.Adventure.AKA.America.Wild:.National.Parks.Adventure.3D.2016.1080p.Blu-ray.AVC.TrueHD.7.1",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "National Parks Adventure",
"year": "2016",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay 3D",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": "TrueHD 7.1"
}
}, {
"title": "[秋叶原冥途战争][Akiba Maid Sensou][2022][WEB-DL][1080][TV Series][第01话][LeagueWEB]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Akiba Maid Sensou",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E01",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "哆啦A梦:大雄的宇宙小战争 2021 (2022) - 1080p.mp4",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "哆啦A梦:大雄的宇宙小战争 2021",
"en_name": "",
"year": "2022",
"part": "",
"season": "",
"episode": "",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "新精武门1991 (1991).mkv",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "新精武门1991",
"en_name": "",
"year": "1991",
"part": "",
"season": "",
"episode": "",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "24 S01 1080p WEB-DL AAC2.0 H.264-BTN",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "24",
"year": "",
"part": "",
"season": "S01",
"episode": "",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC 2.0"
}
}, {
"title": "Qi Refining for 3000 Years S01E06 2022 1080p B-Blobal WEB-DL X264 AAC-AnimeS@AdWeb",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Qi Refining For 3000 Years",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E06",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "AAC"
}
}, {
"title": "Noumin Kanren no Skill Bakka Agetetara Naze ka Tsuyoku Natta S01E02 2022 1080p B-Global WEB-DL X264 AAC-AnimeS@ADWeb[2022年10月新番]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Noumin Kanren No Skill Bakka Agetetara Naze Ka Tsuyoku Natta",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E02",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "AAC"
}
}, {
"title": "dou luo da lu S01E229 2018 2160p WEB-DL H265 AAC-ADWeb[[国漫连载] 斗罗大陆 第229集 4k | 国语中字]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Dou Luo Da Lu",
"year": "2018",
"part": "",
"season": "S01",
"episode": "E229",
"restype": "WEB-DL",
"pix": "2160p",
"video_codec": "H265",
"audio_codec": "AAC"
}
}, {
"title": "Thor Love and Thunder (2022) [1080p] [WEBRip] [5.1]",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "Thor Love And Thunder",
"year": "2022",
"part": "",
"season": "",
"episode": "",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "5.1"
}
}, {
"title": "[Animations(动画片)][[诛仙][Jade Dynasty][2022][WEB-DL][2160][TV Series][TV 08][LeagueWEB]][诛仙/诛仙动画 第一季 第08集 | 类型:动画 [国语中字]][680.12 MB]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Jade Dynasty",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E08",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "钢铁侠2 (2010) 1080p AC3.mp4",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "钢铁侠2",
"en_name": "",
"year": "2010",
"part": "",
"season": "",
"episode": "",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "AC3"
}
}, {
"title": "Wonder Woman 1984 2020 BluRay 1080p Atmos TrueHD 7.1 X264-EPiC",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "Wonder Woman 1984",
"year": "2020",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "Atmos TrueHD 7.1"
}
}, {
"title": "9-1-1 - S04E03 - Future Tense WEBDL-1080p.mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "9 1 1",
"year": "",
"part": "",
"season": "S04",
"episode": "E03",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "【幻月字幕组】【22年日剧】【据幸存的六人所说】【04】【1080P】【中日双语】",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "据幸存的六人所说",
"en_name": "",
"year": "",
"part": "",
"season": "S01",
"episode": "E04",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "【爪爪字幕组】★7月新番[即使如此依旧步步进逼/Soredemo Ayumu wa Yosetekuru][09][1080p][HEVC][GB][MP4][招募翻译校对]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Soredemo Ayumu Wa Yosetekuru",
"year": "",
"part": "",
"season": "S01",
"episode": "E09",
"restype": "",
"pix": "1080p",
"video_codec": "HEVC",
"audio_codec": ""
}
}, {
"title": "[猎户不鸽发布组] 不死者之王 第四季 OVERLORD Ⅳ [02] [1080p] [简中内封] [2022年7月番]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "不死者之王",
"en_name": "Overlord Ⅳ",
"year": "",
"part": "",
"season": "S04",
"episode": "E02",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "[GM-Team][国漫][寻剑 第1季][Sword Quest Season 1][2002][02][AVC][GB][1080P]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Sword Quest",
"year": "2002",
"part": "",
"season": "S01",
"episode": "E02",
"restype": "",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": ""
}
}, {
"title": " [猎户不鸽发布组] 组长女儿与照料专员 / 组长女儿与保姆 Kumichou Musume to Sewagakari [09] [1080p+] [简中内嵌] [2022年7月番]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "组长女儿与保姆",
"en_name": "Kumichou Musume To Sewagakari",
"year": "",
"part": "",
"season": "S01",
"episode": "E09",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "Nande Koko ni Sensei ga!? 2019 Blu-ray Remux 1080p AVC LPCM-7³ ACG",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "Nande Koko Ni Sensei Ga!?",
"year": "2019",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay Remux",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": "LPCM 7³"
}
}, {
"title": "30.Rock.S02E01.1080p.BluRay.X264-BORDURE.mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "30 Rock",
"year": "",
"part": "",
"season": "S02",
"episode": "E01",
"restype": "BluRay",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": ""
}
}, {
"title": "[Gal to Kyouryuu][02][BDRIP][1080P][H264_FLAC].mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Gal To Kyouryuu",
"year": "",
"part": "",
"season": "S01",
"episode": "E02",
"restype": "",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "FLAC"
}
}, {
"title": "[AI-Raws] 逆境無頼カイジ #13 (BD HEVC 1920x1080 yuv444p10le FLAC)[7CFEE642].mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "逆境無頼カイジ",
"en_name": "",
"year": "",
"part": "",
"season": "S01",
"episode": "E13",
"restype": "BD",
"pix": "1080p",
"video_codec": "HEVC",
"audio_codec": "FLAC"
}
}, {
"title": "Mr. Robot - S02E06 - eps2.4_m4ster-s1ave.aes SDTV.mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Mr Robot",
"year": "",
"part": "",
"season": "S02",
"episode": "E06",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "[神印王座][Throne of Seal][2022][WEB-DL][2160][TV Series][TV 22][LeagueWEB] 神印王座 第一季 第22集 | 类型:动画 [国语中字][967.44 MB]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Throne Of Seal",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E22",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "S02E1000.mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "",
"year": "",
"part": "",
"season": "S02",
"episode": "E1000",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "西部世界 12.mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "西部世界",
"en_name": "",
"year": "",
"part": "",
"season": "S01",
"episode": "E12",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "[ANi] OVERLORD 第四季 - 04 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Overlord",
"year": "",
"part": "",
"season": "S04",
"episode": "E04",
"restype": "",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": "AAC"
}
}, {
"title": "[SweetSub&LoliHouse] Made in Abyss S2 - 03v2 [WebRip 1080p HEVC-10bit AAC ASSx2].mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Made In Abyss",
"year": "",
"part": "",
"season": "S02",
"episode": "E03",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "AAC"
}
}, {
"title": "[GM-Team][国漫][斗破苍穹 第5季][Fights Break Sphere V][2022][05][HEVC][GB][4K]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Fights Break Sphere V",
"year": "2022",
"part": "",
"season": "S05",
"episode": "E05",
"restype": "",
"pix": "2160p",
"video_codec": "HEVC",
"audio_codec": ""
}
}, {
"title": "Ousama Ranking S01E02-[1080p][BDRIP][X265.FLAC].mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Ousama Ranking",
"year": "",
"part": "",
"season": "S01",
"episode": "E02",
"restype": "BDRIP",
"pix": "1080p",
"video_codec": "X265",
"audio_codec": "FLAC"
}
}, {
"title": "[Nekomoe kissaten&LoliHouse] Soredemo Ayumu wa Yosetekuru - 01v2 [WebRip 1080p HEVC-10bit EAC3 ASSx2].mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Soredemo Ayumu Wa Yosetekuru",
"year": "",
"part": "",
"season": "S01",
"episode": "E01",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "EAC3"
}
}, {
"title": "[喵萌奶茶屋&LoliHouse] 金装的薇尔梅 / Kinsou no Vermeil - 01 [WebRip 1080p HEVC-10bit AAC][简繁内封字幕]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Kinsou No Vermeil",
"year": "",
"part": "",
"season": "S01",
"episode": "E01",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "AAC"
}
}, {
"title": "Hataraku.Maou-sama.S02E05.2022.1080p.CR.WEB-DL.X264.AAC-ADWeb.mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Hataraku Maou Sama",
"year": "2022",
"part": "",
"season": "S02",
"episode": "E05",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "AAC"
}
}, {
"title": "The Witch Part 2:The Other One 2022 1080p WEB-DL AAC5.1 H264-tG1R0",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "The Witch Part 2:The Other One",
"year": "2022",
"part": "",
"season": "",
"episode": "",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC 5.1"
}
}, {
"title": "一夜新娘 - S02E07 - 第 7 集.mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "一夜新娘",
"en_name": "",
"year": "",
"part": "",
"season": "S02",
"episode": "E07",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "[ANi] 處刑少女的生存之道 - 07 [1080P][Baha][WEB-DL][AAC AVC][CHT].mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "处刑少女的生存之道",
"en_name": "",
"year": "",
"part": "",
"season": "S01",
"episode": "E07",
"restype": "",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": "AAC"
}
}, {
"title": "Stand-up.Comedy.S01E01.PartA.2022.1080p.WEB-DL.H264.AAC-TJUPT.mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Stand Up Comedy",
"year": "2022",
"part": "PartA",
"season": "S01",
"episode": "E01",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC"
}
}, {
"title": "教父3.The.Godfather.Part.III.1990.1080p.NF.WEBRip.H264.DDP5.1-PTerWEB.mkv",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "教父3",
"en_name": "The Godfather Part Iii",
"year": "1990",
"part": "",
"season": "",
"episode": "",
"restype": "WEBRip",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "DDP 5.1"
}
}, {
"title": "A.Quiet.Place.Part.II.2020.1080p.UHD.BluRay.DD+7.1.DoVi.X265-PuTao",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "A Quiet Place Part Ii",
"year": "2020",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay DoVi UHD",
"pix": "1080p",
"video_codec": "X265",
"audio_codec": "DD 7.1"
}
}, {
"title": "Childhood.In.A.Capsule.S01E16.2022.1080p.KKTV.WEB-DL.X264.AAC-ADWeb.mkv",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Childhood In A Capsule",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E16",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "AAC"
}
}, {
"title": "[桜都字幕组] 异世界归来的舅舅 / Isekai Ojisan [01][1080p][简体内嵌]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Isekai Ojisan",
"year": "",
"part": "",
"season": "S01",
"episode": "E01",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "【喵萌奶茶屋】★04月新番★[夏日重現/Summer Time Rendering][15][720p][繁日雙語][招募翻譯片源]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Summer Time Rendering",
"year": "",
"part": "",
"season": "S01",
"episode": "E15",
"restype": "",
"pix": "720p",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "[NC-Raws] 打工吧!魔王大人 第二季 / Hataraku Maou-sama!! - 02 (B-Global 1920x1080 HEVC AAC MKV)",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Hataraku Maou-Sama!!",
"year": "",
"part": "",
"season": "S02",
"episode": "E02",
"restype": "",
"pix": "1080p",
"video_codec": "HEVC",
"audio_codec": "AAC"
}
}, {
"title": "The Witch Part 2 The Other One 2022 1080p WEB-DL AAC5.1 H.264-tG1R0",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "The Witch Part 2 The Other One",
"year": "2022",
"part": "",
"season": "",
"episode": "",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC 5.1"
}
}, {
"title": "The 355 2022 BluRay 1080p DTS-HD MA5.1 X265.10bit-BeiTai",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "The 355",
"year": "2022",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay",
"pix": "1080p",
"video_codec": "X265 10bit",
"audio_codec": "DTS-HD MA 5.1"
}
}, {
"title": "Sense8 s01-s02 2015-2017 1080P WEB-DL X265 AC3£cXcY@FRDS",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Sense8",
"year": "2015",
"part": "",
"season": "S01-S02",
"episode": "",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X265",
"audio_codec": ""
}
}, {
"title": "The Heart of Genius S01 13-14 2022 1080p WEB-DL H264 AAC",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "The Heart Of Genius",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E13-E14",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC"
}
}, {
"title": "The Heart of Genius E13-14 2022 1080p WEB-DL H264 AAC",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "The Heart Of Genius",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E13-E14",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "H264",
"audio_codec": "AAC"
}
}, {
"title": "2022.8.2.Twelve.Monkeys.1995.GBR.4K.REMASTERED.BluRay.1080p.X264.DTS [3.4 GB]",
"subtitle": "",
"target": {
"type": "电影",
"cn_name": "",
"en_name": "Twelve Monkeys",
"year": "1995",
"part": "",
"season": "",
"episode": "",
"restype": "BluRay",
"pix": "4k",
"video_codec": "X264",
"audio_codec": "DTS"
}
}, {
"title": "[NC-Raws] 王者天下 第四季 - 17 (Baha 1920x1080 AVC AAC MP4) [3B1AA7BB].mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "王者天下",
"en_name": "",
"year": "",
"part": "",
"season": "S04",
"episode": "E17",
"restype": "",
"pix": "1080p",
"video_codec": "AVC",
"audio_codec": "AAC"
}
}, {
"title": "Sense8 S2E1 2015-2017 1080P WEB-DL X265 AC3£cXcY@FRDS",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Sense8",
"year": "2015",
"part": "",
"season": "S02",
"episode": "E01",
"restype": "WEB-DL",
"pix": "1080p",
"video_codec": "X265",
"audio_codec": ""
}
}, {
"title": "[xyx98]传颂之物/Utawarerumono/うたわれるもの[BDrip][1920x1080][TV 01-26 Fin][hevc-yuv420p10 flac_ac3][ENG PGS]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "うたわれるもの",
"year": "",
"part": "",
"season": "S01",
"episode": "E01-E26",
"restype": "",
"pix": "1080p",
"video_codec": "",
"audio_codec": "flac"
}
}, {
"title": "[云歌字幕组][7月新番][欢迎来到实力至上主义的教室 第二季][01][X264 10bit][1080p][简体中文].mp4",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "欢迎来到实力至上主义的教室",
"en_name": "",
"year": "",
"part": "",
"season": "S02",
"episode": "E01",
"restype": "",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": ""
}
}, {
"title": "[诛仙][Jade Dynasty][2022][WEB-DL][2160][TV Series][TV 04][LeagueWEB]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Jade Dynasty",
"year": "2022",
"part": "",
"season": "S01",
"episode": "E04",
"restype": "",
"pix": "",
"video_codec": "",
"audio_codec": ""
}
}, {
"title": "Rick and Morty.S06E06.JuRicksic.Mort.1080p.HMAX.WEBRip.DD5.1.X264-NTb[rartv]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Rick And Morty",
"year": "",
"part": "",
"season": "S06",
"episode": "E06",
"restype": "WEBRip",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "DD 5.1"
}
}, {
"title": "rick and Morty.S06E05.JuRicksic.Mort.1080p.HMAX.WEBRip.DD5.1.X264-NTb[rartv]",
"subtitle": "",
"target": {
"type": "电视剧",
"cn_name": "",
"en_name": "Rick And Morty",
"year": "",
"part": "",
"season": "S06",
"episode": "E05",
"restype": "WEBRip",
"pix": "1080p",
"video_codec": "X264",
"audio_codec": "DD 5.1"
}
}]
| 25,759 | Python | .py | 945 | 18.372487 | 146 | 0.431356 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,924 | security.py | demigody_nas-tools/web/security.py | import base64
import datetime
import hashlib
import hmac
import json
import os
import log
from functools import wraps, partial
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from cryptography.fernet import Fernet
from base64 import b64encode
import jwt
from flask import request
from app.utils import TokenCache
from config import Config
def require_auth(func=None, force=True):
    """
    API security decorator: verifies the caller supplied a valid ApiKey.

    :param func: wrapped view function (None when used with arguments)
    :param force: when False, the key is only checked if the
                  ``check_apikey`` security setting is enabled
    """
    if func is None:
        # Called as @require_auth(force=...) — return a configured decorator
        return partial(require_auth, force=force)

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not force and \
                not Config().get_config("security").get("check_apikey"):
            return func(*args, **kwargs)
        log.debug(f"【Security】{func.__name__} 认证检查")
        # Option 1: apikey in the Authorization request header
        header_value = request.headers.get("Authorization")
        if header_value:
            candidate = str(header_value).split()[-1]
            if candidate == Config().get_config("security").get("api_key"):
                return func(*args, **kwargs)
        # Option 2: apikey appended to the URL as ?apikey=xxx
        query_value = request.args.get("apikey")
        if query_value and query_value == Config().get_config("security").get("api_key"):
            return func(*args, **kwargs)
        log.warn(f"【Security】{func.__name__} 认证未通过,请检查API Key")
        return {
            "code": 401,
            "success": False,
            "message": "安全认证未通过,请检查ApiKey"
        }
    return wrapper
def generate_access_token(username: str, algorithm: str = 'HS256', exp: float = 2):
    """
    Generate a JWT access token.
    :param username: user name placed in the token payload
    :param algorithm: signing algorithm
    :param exp: validity period in hours, default 2
    :return: encoded token
    """
    # Use a timezone-aware UTC timestamp: datetime.utcnow() is deprecated
    # (Python 3.12+) and naive datetimes are error-prone for JWT exp/iat,
    # which PyJWT converts to POSIX timestamps.
    now = datetime.datetime.now(datetime.timezone.utc)
    exp_datetime = now + datetime.timedelta(hours=exp)
    access_payload = {
        'exp': exp_datetime,
        'iat': now,
        'username': username
    }
    access_token = jwt.encode(access_payload,
                              Config().get_config("security").get("api_key"),
                              algorithm=algorithm)
    return access_token
def __decode_auth_token(token: str, algorithms='HS256'):
    """
    Decode and validate a JWT token.
    :param token: encoded token string
    :return: (is_valid, payload)
    """
    key = Config().get_config("security").get("api_key")
    try:
        return True, jwt.decode(token, key=key, algorithms=algorithms)
    except jwt.ExpiredSignatureError:
        # Expired token: return its payload anyway so the caller can refresh
        expired_payload = jwt.decode(token,
                                     key=key,
                                     algorithms=algorithms,
                                     options={'verify_exp': False})
        return False, expired_payload
    except (jwt.DecodeError, jwt.InvalidTokenError, jwt.ImmatureSignatureError):
        return False, {}
def identify(auth_header: str):
    """
    Authenticate a user token; returns (is_valid, username).
    """
    if not auth_header:
        return False, ""
    valid, payload = __decode_auth_token(auth_header)
    if not payload:
        return valid, ""
    return valid, payload.get("username") or ""
def login_required(func):
    """
    Login guard: rejects requests without a valid, cached access token.
    :param func: wrapped view function
    :return: wrapper returning a 403 payload on authentication failure
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        failure = {
            "code": 403,
            "success": False,
            "message": "安全认证未通过,请检查Token"
        }
        token = request.headers.get("Authorization", default=None)
        if not token:
            return failure
        latest_token = TokenCache.get(token)
        if not latest_token:
            return failure
        valid, username = identify(latest_token)
        if not username:
            return failure
        if not valid:
            # Token expired but payload intact: transparently refresh it
            TokenCache.set(token, generate_access_token(username))
        return func(*args, **kwargs)
    return wrapper
def encrypt_message(message, key):
    """
    Encrypt *message* with the given Fernet *key*; returns the token as str.
    """
    token = Fernet(key).encrypt(message.encode())
    return token.decode()
def hash_sha256(message):
    """
    Return the hex-encoded SHA-256 digest of *message*.
    """
    digest = hashlib.sha256()
    digest.update(message.encode())
    return digest.hexdigest()
def aes_decrypt(data, key):
    """
    AES-CBC decrypt a Base64 payload whose first 16 bytes are the IV.
    Returns "" for empty input or an out-of-range padding byte.
    """
    if not data:
        return ""
    raw = base64.b64decode(data)
    iv, payload = raw[:16], raw[16:]
    # AES-256-CBC decryption
    plain = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv).decrypt(payload)
    # Strip PKCS7-style padding; NOTE(review): only the final byte is range-
    # checked, the remaining padding bytes are not validated
    pad_len = plain[-1]
    if not 1 <= pad_len <= AES.block_size:
        return ""
    return plain[:-pad_len].decode('utf-8')
def aes_encrypt(data, key):
    """
    AES-CBC encrypt *data* with a random IV; returns Base64(iv + ciphertext).
    :param data: plaintext string ("" is returned unchanged)
    :param key: key string, encoded to bytes for AES
    """
    if not data:
        return ""
    # AES-256-CBC encryption with a library-generated random IV
    cipher = AES.new(key.encode('utf-8'), AES.MODE_CBC)
    # Pad the UTF-8 *bytes*, not the character string: computing the pad
    # length from len(str) breaks block alignment for multi-byte characters
    # and makes encrypt() raise. For pure-ASCII input this produces the
    # exact same bytes as before (PKCS7).
    result = cipher.encrypt(pad(data.encode('utf-8'), AES.block_size))
    # Prefix the IV and Base64-encode the whole payload
    return b64encode(cipher.iv + result).decode('utf-8')
def nexusphp_encrypt(data_str: str, key):
    """
    NexusPHP-compatible encryption.

    Produces a Base64-encoded JSON envelope {iv, value, mac, tag}:
    AES-CBC ciphertext plus an HMAC-SHA256 signature over the Base64 IV
    and ciphertext. NOTE(review): envelope layout looks like Laravel's
    encryptString format — confirm against the NexusPHP consumer.

    :param data_str: plaintext to encrypt
    :param key: AES/HMAC key (bytes)
    :return: Base64-encoded JSON string
    """
    # Generate a random 16-byte IV
    iv = os.urandom(16)
    # Base64-encode the IV
    iv_base64 = base64.b64encode(iv)
    # Encrypt the data (AES-CBC with PKCS7 padding)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    ciphertext = cipher.encrypt(pad(data_str.encode(), AES.block_size))
    ciphertext_base64 = base64.b64encode(ciphertext)
    # Sign the Base64 IV + ciphertext with HMAC-SHA256
    mac = hmac.new(key, msg=iv_base64 + ciphertext_base64, digestmod=hashlib.sha256).hexdigest()
    # Build the JSON envelope
    json_str = json.dumps({
        'iv': iv_base64.decode(),
        'value': ciphertext_base64.decode(),
        'mac': mac,
        'tag': ''
    })
    # Base64-encode the JSON string
    return base64.b64encode(json_str.encode()).decode()
| 6,323 | Python | .py | 191 | 23.115183 | 96 | 0.599643 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,925 | apiv1.py | demigody_nas-tools/web/apiv1.py | from flask import Blueprint, request
from flask_restx import Api, reqparse, Resource
from app.brushtask import BrushTask
from app.rsschecker import RssChecker
from app.sites import Sites
from app.utils import TokenCache
from config import Config
from web.action import WebAction
from web.backend.pro_user import ProUser
from web.security import require_auth, login_required, generate_access_token
# Blueprint serving the v1 API plus the frontend static assets
apiv1_bp = Blueprint("apiv1",
                     __name__,
                     static_url_path='',
                     static_folder='./frontend/static/',
                     template_folder='./frontend/', )
# Swagger/OpenAPI root. POST endpoints authenticate with the Bearer token
# issued by /user/login; GET endpoints use the ApiKey from settings.
Apiv1 = Api(apiv1_bp,
            version="1.0",
            title="NAStool Api",
            description="POST接口调用 /user/login 获取Token,GET接口使用 基础设置->安全->Api Key 调用",
            doc="/",
            security='Bearer Auth',
            authorizations={"Bearer Auth": {"type": "apiKey", "name": "Authorization", "in": "header"}},
            )
# API namespaces (route groups)
user = Apiv1.namespace('user', description='用户')
system = Apiv1.namespace('system', description='系统')
config = Apiv1.namespace('config', description='设置')
site = Apiv1.namespace('site', description='站点')
service = Apiv1.namespace('service', description='服务')
subscribe = Apiv1.namespace('subscribe', description='订阅')
rss = Apiv1.namespace('rss', description='自定义RSS')
recommend = Apiv1.namespace('recommend', description='推荐')
search = Apiv1.namespace('search', description='搜索')
download = Apiv1.namespace('download', description='下载')
organization = Apiv1.namespace('organization', description='整理')
torrentremover = Apiv1.namespace('torrentremover', description='自动删种')
library = Apiv1.namespace('library', description='媒体库')
brushtask = Apiv1.namespace('brushtask', description='刷流')
media = Apiv1.namespace('media', description='媒体')
sync = Apiv1.namespace('sync', description='目录同步')
filterrule = Apiv1.namespace('filterrule', description='过滤规则')
words = Apiv1.namespace('words', description='识别词')
message = Apiv1.namespace('message', description='消息通知')
plugin = Apiv1.namespace('plugin', description='插件')
class ApiResource(Resource):
    """
    Base resource protected by ApiKey authentication (require_auth).
    """
    method_decorators = [require_auth]
class ClientResource(Resource):
    """
    Base resource protected by login-token authentication (login_required).
    """
    method_decorators = [login_required]
def Failed():
    """
    Build the standard failure response payload.
    """
    return {"code": -1, "success": False, "data": {}}
@user.route('/login')
class UserLogin(Resource):
    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str, help='用户名', location='form', required=True)
    parser.add_argument('password', type=str, help='密码', location='form', required=True)
    @user.doc(parser=parser)
    def post(self):
        """
        User login: verifies credentials and issues a JWT access token.
        """
        args = self.parser.parse_args()
        username = args.get('username')
        password = args.get('password')
        if not username or not password:
            return {"code": 1, "success": False, "message": "用户名或密码错误"}
        user_info = ProUser().get_user(username)
        if not user_info:
            return {"code": 1, "success": False, "message": "用户名或密码错误"}
        # Verify the password against the stored credential
        if not user_info.verify_password(password):
            return {"code": 1, "success": False, "message": "用户名或密码错误"}
        # Cache the token so login_required can validate/refresh it later
        token = generate_access_token(username)
        TokenCache.set(token, token)
        return {
            "code": 0,
            "success": True,
            "data": {
                "token": token,
                "apikey": Config().get_config("security").get("api_key"),
                "userinfo": {
                    "userid": user_info.id,
                    "username": user_info.username,
                    "userpris": str(user_info.pris).split(",")
                }
            }
        }
@user.route('/info')
class UserInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str, help='用户名', location='form', required=True)
    @user.doc(parser=parser)
    def post(self):
        """
        Get a user's id, name and permission list.
        """
        args = self.parser.parse_args()
        username = args.get('username')
        user_info = ProUser().get_user(username)
        if not user_info:
            return {"code": 1, "success": False, "message": "用户名不正确"}
        return {
            "code": 0,
            "success": True,
            "data": {
                "userid": user_info.id,
                "username": user_info.username,
                "userpris": str(user_info.pris).split(",")
            }
        }
@user.route('/manage')
class UserManage(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('oper', type=str, help='操作类型(add 新增/del删除)', location='form', required=True)
    parser.add_argument('name', type=str, help='用户名', location='form', required=True)
    parser.add_argument('pris', type=str, help='权限', location='form')
    @user.doc(parser=parser)
    def post(self):
        """
        User management: add or delete a user (cmd: user_manager).
        """
        return WebAction().api_action(cmd='user_manager', data=self.parser.parse_args())
@user.route('/list')
class UserList(ClientResource):
    @staticmethod
    def post():
        """
        List all users.
        """
        return WebAction().api_action(cmd='get_users')
@user.route('/auth')
class UserAuth(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('site', type=str, help='合作站点', location='form', required=True)
    parser.add_argument('params', type=str, help='认证参数', location='form', required=True)
    @user.doc(parser=parser)
    def post(self):
        """
        Authenticate the user's level against a cooperating site.
        """
        return WebAction().api_action(cmd='auth_user_level', data=self.parser.parse_args())
@service.route('/mediainfo')
class ServiceMediaInfo(ApiResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='args', required=True)
    @service.doc(parser=parser)
    def get(self):
        """
        Recognize media info from a name (ApiKey auth).
        """
        return WebAction().api_action(cmd='name_test', data=self.parser.parse_args())
@service.route('/name/test')
class ServiceNameTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    @service.doc(parser=parser)
    def post(self):
        """
        Name recognition test.
        """
        return WebAction().api_action(cmd='name_test', data=self.parser.parse_args())
@service.route('/rule/test')
class ServiceRuleTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('title', type=str, help='名称', location='form', required=True)
    parser.add_argument('subtitle', type=str, help='描述', location='form')
    parser.add_argument('size', type=float, help='大小(GB)', location='form')
    @service.doc(parser=parser)
    def post(self):
        """
        Filter-rule test.
        """
        return WebAction().api_action(cmd='rule_test', data=self.parser.parse_args())
@service.route('/network/test')
class ServiceNetworkTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('url', type=str, help='URL地址', location='form', required=True)
    @service.doc(parser=parser)
    def post(self):
        """
        Network connectivity test for the given URL.
        """
        return WebAction().api_action(cmd='net_test', data=self.parser.parse_args().get("url"))
@service.route('/run')
class ServiceRun(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('item', type=str,
                        help='服务名称(pttransfer、sync、rssdownload、subscribe_search_all)',
                        location='form',
                        required=True)
    @service.doc(parser=parser)
    def post(self):
        """
        Run a scheduled service by name.
        """
        return WebAction().api_action(cmd='sch', data=self.parser.parse_args())
@site.route('/statistics')
class SiteStatistic(ApiResource):
    @staticmethod
    def get():
        """
        Get per-site user statistics detail (ApiKey auth).
        """
        # Return site statistics as a dict
        return {
            "code": 0,
            "success": True,
            "data": {
                "user_statistics": WebAction().get_site_user_statistics({"encoding": "DICT"}).get("data")
            }
        }
@site.route('/sites')
class SiteSites(ApiResource):
    @staticmethod
    def get():
        """
        Get all site configurations (ApiKey auth).
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "user_sites": Sites().get_sites()
            }
        }
@site.route('/update')
class SiteUpdate(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('site_name', type=str, help='站点名称', location='form', required=True)
    parser.add_argument('site_id', type=int, help='更新站点ID', location='form')
    parser.add_argument('site_pri', type=str, help='优先级', location='form')
    parser.add_argument('site_rssurl', type=str, help='RSS地址', location='form')
    parser.add_argument('site_signurl', type=str, help='站点地址', location='form')
    parser.add_argument('site_cookie', type=str, help='Cookie', location='form')
    parser.add_argument('site_note', type=str, help='站点属性', location='form')
    parser.add_argument('site_include', type=str, help='站点用途', location='form')
    @site.doc(parser=parser)
    def post(self):
        """
        Add or update a site (cmd: update_site).
        """
        return WebAction().api_action(cmd='update_site', data=self.parser.parse_args())
@site.route('/info')
class SiteInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Query a single site's details.
        """
        return WebAction().api_action(cmd='get_site', data=self.parser.parse_args())
@site.route('/favicon')
class SiteFavicon(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Get a site's favicon (Base64).
        """
        return WebAction().api_action(cmd='get_site_favicon', data=self.parser.parse_args())
@site.route('/test')
class SiteTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Test site connectivity.
        """
        return WebAction().api_action(cmd='test_site', data=self.parser.parse_args())
@site.route('/delete')
class SiteDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='站点ID', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Delete a site.
        """
        return WebAction().api_action(cmd='del_site', data=self.parser.parse_args())
@site.route('/cookie/update')
class SiteUpdateCookie(ApiResource):
    parser = reqparse.RequestParser()
    parser.add_argument('site_id', type=int, help='更新站点ID', location='form')
    parser.add_argument('site_cookie', type=str, help='Cookie', location='form')
    parser.add_argument('site_ua', type=str, help='Ua', location='form')
    @site.doc(parser=parser)
    def post(self):
        """
        Update a site's Cookie and User-Agent (ApiKey auth).
        """
        return WebAction().api_action(cmd='update_site_cookie_ua', data=self.parser.parse_args())
@site.route('/statistics/activity')
class SiteStatisticsActivity(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Query a site's upload / download / seeding data.
        """
        return WebAction().api_action(cmd='get_site_activity', data=self.parser.parse_args())
@site.route('/check')
class SiteCheck(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('url', type=str, help='站点地址', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Check whether a site supports FREE/HR detection.
        """
        return WebAction().api_action(cmd='check_site_attr', data=self.parser.parse_args())
@site.route('/statistics/history')
class SiteStatisticsHistory(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('days', type=int, help='时间范围(天)', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Query historical statistics for all sites.
        """
        return WebAction().api_action(cmd='get_site_history', data=self.parser.parse_args())
@site.route('/statistics/seedinfo')
class SiteStatisticsSeedinfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='站点名称', location='form', required=True)
    @site.doc(parser=parser)
    def post(self):
        """
        Query a site's seeding distribution.
        """
        return WebAction().api_action(cmd='get_site_seeding_info', data=self.parser.parse_args())
@site.route('/resources')
class SiteResources(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='站点索引ID', location='form', required=True)
    parser.add_argument('page', type=int, help='页码', location='form')
    parser.add_argument('keyword', type=str, help='站点名称', location='form')
    @site.doc(parser=parser)
    def post(self):
        """
        Query a site's resource list (paged, optionally filtered).
        """
        return WebAction().api_action(cmd='list_site_resources', data=self.parser.parse_args())
@site.route('/list')
class SiteList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('basic', type=int, help='只查询基本信息(0-否/1-是)', location='form')
    parser.add_argument('rss', type=int, help='订阅(0-否/1-是)', location='form')
    parser.add_argument('brush', type=int, help='刷流(0-否/1-是)', location='form')
    parser.add_argument('statistic', type=int, help='数据统计(0-否/1-是)', location='form')
    # Every sibling endpoint declares its parser for Swagger; this one was
    # missing the decorator, so its parameters did not show in the API docs.
    @site.doc(parser=parser)
    def post(self):
        """
        Query the site list, optionally restricted by usage flags.
        """
        return WebAction().api_action(cmd='get_sites', data=self.parser.parse_args())
@site.route('/indexers')
class SiteIndexers(ClientResource):
    @staticmethod
    def post():
        """
        Query the site indexer list.
        """
        return WebAction().api_action(cmd='get_indexers')
@search.route('/keyword')
class SearchKeyword(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('search_word', type=str, help='搜索关键字', location='form', required=True)
    parser.add_argument('unident', type=int, help='快速模式(0-否/1-是)', location='form')
    parser.add_argument('filters', type=str, help='过滤条件', location='form')
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    parser.add_argument('media_type', type=str, help='类型(电影/电视剧)', location='form')
    @search.doc(parser=parser)
    def post(self):
        """
        Search by keyword or TMDB ID.
        """
        return WebAction().api_action(cmd='search', data=self.parser.parse_args())
@search.route('/result')
class SearchResult(ClientResource):
    @staticmethod
    def post():
        """
        Query the current search results.
        """
        return WebAction().api_action(cmd='get_search_result')
@download.route('/search')
class DownloadSearch(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='搜索结果ID', location='form', required=True)
    parser.add_argument('dir', type=str, help='保存目录', location='form')
    parser.add_argument('setting', type=str, help='下载设置', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Download a search result by its id.
        """
        return WebAction().api_action(cmd='download', data=self.parser.parse_args())
@download.route('/item')
class DownloadItem(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('enclosure', type=str, help='链接URL', location='form', required=True)
    parser.add_argument('title', type=str, help='标题', location='form', required=True)
    parser.add_argument('site', type=str, help='站点名称', location='form')
    parser.add_argument('description', type=str, help='描述', location='form')
    parser.add_argument('page_url', type=str, help='详情页面URL', location='form')
    parser.add_argument('size', type=str, help='大小', location='form')
    parser.add_argument('seeders', type=str, help='做种数', location='form')
    parser.add_argument('uploadvolumefactor', type=float, help='上传因子', location='form')
    parser.add_argument('downloadvolumefactor', type=float, help='下载因子', location='form')
    parser.add_argument('dl_dir', type=str, help='保存目录', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Download a torrent/link directly from its enclosure URL.
        """
        return WebAction().api_action(cmd='download_link', data=self.parser.parse_args())
@download.route('/start')
class DownloadStart(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Start (resume) a download task.
        """
        return WebAction().api_action(cmd='pt_start', data=self.parser.parse_args())
@download.route('/stop')
class DownloadStop(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Pause a download task.
        """
        return WebAction().api_action(cmd='pt_stop', data=self.parser.parse_args())
@download.route('/info')
class DownloadInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('ids', type=str, help='任务IDS', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Query download progress for the given task ids.
        """
        return WebAction().api_action(cmd='pt_info', data=self.parser.parse_args())
@download.route('/remove')
class DownloadRemove(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='任务ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Delete a download task.
        """
        return WebAction().api_action(cmd='pt_remove', data=self.parser.parse_args())
@download.route('/history')
class DownloadHistory(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('page', type=str, help='第几页', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Query download history (paged).
        """
        return WebAction().api_action(cmd='get_downloaded', data=self.parser.parse_args())
@download.route('/now')
class DownloadNow(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='下载器 id', location='form', required=False)
    parser.add_argument('force_list', type=bool, help='强制列出所有下载任务', location='form', required=False)
    @download.doc(parser=parser)
    def post(self):
        """
        Query tasks that are currently downloading.
        """
        return WebAction().api_action(cmd='get_downloading', data=self.parser.parse_args())
@download.route('/config/info')
class DownloadConfigInfo(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Query a download setting by id.
        """
        return WebAction().api_action(cmd='get_download_setting', data=self.parser.parse_args())
@download.route('/config/update')
class DownloadConfigUpdate(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('category', type=str, help='分类', location='form')
    parser.add_argument('tags', type=str, help='标签', location='form')
    parser.add_argument('is_paused', type=int, help='动作(0-添加后开始/1-添加后暂停)', location='form')
    parser.add_argument('upload_limit', type=int, help='上传速度限制', location='form')
    parser.add_argument('download_limit', type=int, help='下载速度限制', location='form')
    parser.add_argument('ratio_limit', type=int, help='分享率限制', location='form')
    parser.add_argument('seeding_time_limit', type=int, help='做种时间限制', location='form')
    parser.add_argument('downloader', type=str, help='下载器(Qbittorrent/Transmission)', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Add or modify a download setting.
        """
        return WebAction().api_action(cmd='update_download_setting', data=self.parser.parse_args())
@download.route('/config/delete')
class DownloadConfigDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=str, help='下载设置ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Delete a download setting.
        """
        return WebAction().api_action(cmd='delete_download_setting', data=self.parser.parse_args())
@download.route('/config/list')
class DownloadConfigList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=str, help='ID', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Query download settings (all, or one when sid is given).
        """
        return WebAction().api_action(cmd="get_download_setting", data=self.parser.parse_args())
@download.route('/config/directory')
class DownloadConfigDirectory(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=str, help='下载设置ID', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Query download save directories.
        """
        return WebAction().api_action(cmd="get_download_dirs", data=self.parser.parse_args())
@download.route('/client/add')
class DownloadClientAdd(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('did', type=str, help='下载器ID', location='form')
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(qbittorrent/transmission)', location='form', required=True)
    parser.add_argument('enabled', type=str, help='状态(0-停用 1-启动)', location='form', required=True)
    parser.add_argument('transfer', type=str, help='监控(0-停用 1-启动)', location='form', required=True)
    parser.add_argument('only_nastool', type=str, help='隔离(0-停用 1-启动)', location='form', required=True)
    parser.add_argument('rmt_mode', type=str, help='转移方式', location='form', required=True)
    parser.add_argument('config', type=str, help='配置数据(JSON)', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Add or modify a downloader client.
        """
        return WebAction().api_action(cmd="update_downloader", data=self.parser.parse_args())
@download.route('/client/delete')
class DownloadClientDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('did', type=str, help='下载器ID', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Delete a downloader client.
        """
        return WebAction().api_action(cmd="del_downloader", data=self.parser.parse_args())
@download.route('/client/list')
class DownloadClientList(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('did', type=str, help='下载器ID', location='form')
    @download.doc(parser=parser)
    def post(self):
        """
        Query downloader clients (all, or one when did is given).
        """
        return WebAction().api_action(cmd="get_downloaders", data=self.parser.parse_args())
@download.route('/client/check')
class DownloadClientCheck(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('did', type=str, help='下载器ID', location='form', required=True)
    parser.add_argument('checked', type=str, help='状态(0-关闭 1-开启)', location='form', required=True)
    parser.add_argument('flag', type=str, help='标识(enabled transfer only_nastool)', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Toggle a downloader flag (enabled / transfer / only_nastool).
        """
        return WebAction().api_action(cmd="check_downloader", data=self.parser.parse_args())
@download.route('/client/test')
class DownloadClientTest(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(qbittorrent/transmission)', location='form', required=True)
    parser.add_argument('config', type=str, help='配置数据(JSON)', location='form', required=True)
    @download.doc(parser=parser)
    def post(self):
        """
        Test a downloader client configuration.
        """
        return WebAction().api_action(cmd="test_downloader", data=self.parser.parse_args())
@organization.route('/unknown/delete')
class UnknownDelete(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='未识别记录ID', location='form', required=True)
    @organization.doc(parser=parser)
    def post(self):
        """
        Delete an unrecognized-media record.
        """
        return WebAction().api_action(cmd='del_unknown_path', data=self.parser.parse_args())
@organization.route('/unknown/rename')
class UnknownRename(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('logid', type=str, help='转移历史记录ID', location='form')
    parser.add_argument('unknown_id', type=str, help='未识别记录ID', location='form')
    parser.add_argument('syncmod', type=str, help='转移模式', location='form', required=True)
    parser.add_argument('tmdb', type=int, help='TMDB ID', location='form')
    parser.add_argument('title', type=str, help='标题', location='form')
    parser.add_argument('year', type=str, help='年份', location='form')
    parser.add_argument('type', type=str, help='类型(MOV/TV/ANIME)', location='form')
    parser.add_argument('season', type=int, help='季号', location='form')
    parser.add_argument('episode_format', type=str, help='集数定位', location='form')
    parser.add_argument('min_filesize', type=int, help='最小文件大小', location='form')
    @organization.doc(parser=parser)
    def post(self):
        """
        Manually recognize and transfer a media item.
        """
        return WebAction().api_action(cmd='rename', data=self.parser.parse_args())
@organization.route('/unknown/renameudf')
class UnknownRenameUDF(ClientResource):
    parser = reqparse.RequestParser()
    parser.add_argument('inpath', type=str, help='源目录', location='form', required=True)
    parser.add_argument('outpath', type=str, help='目的目录', location='form', required=True)
    parser.add_argument('syncmod', type=str, help='转移模式', location='form', required=True)
    parser.add_argument('tmdb', type=int, help='TMDB ID', location='form')
    parser.add_argument('title', type=str, help='标题', location='form')
    parser.add_argument('year', type=str, help='年份', location='form')
    parser.add_argument('type', type=str, help='类型(MOV/TV/ANIME)', location='form')
    parser.add_argument('season', type=int, help='季号', location='form')
    parser.add_argument('episode_format', type=str, help='集数定位', location='form')
    parser.add_argument('episode_details', type=str, help='集数范围', location='form')
    parser.add_argument('episode_offset', type=str, help='集数偏移', location='form')
    parser.add_argument('min_filesize', type=int, help='最小文件大小', location='form')
    @organization.doc(parser=parser)
    def post(self):
        """
        Custom (user-defined) recognition and transfer.
        """
        return WebAction().api_action(cmd='rename_udf', data=self.parser.parse_args())
@organization.route('/unknown/redo')
class UnknownRedo(ClientResource):
parser = reqparse.RequestParser()
parser.add_argument('flag', type=str, help='类型(unknow/history)', location='form', required=True)
parser.add_argument('ids', type=list, help='记录ID', location='form', required=True)
@organization.doc(parser=parser)
def post(self):
"""
重新识别
"""
return WebAction().api_action(cmd='re_identification', data=self.parser.parse_args())
@organization.route('/history/delete')
class TransferHistoryDelete(ClientResource):
parser = reqparse.RequestParser()
parser.add_argument('logids', type=list, help='记录IDS', location='form', required=True)
@organization.doc(parser=parser)
def post(self):
"""
删除媒体整理历史记录
"""
return WebAction().api_action(cmd='delete_history', data=self.parser.parse_args())
@organization.route('/unknown/list')
class TransferUnknownList(ClientResource):
@staticmethod
def post():
"""
查询所有未识别记录
"""
return WebAction().api_action(cmd='get_unknown_list')
@organization.route('/history/list')
class TransferHistoryList(ClientResource):
parser = reqparse.RequestParser()
parser.add_argument('page', type=int, help='页码', location='form', required=True)
parser.add_argument('pagenum', type=int, help='每页条数', location='form', required=True)
parser.add_argument('keyword', type=str, help='过滤关键字', location='form')
@organization.doc(parser=parser)
def post(self):
"""
查询媒体整理历史记录
"""
return WebAction().api_action(cmd='get_transfer_history', data=self.parser.parse_args())
@organization.route('/history/statistics')
class HistoryStatistics(ClientResource):
@staticmethod
def post():
"""
查询转移历史统计数据
"""
return WebAction().api_action(cmd='get_transfer_statistics')
@organization.route('/cache/empty')
class TransferCacheEmpty(ClientResource):
@staticmethod
def post():
"""
清空文件转移缓存
"""
return WebAction().api_action(cmd='truncate_blacklist')
@library.route('/sync/start')
class LibrarySyncStart(ClientResource):
    """Kick off a media library synchronization run."""
    @staticmethod
    def post():
        """
        Start media library sync
        """
        return WebAction().api_action(cmd='start_mediasync')
@library.route('/sync/status')
class LibrarySyncStatus(ClientResource):
    """Report the current media library sync state."""
    @staticmethod
    def post():
        """
        Query media library sync status
        """
        return WebAction().api_action(cmd='mediasync_state')
@library.route('/mediaserver/playhistory')
class LibraryPlayHistory(ClientResource):
    """Play history from the media server."""
    @staticmethod
    def post():
        """
        Query media server play history
        """
        return WebAction().api_action(cmd='get_library_playhistory')
@library.route('/mediaserver/resume')
class LibraryResume(ClientResource):
    """The media server's continue-watching list."""
    parser = reqparse.RequestParser()
    parser.add_argument('num', type=int, help='返回记录数', location='form', required=True)
    @library.doc(parser=parser)
    def post(self):
        """
        Query the media server continue-watching list
        """
        return WebAction().api_action(cmd='get_library_resume', data=self.parser.parse_args())
@library.route('/mediaserver/statistics')
class LibraryStatistics(ClientResource):
    """Media item counts in the library."""
    @staticmethod
    def post():
        """
        Query media library statistics
        """
        return WebAction().api_action(cmd="get_library_mediacount")
@library.route('/space')
class LibrarySpace(ClientResource):
    """Storage space used by the media library."""
    @staticmethod
    def post():
        """
        Query media library storage space
        """
        return WebAction().api_action(cmd='get_library_spacesize')
@system.route('/version')
class SystemVersion(ClientResource):
    """Latest available version number."""
    @staticmethod
    def post():
        """
        Query the latest version number
        """
        return WebAction().api_action(cmd='version')
@system.route('/path')
class SystemPath(ClientResource):
    """List the children of a filesystem directory."""
    parser = reqparse.RequestParser()
    parser.add_argument('dir', type=str, help='路径', location='form', required=True)
    parser.add_argument('filter', type=str,
                        help='过滤器(ONLYFILE/ONLYDIR/MEDIAFILE/SUBFILE/AUDIOTRACKFILE/ALL)',
                        location='form',
                        required=True)
    @system.doc(parser=parser)
    def post(self):
        """
        Query subdirectories/files of a directory
        """
        return WebAction().api_action(cmd='get_sub_path', data=self.parser.parse_args())
@system.route('/restart')
class SystemRestart(ClientResource):
    """Restart the service."""
    @staticmethod
    def post():
        """
        Restart
        """
        return WebAction().api_action(cmd='restart')
@system.route('/update')
class SystemUpdate(ClientResource):
    """Upgrade the system to the latest version."""
    @staticmethod
    def post():
        """
        Upgrade
        """
        return WebAction().api_action(cmd='update_system')
@system.route('/logout')
class SystemLogout(ClientResource):
    """Log out the current client by invalidating its token.

    Renamed from ``SystemUpdate``: the original class name collided with the
    ``/update`` resource class defined just above, shadowing it at module
    level. Routing is done by the ``@system.route`` decorator, so the rename
    does not change any endpoint.
    """
    @staticmethod
    def post():
        """
        Log out
        """
        # Drop the caller's token from the cache so it can no longer authenticate.
        token = request.headers.get("Authorization", default=None)
        if token:
            TokenCache.delete(token)
        return {
            "code": 0,
            "success": True
        }
@system.route('/progress')
class SystemProgress(ClientResource):
    """Progress of long-running jobs (search, media sync, etc.)."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(ProgressKey)', location='form', required=True)
    @system.doc(parser=parser)
    def post(self):
        """
        Query progress of search/media sync etc.
        """
        return WebAction().api_action(cmd='refresh_process', data=self.parser.parse_args())
@config.route('/update')
class ConfigUpdate(ClientResource):
    """Add or modify configuration items."""
    parser = reqparse.RequestParser()
    parser.add_argument('items', type=dict, help='配置项', location='form', required=True)
    @config.doc(parser=parser)
    def post(self):
        """
        Add/update configuration
        """
        # Only the 'items' dict itself is forwarded, not the full parsed args
        return WebAction().api_action(cmd='update_config', data=self.parser.parse_args().get("items"))
@config.route('/test')
class ConfigTest(ClientResource):
    """Test connectivity of a configured service."""
    parser = reqparse.RequestParser()
    parser.add_argument('command', type=str, help='测试命令', location='form', required=True)
    @config.doc(parser=parser)
    def post(self):
        """
        Test configuration connectivity
        """
        return WebAction().api_action(cmd='test_connection', data=self.parser.parse_args())
@config.route('/restore')
class ConfigRestore(ClientResource):
    """Restore configuration from a named backup file."""
    parser = reqparse.RequestParser()
    parser.add_argument('file_name', type=str, help='备份文件名', location='form', required=True)
    @config.doc(parser=parser)
    def post(self):
        """
        Restore a configuration backup
        """
        return WebAction().api_action(cmd='restory_backup', data=self.parser.parse_args())
@config.route('/info')
class ConfigInfo(ClientResource):
    """Return the full parsed configuration."""
    @staticmethod
    def post():
        """
        Get all configuration information
        """
        return {
            "code": 0,
            "success": True,
            "data": Config().get_config()
        }
@config.route('/directory')
class ConfigDirectory(ClientResource):
    """Add/remove/set media library directory entries."""
    parser = reqparse.RequestParser()
    parser.add_argument('oper', type=str, help='操作类型(add/sub/set)', location='form', required=True)
    parser.add_argument('key', type=str, help='配置项', location='form', required=True)
    parser.add_argument('value', type=str, help='配置值', location='form', required=True)
    @config.doc(parser=parser)
    def post(self):
        """
        Configure media library directories
        """
        return WebAction().api_action(cmd='update_directory', data=self.parser.parse_args())
@config.route('/set')
class ConfigSet(ClientResource):
    """Persist a single system configuration value."""
    parser = reqparse.RequestParser()
    parser.add_argument('key', type=str, help='配置项', location='form', required=True)
    parser.add_argument('value', type=str, help='配置值', location='form', required=True)
    @config.doc(parser=parser)
    def post(self):
        """
        Save a system configuration value
        """
        return WebAction().api_action(cmd='set_system_config', data=self.parser.parse_args())
@subscribe.route('/delete')
class SubscribeDelete(ClientResource):
    """Remove a movie/TV subscription."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='form')
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form')
    parser.add_argument('year', type=str, help='发行年份', location='form')
    parser.add_argument('season', type=int, help='季号', location='form')
    parser.add_argument('rssid', type=int, help='已有订阅ID', location='form')
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Delete a subscription
        """
        return WebAction().api_action(cmd='remove_rss_media', data=self.parser.parse_args())
@subscribe.route('/add')
class SubscribeAdd(ClientResource):
    """Create or modify a movie/TV subscription."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('year', type=str, help='发行年份', location='form')
    parser.add_argument('keyword', type=str, help='自定义搜索词', location='form')
    parser.add_argument('season', type=int, help='季号', location='form')
    parser.add_argument('rssid', type=int, help='已有订阅ID', location='form')
    parser.add_argument('mediaid', type=str, help='TMDBID/DB:豆瓣ID', location='form')
    parser.add_argument('fuzzy_match', type=int, help='模糊匹配(0-否/1-是)', location='form')
    parser.add_argument('rss_sites', type=str, help='RSS站点(,号分隔)', location='form')
    parser.add_argument('search_sites', type=str, help='搜索站点(,号分隔)', location='form')
    parser.add_argument('over_edition', type=int, help='洗版(0-否/1-是)', location='form')
    parser.add_argument('filter_restype', type=str, help='资源类型', location='form')
    parser.add_argument('filter_pix', type=str, help='分辨率', location='form')
    parser.add_argument('filter_team', type=str, help='字幕组/发布组', location='form')
    parser.add_argument('filter_rule', type=int, help='过滤规则', location='form')
    parser.add_argument('download_setting', type=int, help='下载设置', location='form')
    parser.add_argument('save_path', type=str, help='保存路径', location='form')
    parser.add_argument('total_ep', type=int, help='总集数', location='form')
    parser.add_argument('current_ep', type=int, help='开始集数', location='form')
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Add/update a subscription
        """
        return WebAction().api_action(cmd='add_rss_media', data=self.parser.parse_args())
@subscribe.route('/movie/date')
class SubscribeMovieDate(ClientResource):
    """Release dates for a movie."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Movie release dates
        """
        return WebAction().api_action(cmd='movie_calendar_data', data=self.parser.parse_args())
@subscribe.route('/tv/date')
class SubscribeTVDate(ClientResource):
    """Air dates for a TV season."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form', required=True)
    parser.add_argument('season', type=int, help='季号', location='form', required=True)
    parser.add_argument('name', type=str, help='名称', location='form')
    @subscribe.doc(parser=parser)
    def post(self):
        """
        TV series air dates
        """
        return WebAction().api_action(cmd='tv_calendar_data', data=self.parser.parse_args())
@subscribe.route('/search')
class SubscribeSearch(ClientResource):
    """Trigger a refresh-and-search for one subscription."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Refresh and search a subscription
        """
        return WebAction().api_action(cmd='refresh_rss', data=self.parser.parse_args())
@subscribe.route('/info')
class SubscribeInfo(ClientResource):
    """Details of a single subscription."""
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)
    parser.add_argument('type', type=str, help='订阅类型(MOV/TV)', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Subscription details
        """
        return WebAction().api_action(cmd='rss_detail', data=self.parser.parse_args())
@subscribe.route('/redo')
class SubscribeRedo(ClientResource):
    """Re-create a subscription from a history entry."""
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅历史ID', location='form', required=True)
    parser.add_argument('type', type=str, help='订阅类型(MOV/TV)', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Re-subscribe from history
        """
        return WebAction().api_action(cmd='re_rss_history', data=self.parser.parse_args())
@subscribe.route('/history/delete')
class SubscribeHistoryDelete(ClientResource):
    """Delete one subscription history entry."""
    parser = reqparse.RequestParser()
    parser.add_argument('rssid', type=int, help='订阅ID', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Delete subscription history
        """
        return WebAction().api_action(cmd='delete_rss_history', data=self.parser.parse_args())
@subscribe.route('/history')
class SubscribeHistory(ClientResource):
    """Query subscription history by media type."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    @subscribe.doc(parser=parser)
    def post(self):
        """
        Query subscription history
        """
        return WebAction().api_action(cmd='get_rss_history', data=self.parser.parse_args())
@subscribe.route('/cache/delete')
class SubscribeCacheDelete(ClientResource):
    """Clear the cached subscription history table."""
    @staticmethod
    def post():
        """
        Clear the subscription cache
        """
        return WebAction().api_action(cmd='truncate_rsshistory')
@subscribe.route('/movie/list')
class SubscribeMovieList(ClientResource):
    """All movie subscriptions."""
    @staticmethod
    def post():
        """
        Query all movie subscriptions
        """
        return WebAction().api_action(cmd='get_movie_rss_list')
@subscribe.route('/tv/list')
class SubscribeTvList(ClientResource):
    """All TV subscriptions."""
    @staticmethod
    def post():
        """
        Query all TV subscriptions
        """
        return WebAction().api_action(cmd='get_tv_rss_list')
@recommend.route('/list')
class RecommendList(ClientResource):
    """Paginated recommendation lists by type/subtype."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型', location='form', required=True)
    parser.add_argument('subtype', type=str, help='子类型', location='form', required=True)
    parser.add_argument('page', type=int, help='页码', location='form', required=True)
    @recommend.doc(parser=parser)
    def post(self):
        """
        Recommendation list
        """
        return WebAction().api_action(cmd='get_recommend', data=self.parser.parse_args())
@rss.route('/info')
class RssInfo(ClientResource):
    """Details of a custom RSS task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Custom RSS task details
        """
        return WebAction().api_action(cmd='get_userrss_task', data=self.parser.parse_args())
@rss.route('/delete')
class RssDelete(ClientResource):
    """Delete a custom RSS task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Delete a custom RSS task
        """
        return WebAction().api_action(cmd='delete_userrss_task', data=self.parser.parse_args())
@rss.route('/update')
class RssUpdate(ClientResource):
    """Create or modify a custom RSS task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form')
    parser.add_argument('name', type=str, help='任务名称', location='form', required=True)
    parser.add_argument('address', type=str, help='RSS地址', location='form', required=True)
    parser.add_argument('parser', type=int, help='解析器ID', location='form', required=True)
    parser.add_argument('interval', type=int, help='刷新间隔(分钟)', location='form', required=True)
    parser.add_argument('uses', type=str, help='动作', location='form', required=True)
    parser.add_argument('state', type=str, help='状态(Y/N)', location='form', required=True)
    parser.add_argument('include', type=str, help='包含', location='form')
    parser.add_argument('exclude', type=str, help='排除', location='form')
    parser.add_argument('filterrule', type=int, help='过滤规则', location='form')
    parser.add_argument('note', type=str, help='备注', location='form')
    @rss.doc(parser=parser)
    def post(self):
        """
        Add/update a custom RSS task
        """
        return WebAction().api_action(cmd='update_userrss_task', data=self.parser.parse_args())
@rss.route('/parser/info')
class RssParserInfo(ClientResource):
    """Details of an RSS parser."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Parser details
        """
        return WebAction().api_action(cmd='get_rssparser', data=self.parser.parse_args())
@rss.route('/parser/delete')
class RssParserDelete(ClientResource):
    """Delete an RSS parser."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Delete a parser
        """
        return WebAction().api_action(cmd='delete_rssparser', data=self.parser.parse_args())
@rss.route('/parser/update')
class RssParserUpdate(ClientResource):
    """Create or modify an RSS parser."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='解析器ID', location='form', required=True)
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(JSON/XML)', location='form', required=True)
    parser.add_argument('format', type=str, help='解析格式', location='form', required=True)
    parser.add_argument('params', type=str, help='附加参数', location='form')
    @rss.doc(parser=parser)
    def post(self):
        """
        Add/update a parser
        """
        return WebAction().api_action(cmd='update_rssparser', data=self.parser.parse_args())
@rss.route('/parser/list')
class RssParserList(ClientResource):
    """All RSS parsers."""
    @staticmethod
    def post():
        """
        Query all parsers
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "parsers": RssChecker().get_userrss_parser()
            }
        }
@rss.route('/list')
class RssList(ClientResource):
    """All custom RSS subscription tasks together with the available parsers."""
    @staticmethod
    def post():
        """
        Query all custom RSS tasks
        """
        # Fix: this endpoint returned "success": False alongside "code": 0,
        # inconsistent with every other list endpoint (RssParserList,
        # ConfigInfo, BrushTaskList), which all return True on success.
        return {
            "code": 0,
            "success": True,
            "data": {
                "tasks": RssChecker().get_rsstask_info(),
                "parsers": RssChecker().get_userrss_parser()
            }
        }
@rss.route('/preview')
class RssPreview(ClientResource):
    """Preview the articles fetched by a custom RSS task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Preview a custom RSS feed
        """
        return WebAction().api_action(cmd='list_rss_articles', data=self.parser.parse_args())
@rss.route('/name/test')
class RssNameTest(ClientResource):
    """Test how a title would be recognized by a custom RSS task."""
    parser = reqparse.RequestParser()
    parser.add_argument('taskid', type=int, help='任务ID', location='form', required=True)
    parser.add_argument('title', type=str, help='名称', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Custom RSS title recognition test
        """
        return WebAction().api_action(cmd='rss_article_test', data=self.parser.parse_args())
@rss.route('/item/history')
class RssItemHistory(ClientResource):
    """Processing history of a custom RSS task's items."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='任务ID', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Custom RSS task item history
        """
        return WebAction().api_action(cmd='list_rss_history', data=self.parser.parse_args())
@rss.route('/item/set')
class RssItemSet(ClientResource):
    """Mark custom RSS task items finished/unfinished."""
    parser = reqparse.RequestParser()
    parser.add_argument('flag', type=str, help='操作类型(set_finished/set_unfinish)', location='form', required=True)
    parser.add_argument('articles', type=list, help='条目({title/enclosure})', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Adjust the status of custom RSS task items
        """
        return WebAction().api_action(cmd='rss_articles_check', data=self.parser.parse_args())
@rss.route('/item/download')
class RssItemDownload(ClientResource):
    """Download selected custom RSS task items."""
    parser = reqparse.RequestParser()
    parser.add_argument('taskid', type=int, help='任务ID', location='form', required=True)
    parser.add_argument('articles', type=list, help='条目({title/enclosure})', location='form', required=True)
    @rss.doc(parser=parser)
    def post(self):
        """
        Download custom RSS task items
        """
        return WebAction().api_action(cmd='rss_articles_download', data=self.parser.parse_args())
@media.route('/search')
class MediaSearch(ClientResource):
    """Keyword search of TMDB/Douban entries."""
    parser = reqparse.RequestParser()
    parser.add_argument('keyword', type=str, help='关键字', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Search TMDB/Douban entries
        """
        return WebAction().api_action(cmd='search_media_infos', data=self.parser.parse_args())
@media.route('/cache/update')
class MediaCacheUpdate(ClientResource):
    """Change the title stored in a TMDB cache entry."""
    parser = reqparse.RequestParser()
    parser.add_argument('key', type=str, help='缓存Key值', location='form', required=True)
    parser.add_argument('title', type=str, help='标题', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Modify a TMDB cache title
        """
        return WebAction().api_action(cmd='modify_tmdb_cache', data=self.parser.parse_args())
@media.route('/cache/delete')
class MediaCacheDelete(ClientResource):
    """Delete one TMDB cache entry."""
    parser = reqparse.RequestParser()
    parser.add_argument('cache_key', type=str, help='缓存Key值', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Delete a TMDB cache entry
        """
        return WebAction().api_action(cmd='delete_tmdb_cache', data=self.parser.parse_args())
@media.route('/cache/clear')
class MediaCacheClear(ClientResource):
    """Drop the entire TMDB cache."""
    @staticmethod
    def post():
        """
        Clear the TMDB cache
        """
        return WebAction().api_action(cmd='clear_tmdb_cache')
@media.route('/tv/seasons')
class MediaTvSeasons(ClientResource):
    """Season list of a TV show."""
    parser = reqparse.RequestParser()
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Query a TV show's season list
        """
        return WebAction().api_action(cmd='get_tvseason_list', data=self.parser.parse_args())
@media.route('/category/list')
class MediaCategoryList(ClientResource):
    """Secondary category configuration for a media type."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(电影/电视剧/动漫)', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Query secondary category configuration
        """
        return WebAction().api_action(cmd='get_categories', data=self.parser.parse_args())
@media.route('/info')
class MediaInfo(ClientResource):
    """Identify media information from an ID or title/year."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('id', type=str, help='TMDBID/DB:豆瓣ID', location='form')
    parser.add_argument('title', type=str, help='标题', location='form')
    parser.add_argument('year', type=str, help='年份', location='form')
    parser.add_argument('rssid', type=str, help='订阅ID', location='form')
    @media.doc(parser=parser)
    def post(self):
        """
        Identify media information
        """
        return WebAction().api_action(cmd='media_info', data=self.parser.parse_args())
@media.route('/detail')
class MediaDetail(ClientResource):
    """Detailed TMDB record for a media item."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID/DB:豆瓣ID', location='form')
    @media.doc(parser=parser)
    def post(self):
        """
        Query TMDB media details
        """
        return WebAction().api_action(cmd='media_detail', data=self.parser.parse_args())
@media.route('/similar')
class MediaSimilar(ClientResource):
    """Similar media for a TMDB ID."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')
    @media.doc(parser=parser)
    def post(self):
        """
        Query similar media by TMDB ID
        """
        return WebAction().api_action(cmd='media_similar', data=self.parser.parse_args())
@media.route('/recommendations')
class MediaRecommendations(ClientResource):
    """Recommended media for a TMDB ID."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('tmdbid', type=str, help='TMDBID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')
    @media.doc(parser=parser)
    def post(self):
        """
        Query recommended media by TMDB ID
        """
        return WebAction().api_action(cmd='media_recommendations', data=self.parser.parse_args())
@media.route('/person')
class MediaPersonList(ClientResource):
    """Works a TMDB person participated in."""
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(MOV/TV)', location='form', required=True)
    parser.add_argument('personid', type=str, help='演员ID', location='form')
    parser.add_argument('page', type=int, help='页码', location='form')
    @media.doc(parser=parser)
    def post(self):
        """
        Query a TMDB person's credits
        """
        return WebAction().api_action(cmd='person_medias', data=self.parser.parse_args())
@media.route('/subtitle/download')
class MediaSubtitleDownload(ClientResource):
    """Download subtitles for a single media file."""
    parser = reqparse.RequestParser()
    parser.add_argument('path', type=str, help='文件路径(含文件名)', location='form', required=True)
    parser.add_argument('name', type=str, help='名称(用于识别)', location='form', required=True)
    @media.doc(parser=parser)
    def post(self):
        """
        Download subtitles for a single file
        """
        return WebAction().api_action(cmd='download_subtitle', data=self.parser.parse_args())
@brushtask.route('/update')
class BrushTaskUpdate(ClientResource):
    """Create or modify a ratio-brushing (seeding) task."""
    parser = reqparse.RequestParser()
    parser.add_argument('brushtask_id', type=str, help='刷流任务ID', location='form')
    parser.add_argument('brushtask_name', type=str, help='任务名称', location='form', required=True)
    parser.add_argument('brushtask_site', type=int, help='站点', location='form', required=True)
    parser.add_argument('brushtask_interval', type=int, help='刷新间隔(分钟)', location='form', required=True)
    parser.add_argument('brushtask_downloader', type=int, help='下载器', location='form', required=True)
    parser.add_argument('brushtask_totalsize', type=int, help='保种体积(GB)', location='form', required=True)
    parser.add_argument('brushtask_savepath', type=str, help='保存目录', location='form')
    parser.add_argument('brushtask_label', type=str, help='标签', location='form')
    parser.add_argument('brushtask_rssurl', type=str, help='RSS地址', location='form')
    parser.add_argument('brushtask_state', type=str, help='状态(Y/N)', location='form', required=True)
    parser.add_argument('brushtask_transfer', type=str, help='转移到媒体库(Y/N)', location='form')
    parser.add_argument('brushtask_sendmessage', type=str, help='消息推送(Y/N)', location='form')
    parser.add_argument('brushtask_free', type=str, help='促销(FREE/2XFREE)', location='form')
    parser.add_argument('brushtask_hr', type=str, help='Hit&Run(HR)', location='form')
    parser.add_argument('brushtask_torrent_size', type=int, help='种子大小(GB)', location='form')
    parser.add_argument('brushtask_include', type=str, help='包含', location='form')
    parser.add_argument('brushtask_exclude', type=str, help='排除', location='form')
    parser.add_argument('brushtask_dlcount', type=int, help='同时下载任务数', location='form')
    parser.add_argument('brushtask_current_site_count', type=int, help='当前站点任务总数', location='form')
    parser.add_argument('brushtask_current_site_dlcount', type=int, help='当前站点下载任务数', location='form')
    parser.add_argument('brushtask_peercount', type=int, help='做种人数限制', location='form')
    parser.add_argument('brushtask_seedtime', type=float, help='做种时间(小时)', location='form')
    parser.add_argument('brushtask_seedratio', type=float, help='分享率', location='form')
    parser.add_argument('brushtask_seedsize', type=int, help='上传量(GB)', location='form')
    parser.add_argument('brushtask_dltime', type=float, help='下载耗时(小时)', location='form')
    parser.add_argument('brushtask_avg_upspeed', type=int, help='平均上传速度(KB/S)', location='form')
    parser.add_argument('brushtask_iatime', type=float, help='未活动时间(小时)', location='form')
    parser.add_argument('brushtask_pubdate', type=int, help='发布时间(小时)', location='form')
    parser.add_argument('brushtask_upspeed', type=int, help='上传限速(KB/S)', location='form')
    parser.add_argument('brushtask_downspeed', type=int, help='下载限速(KB/S)', location='form')
    @brushtask.doc(parser=parser)
    def post(self):
        """
        Add/update a brush task
        """
        return WebAction().api_action(cmd='add_brushtask', data=self.parser.parse_args())
@brushtask.route('/delete')
class BrushTaskDelete(ClientResource):
    """Delete a brush task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)
    @brushtask.doc(parser=parser)
    def post(self):
        """
        Delete a brush task
        """
        return WebAction().api_action(cmd='del_brushtask', data=self.parser.parse_args())
@brushtask.route('/info')
class BrushTaskInfo(ClientResource):
    """Details of a brush task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)
    @brushtask.doc(parser=parser)
    def post(self):
        """
        Brush task details
        """
        return WebAction().api_action(cmd='brushtask_detail', data=self.parser.parse_args())
@brushtask.route('/list')
class BrushTaskList(ClientResource):
    """All brush tasks."""
    @staticmethod
    def post():
        """
        Query all brush tasks
        """
        return {
            "code": 0,
            "success": True,
            "data": {
                "tasks": BrushTask().get_brushtask_info()
            }
        }
@brushtask.route('/torrents')
class BrushTaskTorrents(ClientResource):
    """Torrent details of a brush task."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='刷流任务ID', location='form', required=True)
    @brushtask.doc(parser=parser)
    def post(self):
        """
        Query a brush task's torrent details
        """
        return WebAction().api_action(cmd='list_brushtask_torrents', data=self.parser.parse_args())
@brushtask.route('/run')
class BrushTaskRun(ClientResource):
    """Run a brush task immediately."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='刷流任务ID', location='form', required=True)
    @brushtask.doc(parser=parser)
    def post(self):
        """
        Run a brush task immediately
        """
        return WebAction().api_action(cmd='run_brushtask', data=self.parser.parse_args())
@filterrule.route('/list')
class FilterRuleList(ClientResource):
    """All filter rules and rule groups."""
    @staticmethod
    def post():
        """
        Query all filter rules
        """
        return WebAction().api_action(cmd='get_filterrules')
@filterrule.route('/group/add')
class FilterRuleGroupAdd(ClientResource):
    """Add a filter rule group."""
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('default', type=str, help='默认(Y/N)', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Add a rule group
        """
        return WebAction().api_action(cmd='add_filtergroup', data=self.parser.parse_args())
@filterrule.route('/group/restore')
class FilterRuleGroupRestore(ClientResource):
    """Restore built-in default rule groups."""
    parser = reqparse.RequestParser()
    parser.add_argument('groupids', type=list, help='规则组ID', location='form', required=True)
    parser.add_argument('init_rulegroups', type=list, help='规则组脚本', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Restore default rule groups
        """
        return WebAction().api_action(cmd='restore_filtergroup', data=self.parser.parse_args())
@filterrule.route('/group/default')
class FilterRuleGroupDefault(ClientResource):
    """Mark a rule group as the default."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='规则组ID', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Set the default rule group
        """
        return WebAction().api_action(cmd='set_default_filtergroup', data=self.parser.parse_args())
@filterrule.route('/group/delete')
class FilterRuleGroupDelete(ClientResource):
    """Delete a rule group."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=str, help='规则组ID', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Delete a rule group
        """
        return WebAction().api_action(cmd='del_filtergroup', data=self.parser.parse_args())
@filterrule.route('/rule/update')
class FilterRuleUpdate(ClientResource):
    """Create or modify a filter rule."""
    parser = reqparse.RequestParser()
    parser.add_argument('rule_id', type=int, help='规则ID', location='form')
    parser.add_argument('group_id', type=int, help='规则组ID', location='form', required=True)
    parser.add_argument('rule_name', type=str, help='规则名称', location='form', required=True)
    parser.add_argument('rule_pri', type=str, help='优先级', location='form', required=True)
    parser.add_argument('rule_include', type=str, help='包含', location='form')
    parser.add_argument('rule_exclude', type=str, help='排除', location='form')
    parser.add_argument('rule_sizelimit', type=str, help='大小限制', location='form')
    parser.add_argument('rule_free', type=str, help='促销(FREE/2XFREE)', location='form')
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Add/update a rule
        """
        return WebAction().api_action(cmd='add_filterrule', data=self.parser.parse_args())
@filterrule.route('/rule/delete')
class FilterRuleDelete(ClientResource):
    """Delete a filter rule."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='规则ID', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Delete a rule
        """
        return WebAction().api_action(cmd='del_filterrule', data=self.parser.parse_args())
@filterrule.route('/rule/info')
class FilterRuleInfo(ClientResource):
    """Details of a filter rule."""
    parser = reqparse.RequestParser()
    parser.add_argument('ruleid', type=int, help='规则ID', location='form', required=True)
    parser.add_argument('groupid', type=int, help='规则组ID', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Rule details
        """
        return WebAction().api_action(cmd='filterrule_detail', data=self.parser.parse_args())
@filterrule.route('/rule/share')
class FilterRuleShare(ClientResource):
    """Export a rule group for sharing."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='规则组ID', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Share a rule group
        """
        return WebAction().api_action(cmd='share_filtergroup', data=self.parser.parse_args())
@filterrule.route('/rule/import')
class FilterRuleImport(ClientResource):
    """Import a shared rule group."""
    parser = reqparse.RequestParser()
    parser.add_argument('content', type=str, help='规则内容', location='form', required=True)
    @filterrule.doc(parser=parser)
    def post(self):
        """
        Import a rule group
        """
        return WebAction().api_action(cmd='import_filtergroup', data=self.parser.parse_args())
@words.route('/group/add')
class WordsGroupAdd(ClientResource):
parser = reqparse.RequestParser()
parser.add_argument('tmdb_id', type=str, help='TMDBID', location='form', required=True)
parser.add_argument('tmdb_type', type=str, help='类型(movie/tv)', location='form', required=True)
@words.doc(parser=parser)
def post(self):
"""
新增识别词组
"""
return WebAction().api_action(cmd='add_custom_word_group', data=self.parser.parse_args())
@words.route('/group/delete')
class WordsGroupDelete(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('gid', type=int, help='识别词组ID', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Delete a custom word group (删除识别词组).
        """
        return WebAction().api_action(cmd='delete_custom_word_group', data=self.parser.parse_args())
@words.route('/item/update')
class WordItemUpdate(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='识别词ID', location='form', required=True)
    parser.add_argument('gid', type=int, help='识别词组ID', location='form', required=True)
    parser.add_argument('group_type', type=str, help='媒体类型(1-电影/2-电视剧)', location='form', required=True)
    parser.add_argument('new_replaced', type=str, help='被替换词', location='form')
    parser.add_argument('new_replace', type=str, help='替换词', location='form')
    parser.add_argument('new_front', type=str, help='前定位词', location='form')
    parser.add_argument('new_back', type=str, help='后定位词', location='form')
    parser.add_argument('new_offset', type=str, help='偏移集数', location='form')
    parser.add_argument('new_help', type=str, help='备注', location='form')
    parser.add_argument('type', type=str, help='识别词类型(1-屏蔽/2-替换/3-替换+集偏移/4-集偏移)', location='form',
                        required=True)
    parser.add_argument('season', type=str, help='季', location='form')
    parser.add_argument('enabled', type=str, help='状态(1-启用/0-停用)', location='form', required=True)
    parser.add_argument('regex', type=str, help='正则表达式(1-使用/0-不使用)', location='form')
    @words.doc(parser=parser)
    def post(self):
        """
        Add or update a custom word (新增/修改识别词).
        """
        return WebAction().api_action(cmd='add_or_edit_custom_word', data=self.parser.parse_args())
@words.route('/item/info')
class WordItemInfo(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('wid', type=int, help='识别词ID', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Get custom word detail (识别词详情).
        """
        return WebAction().api_action(cmd='get_custom_word', data=self.parser.parse_args())
@words.route('/item/delete')
class WordItemDelete(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='识别词ID', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Delete a custom word (删除识别词).
        """
        return WebAction().api_action(cmd='delete_custom_word', data=self.parser.parse_args())
@words.route('/item/status')
class WordItemStatus(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('ids_info', type=list, help='识别词IDS', location='form', required=True)
    parser.add_argument('flag', type=int, help='状态(1/0)', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Enable/disable custom words (设置识别词状态).
        """
        return WebAction().api_action(cmd='check_custom_words', data=self.parser.parse_args())
@words.route('/item/export')
class WordItemExport(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('note', type=str, help='备注', location='form', required=True)
    parser.add_argument('ids_info', type=str, help='识别词IDS(@_)', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Export custom words (导出识别词).
        """
        return WebAction().api_action(cmd='export_custom_words', data=self.parser.parse_args())
@words.route('/item/analyse')
class WordItemAnalyse(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('import_code', type=str, help='识别词代码', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Analyse custom word import code (分析识别词).
        """
        return WebAction().api_action(cmd='analyse_import_custom_words_code', data=self.parser.parse_args())
@words.route('/item/import')
class WordItemImport(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('import_code', type=str, help='识别词代码', location='form', required=True)
    parser.add_argument('ids_info', type=list, help='识别词IDS', location='form', required=True)
    @words.doc(parser=parser)
    def post(self):
        """
        Import custom words (导入识别词).
        """
        return WebAction().api_action(cmd='import_custom_words', data=self.parser.parse_args())
@words.route('/list')
class WordList(ClientResource):
    @staticmethod
    def post():
        """
        List all custom words (查询所有自定义识别词).
        """
        return WebAction().api_action(cmd='get_customwords')
@sync.route('/directory/update')
class SyncDirectoryUpdate(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form')
    parser.add_argument('from', type=str, help='源目录', location='form', required=True)
    parser.add_argument('to', type=str, help='目的目录', location='form')
    parser.add_argument('unknown', type=str, help='未知目录', location='form')
    parser.add_argument('syncmod', type=str, help='同步模式', location='form')
    parser.add_argument('compatibility', type=str, help='兼容模式', location='form')
    parser.add_argument('rename', type=str, help='重命名', location='form')
    parser.add_argument('enabled', type=str, help='开启', location='form')
    @sync.doc(parser=parser)
    def post(self):
        """
        Add or update a sync directory (新增/修改同步目录).
        """
        return WebAction().api_action(cmd='add_or_edit_sync_path', data=self.parser.parse_args())
@sync.route('/directory/info')
class SyncDirectoryInfo(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)
    @sync.doc(parser=parser)
    def post(self):
        """
        Get sync directory detail (同步目录详情).
        """
        return WebAction().api_action(cmd='get_sync_path', data=self.parser.parse_args())
@sync.route('/directory/delete')
class SyncDirectoryDelete(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)
    @sync.doc(parser=parser)
    def post(self):
        """
        Delete a sync directory (删除同步目录).
        """
        return WebAction().api_action(cmd='delete_sync_path', data=self.parser.parse_args())
@sync.route('/directory/status')
class SyncDirectoryStatus(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='form', required=True)
    parser.add_argument('flag', type=str, help='操作(rename/enable)', location='form', required=True)
    parser.add_argument('checked', type=int, help='状态(0-否/1-是)', location='form', required=True)
    @sync.doc(parser=parser)
    def post(self):
        """
        Set sync directory status (设置同步目录状态).
        """
        return WebAction().api_action(cmd='check_sync_path', data=self.parser.parse_args())
@sync.route('/directory/list')
class SyncDirectoryList(ClientResource):
    @staticmethod
    def post():
        """
        List all sync directories (查询所有同步目录).
        """
        return WebAction().api_action(cmd='get_sync_path')
@sync.route('/directory/run')
class SyncDirectoryRun(ApiResource):
    # Query-string parameters accepted by this endpoint (API-key auth resource).
    parser = reqparse.RequestParser()
    parser.add_argument('sid', type=int, help='同步目录ID', location='args', required=True)
    @sync.doc(parser=parser)
    def get(self):
        """
        Run a single directory sync immediately, API-key auth (立即运行单个目录同步服务(密钥认证)).
        """
        return WebAction().api_action(cmd='run_directory_sync', data=self.parser.parse_args())
@sync.route('/run')
class SyncRun(ApiResource):
    @staticmethod
    def get():
        """
        Run all directory sync services immediately, API-key auth (立即运行所有目录同步服务(密钥认证)).
        """
        return WebAction().api_action(cmd='sch', data={"item": "sync"})
@sync.route('/file/hardlinks')
class SystemFileHardlinks(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('filepath', type=str, help='路径', location='form', required=True)
    # FIX: the route is registered on the `sync` namespace but was documented
    # with `system.doc`; use the same namespace for consistency.
    # (`Namespace.doc` only attaches swagger metadata, behavior is unchanged.)
    @sync.doc(parser=parser)
    def post(self):
        """
        Query hard links of a file (查询文件的硬链接).
        """
        return WebAction().api_action(cmd='get_filehardlinks', data=self.parser.parse_args())
@sync.route('/directory/hardlink')
class SystemDirectoryHardlink(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('dirpath', type=str, help='路径', location='form', required=True)
    # FIX: the route is registered on the `sync` namespace but was documented
    # with `system.doc`; use the same namespace for consistency.
    # (`Namespace.doc` only attaches swagger metadata, behavior is unchanged.)
    @sync.doc(parser=parser)
    def post(self):
        """
        Query hard links of a directory (查询目录的硬链接).
        """
        return WebAction().api_action(cmd='get_dirhardlink', data=self.parser.parse_args())
@message.route('/client/update')
class MessageClientUpdate(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('cid', type=int, help='ID', location='form')
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('type', type=str, help='类型(wechat/telegram/serverchan/bark/pushplus/iyuu/slack/gotify)',
                        location='form', required=True)
    parser.add_argument('config', type=str, help='配置项(JSON)', location='form', required=True)
    parser.add_argument('switchs', type=list, help='开关', location='form', required=True)
    parser.add_argument('interactive', type=int, help='是否开启交互(0/1)', location='form', required=True)
    parser.add_argument('enabled', type=int, help='是否启用(0/1)', location='form', required=True)
    @message.doc(parser=parser)
    def post(self):
        """
        Add or update a notification message channel (新增/修改通知消息服务渠道).
        """
        return WebAction().api_action(cmd='update_message_client', data=self.parser.parse_args())
@message.route('/client/delete')
class MessageClientDelete(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
    @message.doc(parser=parser)
    def post(self):
        """
        Delete a notification message channel (删除通知消息服务渠道).
        """
        return WebAction().api_action(cmd='delete_message_client', data=self.parser.parse_args())
@message.route('/client/status')
class MessageClientStatus(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('flag', type=str, help='操作类型(interactive/enable)', location='form', required=True)
    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
    @message.doc(parser=parser)
    def post(self):
        """
        Set notification message channel status (设置通知消息服务渠道状态).
        """
        return WebAction().api_action(cmd='check_message_client', data=self.parser.parse_args())
@message.route('/client/info')
class MessageClientInfo(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('cid', type=int, help='ID', location='form', required=True)
    @message.doc(parser=parser)
    def post(self):
        """
        Query notification message channel settings (查询通知消息服务渠道设置).
        """
        return WebAction().api_action(cmd='get_message_client', data=self.parser.parse_args())
@message.route('/client/test')
class MessageClientTest(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('type', type=str, help='类型(wechat/telegram/serverchan/bark/pushplus/iyuu/slack/gotify)',
                        location='form', required=True)
    parser.add_argument('config', type=str, help='配置(JSON)', location='form', required=True)
    @message.doc(parser=parser)
    def post(self):
        """
        Test notification channel configuration (测试通知消息服务配置正确性).
        """
        return WebAction().api_action(cmd='test_message_client', data=self.parser.parse_args())
@torrentremover.route('/task/info')
class TorrentRemoverTaskInfo(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('tid', type=int, help='任务ID', location='form', required=True)
    @torrentremover.doc(parser=parser)
    def post(self):
        """
        Query auto-remove task detail (查询自动删种任务详情).
        """
        return WebAction().api_action(cmd='get_torrent_remove_task', data=self.parser.parse_args())
@torrentremover.route('/task/list')
class TorrentRemoverTaskList(ClientResource):
    @staticmethod
    @torrentremover.doc()
    def post():
        """
        List all auto-remove tasks (查询所有自动删种任务).
        """
        return WebAction().api_action(cmd='get_torrent_remove_task')
@torrentremover.route('/task/delete')
class TorrentRemoverTaskDelete(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('tid', type=int, help='任务ID', location='form', required=True)
    @torrentremover.doc(parser=parser)
    def post(self):
        """
        Delete an auto-remove task (删除自动删种任务).
        """
        return WebAction().api_action(cmd='delete_torrent_remove_task', data=self.parser.parse_args())
@torrentremover.route('/task/update')
class TorrentRemoverTaskUpdate(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('tid', type=int, help='任务ID', location='form')
    parser.add_argument('name', type=str, help='名称', location='form', required=True)
    parser.add_argument('action', type=int, help='动作(1-暂停/2-删除种子/3-删除种子及文件)', location='form',
                        required=True)
    parser.add_argument('interval', type=int, help='运行间隔(分钟)', location='form', required=True)
    parser.add_argument('enabled', type=int, help='状态(0-停用/1-启用)', location='form', required=True)
    parser.add_argument('samedata', type=int, help='处理辅种(0-否/1-是)', location='form', required=True)
    parser.add_argument('onlynastool', type=int, help='只管理NAStool添加的下载(0-否/1-是)', location='form',
                        required=True)
    parser.add_argument('ratio', type=float, help='分享率', location='form')
    parser.add_argument('seeding_time', type=int, help='做种时间(小时)', location='form')
    parser.add_argument('upload_avs', type=int, help='平均上传速度(KB/S)', location='form')
    parser.add_argument('size', type=str, help='种子大小(GB)', location='form')
    parser.add_argument('savepath_key', type=str, help='保存路径关键词', location='form')
    parser.add_argument('tracker_key', type=str, help='tracker关键词', location='form')
    parser.add_argument('downloader', type=str, help='下载器(Qb/Tr)', location='form')
    parser.add_argument('qb_state', type=str, help='Qb种子状态(多个用;分隔)', location='form')
    parser.add_argument('qb_category', type=str, help='Qb分类(多个用;分隔)', location='form')
    parser.add_argument('tr_state', type=str, help='Tr种子状态(多个用;分隔)', location='form')
    parser.add_argument('tr_error_key', type=str, help='Tr错误信息关键词', location='form')
    @torrentremover.doc(parser=parser)
    def post(self):
        """
        Add or update an auto-remove task (新增/修改自动删种任务).
        """
        return WebAction().api_action(cmd='update_torrent_remove_task', data=self.parser.parse_args())
@plugin.route('/install')
class PluginInstall(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='插件ID', location='form', required=True)
    @plugin.doc(parser=parser)
    def post(self):
        """
        Install a plugin (安装插件).
        """
        return WebAction().api_action(cmd='install_plugin', data=self.parser.parse_args())
@plugin.route('/uninstall')
class PluginUninstall(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='插件ID', location='form', required=True)
    @plugin.doc(parser=parser)
    def post(self):
        """
        Uninstall a plugin (卸载插件).
        """
        return WebAction().api_action(cmd='uninstall_plugin', data=self.parser.parse_args())
@plugin.route('/apps')
class PluginApps(ClientResource):
    @staticmethod
    @plugin.doc()
    def post():
        """
        Get all plugins from the plugin market (获取插件市场所有插件).
        """
        return WebAction().api_action(cmd='get_plugin_apps')
@plugin.route('/list')
class PluginList(ClientResource):
    @staticmethod
    @plugin.doc()
    def post():
        """
        Get installed plugins (获取已安装插件).
        """
        return WebAction().api_action(cmd='get_plugins_conf')
@plugin.route('/status')
class PluginStatus(ClientResource):
    # Form-encoded parameters accepted by this endpoint.
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, help='插件ID', location='form', required=True)
    @plugin.doc(parser=parser)
    def post(self):
        """
        Get plugin running state (获取插件运行状态).
        """
        return WebAction().api_action(cmd='get_plugin_state', data=self.parser.parse_args())
| 85,303 | Python | .py | 1,914 | 35.392894 | 115 | 0.660688 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,926 | action.py | demigody_nas-tools/web/action.py | import base64
import datetime
import importlib
import inspect
import json
import os.path
import re
import shutil
import signal
import sqlite3
import time
from math import floor
from pathlib import Path
from typing import Dict, Optional
from urllib.parse import unquote
import ast
import copy
import cn2an
from flask_login import logout_user, current_user
from werkzeug.security import generate_password_hash
import log
from app.brushtask import BrushTask
from app.conf import SystemConfig, ModuleConf
from app.downloader import Downloader
from app.filetransfer import FileTransfer
from app.filter import Filter
from app.helper import DbHelper, ProgressHelper, ThreadHelper, \
MetaHelper, DisplayHelper, WordsHelper
from app.helper import RssHelper, PluginHelper
from app.indexer import Indexer
from app.media import Category, Media, Bangumi, DouBan, Scraper
from app.media.meta import MetaInfo, MetaBase
from app.media.tmdbv3api.tmdb import logger
from app.mediaserver import MediaServer
from app.message import Message, MessageCenter
from app.plugins import PluginManager, EventManager
from app.rss import Rss
from app.rsschecker import RssChecker
from app.scheduler import Scheduler
from app.searcher import Searcher
from app.sites import Sites, SiteUserInfo, SiteCookie, SiteConf
from app.subscribe import Subscribe
from app.sync import Sync
from app.torrentremover import TorrentRemover
from app.utils import StringUtils, EpisodeFormat, RequestUtils, PathUtils, \
SystemUtils, ExceptionUtils, Torrent
from app.utils.types import RmtMode, OsType, SearchType, SyncType, MediaType, MovieTypes, TvTypes, \
EventType, SystemConfigKey, RssType
from config import RMT_MEDIAEXT, RMT_SUBEXT, RMT_AUDIO_TRACK_EXT, Config
from web.backend.search_torrents import search_medias_for_web, search_media_by_message
from web.backend.pro_user import ProUser
from web.backend.web_utils import WebUtils
class WebAction:
_actions = {}
_commands = {}
    def __init__(self):
        """
        Build the two dispatch tables used by action()/api_action():
        - `_actions`: WEB request command name -> bound handler method
        - `_commands`: remote chat command (e.g. "/rss") -> background service
        """
        # WEB request -> handler mapping
        self._actions = {
            "sch": self.__sch,
            "search": self.__search,
            "download": self.__download,
            "download_link": self.__download_link,
            "download_torrent": self.__download_torrent,
            "pt_start": self.__pt_start,
            "pt_stop": self.__pt_stop,
            "pt_remove": self.__pt_remove,
            "pt_info": self.__pt_info,
            "del_unknown_path": self.__del_unknown_path,
            "rename": self.__rename,
            "rename_udf": self.__rename_udf,
            "delete_history": self.delete_history,
            "version": self.__version,
            "update_site": self.__update_site,
            "get_site": self.__get_site,
            "del_site": self.__del_site,
            "get_site_favicon": self.__get_site_favicon,
            "restart": self.__restart,
            "update_system": self.update_system,
            "reset_db_version": self.__reset_db_version,
            "logout": self.__logout,
            "update_config": self.__update_config,
            "update_directory": self.__update_directory,
            "add_or_edit_sync_path": self.__add_or_edit_sync_path,
            "get_sync_path": self.get_sync_path,
            "delete_sync_path": self.__delete_sync_path,
            "check_sync_path": self.__check_sync_path,
            "remove_rss_media": self.__remove_rss_media,
            "add_rss_media": self.__add_rss_media,
            "re_identification": self.re_identification,
            "media_info": self.__media_info,
            "test_connection": self.__test_connection,
            "user_manager": self.__user_manager,
            "refresh_rss": self.__refresh_rss,
            "delete_tmdb_cache": self.__delete_tmdb_cache,
            "movie_calendar_data": self.__movie_calendar_data,
            "tv_calendar_data": self.__tv_calendar_data,
            "modify_tmdb_cache": self.__modify_tmdb_cache,
            "rss_detail": self.__rss_detail,
            "truncate_blacklist": self.truncate_blacklist,
            "truncate_rsshistory": self.truncate_rsshistory,
            "add_brushtask": self.__add_brushtask,
            "del_brushtask": self.__del_brushtask,
            "brushtask_enable": self.__brushtask_enable,
            "brushtask_detail": self.__brushtask_detail,
            "update_brushtask_state": self.__update_brushtask_state,
            "name_test": self.__name_test,
            "rule_test": self.__rule_test,
            "net_test": self.__net_test,
            "add_filtergroup": self.__add_filtergroup,
            "restore_filtergroup": self.__restore_filtergroup,
            "set_default_filtergroup": self.__set_default_filtergroup,
            "del_filtergroup": self.__del_filtergroup,
            "add_filterrule": self.__add_filterrule,
            "del_filterrule": self.__del_filterrule,
            "filterrule_detail": self.__filterrule_detail,
            "get_site_activity": self.__get_site_activity,
            "get_site_history": self.__get_site_history,
            "get_recommend": self.get_recommend,
            "get_downloaded": self.get_downloaded,
            "get_site_seeding_info": self.__get_site_seeding_info,
            "clear_tmdb_cache": self.__clear_tmdb_cache,
            "check_site_attr": self.__check_site_attr,
            "refresh_process": self.refresh_process,
            "restory_backup": self.__restory_backup,
            "start_mediasync": self.__start_mediasync,
            "start_mediaDisplayModuleSync": self.__start_mediaDisplayModuleSync,
            "mediasync_state": self.__mediasync_state,
            "get_tvseason_list": self.__get_tvseason_list,
            "get_userrss_task": self.__get_userrss_task,
            "delete_userrss_task": self.__delete_userrss_task,
            "update_userrss_task": self.__update_userrss_task,
            "check_userrss_task": self.__check_userrss_task,
            "get_rssparser": self.__get_rssparser,
            "delete_rssparser": self.__delete_rssparser,
            "update_rssparser": self.__update_rssparser,
            "run_userrss": self.__run_userrss,
            "run_brushtask": self.__run_brushtask,
            "list_site_resources": self.list_site_resources,
            "list_rss_articles": self.__list_rss_articles,
            "rss_article_test": self.__rss_article_test,
            "list_rss_history": self.__list_rss_history,
            "rss_articles_check": self.__rss_articles_check,
            "rss_articles_download": self.__rss_articles_download,
            "add_custom_word_group": self.__add_custom_word_group,
            "delete_custom_word_group": self.__delete_custom_word_group,
            "add_or_edit_custom_word": self.__add_or_edit_custom_word,
            "get_custom_word": self.__get_custom_word,
            "delete_custom_words": self.__delete_custom_words,
            "check_custom_words": self.__check_custom_words,
            "export_custom_words": self.__export_custom_words,
            "analyse_import_custom_words_code": self.__analyse_import_custom_words_code,
            "import_custom_words": self.__import_custom_words,
            "get_categories": self.get_categories,
            "re_rss_history": self.__re_rss_history,
            "delete_rss_history": self.__delete_rss_history,
            "share_filtergroup": self.__share_filtergroup,
            "import_filtergroup": self.__import_filtergroup,
            "get_transfer_statistics": self.get_transfer_statistics,
            "get_library_spacesize": self.get_library_spacesize,
            "get_library_mediacount": self.get_library_mediacount,
            "get_library_playhistory": self.get_library_playhistory,
            "get_search_result": self.get_search_result,
            "search_media_infos": self.search_media_infos,
            "get_movie_rss_list": self.get_movie_rss_list,
            "get_tv_rss_list": self.get_tv_rss_list,
            "get_rss_history": self.get_rss_history,
            "get_transfer_history": self.get_transfer_history,
            "truncate_transfer_history": self.truncate_transfer_history,
            "get_unknown_list": self.get_unknown_list,
            "get_unknown_list_by_page": self.get_unknown_list_by_page,
            "truncate_transfer_unknown": self.truncate_transfer_unknown,
            "get_customwords": self.get_customwords,
            "get_users": self.get_users,
            "get_filterrules": self.get_filterrules,
            "get_downloading": self.get_downloading,
            "test_site": self.__test_site,
            "get_sub_path": self.__get_sub_path,
            "get_filehardlinks": self.__get_filehardlinks,
            "get_dirhardlink": self.__get_dirhardlink,
            "rename_file": self.__rename_file,
            "delete_files": self.__delete_files,
            "download_subtitle": self.__download_subtitle,
            "get_download_setting": self.__get_download_setting,
            "update_download_setting": self.__update_download_setting,
            "delete_download_setting": self.__delete_download_setting,
            "update_message_client": self.__update_message_client,
            "delete_message_client": self.__delete_message_client,
            "check_message_client": self.__check_message_client,
            "get_message_client": self.__get_message_client,
            "test_message_client": self.__test_message_client,
            "get_sites": self.__get_sites,
            "get_indexers": self.__get_indexers,
            "get_download_dirs": self.__get_download_dirs,
            "find_hardlinks": self.__find_hardlinks,
            "update_sites_cookie_ua": self.__update_sites_cookie_ua,
            "update_site_cookie_ua": self.__update_site_cookie_ua,
            "set_site_captcha_code": self.__set_site_captcha_code,
            "update_api_key": self.__update_api_key,
            "update_torrent_remove_task": self.__update_torrent_remove_task,
            "get_torrent_remove_task": self.__get_torrent_remove_task,
            "delete_torrent_remove_task": self.__delete_torrent_remove_task,
            "get_remove_torrents": self.__get_remove_torrents,
            "auto_remove_torrents": self.__auto_remove_torrents,
            "list_brushtask_torrents": self.__list_brushtask_torrents,
            "set_system_config": self.__set_system_config,
            "get_site_user_statistics": self.get_site_user_statistics,
            "send_plugin_message": self.send_plugin_message,
            "send_custom_message": self.send_custom_message,
            "media_detail": self.media_detail,
            "media_similar": self.__media_similar,
            "media_recommendations": self.__media_recommendations,
            "media_person": self.__media_person,
            "person_medias": self.__person_medias,
            "save_user_script": self.__save_user_script,
            "run_directory_sync": self.__run_directory_sync,
            "update_plugin_config": self.__update_plugin_config,
            "get_season_episodes": self.__get_season_episodes,
            "get_user_menus": self.get_user_menus,
            "get_top_menus": self.get_top_menus,
            "auth_user_level": self.auth_user_level,
            "update_downloader": self.__update_downloader,
            "del_downloader": self.__del_downloader,
            "check_downloader": self.__check_downloader,
            "get_downloaders": self.__get_downloaders,
            "test_downloader": self.__test_downloader,
            "get_indexer_statistics": self.__get_indexer_statistics,
            "media_path_scrap": self.__media_path_scrap,
            "get_default_rss_setting": self.get_default_rss_setting,
            "get_movie_rss_items": self.get_movie_rss_items,
            "get_tv_rss_items": self.get_tv_rss_items,
            "get_ical_events": self.get_ical_events,
            "install_plugin": self.install_plugin,
            "uninstall_plugin": self.uninstall_plugin,
            "get_plugin_apps": self.get_plugin_apps,
            "get_plugin_page": self.get_plugin_page,
            "get_plugin_state": self.get_plugin_state,
            "get_plugins_conf": self.get_plugins_conf,
            "update_category_config": self.update_category_config,
            "get_category_config": self.get_category_config,
            "get_system_processes": self.get_system_processes,
            "run_plugin_method": self.run_plugin_method,
            "get_library_resume": self.__get_resume,
        }
        # Remote chat command -> background service mapping
        self._commands = {
            "/ptr": {"func": TorrentRemover().auto_remove_torrents, "desc": "自动删种"},
            "/ptt": {"func": Downloader().transfer, "desc": "下载文件转移"},
            "/rst": {"func": Sync().transfer_sync, "desc": "目录同步"},
            "/rss": {"func": Rss().rssdownload, "desc": "电影/电视剧订阅"},
            "/ssa": {"func": Subscribe().subscribe_search_all, "desc": "订阅搜索"},
            "/tbl": {"func": self.truncate_blacklist, "desc": "清理转移缓存"},
            "/trh": {"func": self.truncate_rsshistory, "desc": "清理RSS缓存"},
            "/utf": {"func": self.unidentification, "desc": "重新识别"},
            # "/udt": {"func": self.update_system, "desc": "系统更新"},
            "/sta": {"func": self.user_statistics, "desc": "站点数据统计"}
        }
def action(self, cmd, data):
"""
执行WEB请求
"""
func = self._actions.get(cmd)
if not func:
return {"code": -1, "msg": "非授权访问!"}
elif inspect.signature(func).parameters:
return func(data)
else:
return func(**{})
def api_action(self, cmd, data=None):
"""
执行API请求
"""
result = self.action(cmd, data)
if not result:
return {
"code": -1,
"success": False,
"message": "服务异常,未获取到返回结果"
}
code = result.get("code", result.get("retcode", 0))
if not code or str(code) == "0":
success = True
else:
success = False
message = result.get("msg", result.get("retmsg", ""))
for key in ['code', 'retcode', 'msg', 'retmsg']:
if key in result:
result.pop(key)
return {
"code": code,
"success": success,
"message": message,
"data": result
}
    @staticmethod
    def stop_service():
        """
        Stop all background services (关闭服务).
        """
        # Stop the scheduler
        Scheduler().stop_service()
        # Stop directory monitoring
        Sync().stop_service()
        # Shut down the virtual display
        DisplayHelper().stop_service()
        # Stop brush (seeding-ratio) tasks
        BrushTask().stop_service()
        # Stop custom RSS subscriptions
        RssChecker().stop_service()
        # Stop automatic torrent removal
        TorrentRemover().stop_service()
        # Stop downloader monitoring
        Downloader().stop_service()
        # Stop plugins
        PluginManager().stop_service()
    @staticmethod
    def start_service():
        """
        Start all background services by instantiating each singleton
        (their constructors launch the service).
        """
        # Load site configuration
        SiteConf()
        # Start the virtual display
        DisplayHelper()
        # Start the scheduler
        Scheduler()
        # Start directory monitoring
        Sync()
        # Start brush (seeding-ratio) tasks
        BrushTask()
        # Start custom RSS subscriptions
        RssChecker()
        # Start automatic torrent removal
        TorrentRemover()
        # Load plugins
        PluginManager()
    def restart_service(self):
        """
        Restart all background services: stop everything, then start again
        (重启服务).
        """
        self.stop_service()
        self.start_service()
    def restart_server(self):
        """
        Stop all services and terminate/restart the server process
        (停止进程), using a platform-specific kill strategy.
        """
        # Stop background services first
        self.stop_service()
        # Restart the process
        if os.name == "nt":
            # NOTE(review): signal.SIGKILL does not exist on Windows, so this
            # falls back to SIGTERM; os.kill on Windows terminates the process
            # regardless — presumably intentional, confirm.
            os.kill(os.getpid(), getattr(signal, "SIGKILL", signal.SIGTERM))
        elif SystemUtils.is_synology():
            os.system(
                "ps -ef | grep -v grep | grep 'python run.py'|awk '{print $2}'|xargs kill -9")
        else:
            # Under pm2 (node present) let pm2 restart us; otherwise kill the
            # python process and rely on the container/supervisor to restart.
            if SystemUtils.check_process('node'):
                os.system("pm2 restart NAStool")
            else:
                os.system("pkill -f 'python3 run.py'")
    def handle_message_job(self, msg, in_from=SearchType.OT, user_id=None, user_name=None):
        """
        Handle an incoming chat/message event (处理消息事件): fire the
        MessageIncoming event, then try built-in commands, then plugin
        commands, and finally fall back to a media search/subscribe.
        """
        if not msg:
            return
        # Fire the MessageIncoming event for listeners
        EventManager().send_event(EventType.MessageIncoming, {
            "channel": in_from.value,
            "user_id": user_id,
            "user_name": user_name,
            "message": msg
        })
        # Built-in commands (e.g. "/rss")
        command = self._commands.get(msg)
        if command:
            # Run the mapped service in a worker thread
            ThreadHelper().start_thread(command.get("func"), ())
            # Acknowledge to the sender
            Message().send_channel_msg(
                channel=in_from, title="正在运行 %s ..." % command.get("desc"), user_id=user_id)
            return
        # Plugin-registered commands
        plugin_commands = PluginManager().get_plugin_commands()
        for command in plugin_commands:
            if command.get("cmd") == msg:
                # Fire the plugin's event
                EventManager().send_event(command.get("event"), command.get("data") or {})
                # Acknowledge to the sender
                Message().send_channel_msg(
                    channel=in_from, title="正在运行 %s ..." % command.get("desc"), user_id=user_id)
                return
        # Otherwise treat the text as a site search / subscription request
        ThreadHelper().start_thread(search_media_by_message,
                                    (msg, in_from, user_id, user_name))
@staticmethod
def set_config_value(cfg, cfg_key, cfg_value):
"""
根据Key设置配置值
"""
# 密码
if cfg_key == "app.login_password":
if cfg_value and not cfg_value.startswith("[hash]"):
cfg['app']['login_password'] = "[hash]%s" % generate_password_hash(
cfg_value)
else:
cfg['app']['login_password'] = cfg_value or "password"
return cfg
# 代理
if cfg_key == "app.proxies":
if cfg_value:
if not cfg_value.startswith("http") and not cfg_value.startswith("sock"):
cfg['app']['proxies'] = {
"https": "http://%s" % cfg_value, "http": "http://%s" % cfg_value}
else:
cfg['app']['proxies'] = {"https": "%s" %
cfg_value, "http": "%s" % cfg_value}
else:
cfg['app']['proxies'] = {"https": None, "http": None}
return cfg
# 最大支持三层赋值
keys = cfg_key.split(".")
if keys:
if len(keys) == 1:
cfg[keys[0]] = cfg_value
elif len(keys) == 2:
if not cfg.get(keys[0]):
cfg[keys[0]] = {}
cfg[keys[0]][keys[1]] = cfg_value
elif len(keys) == 3:
if cfg.get(keys[0]):
if not cfg[keys[0]].get(keys[1]) or isinstance(cfg[keys[0]][keys[1]], str):
cfg[keys[0]][keys[1]] = {}
cfg[keys[0]][keys[1]][keys[2]] = cfg_value
else:
cfg[keys[0]] = {}
cfg[keys[0]][keys[1]] = {}
cfg[keys[0]][keys[1]][keys[2]] = cfg_value
return cfg
    @staticmethod
    def set_config_directory(cfg, oper, cfg_key, cfg_value, update_value=None):
        """
        Update directory-list configuration data in place (更新目录数据).

        :param cfg: configuration dict (mutated and returned)
        :param oper: "add" to append, "sub" to remove, "set" to replace
        :param cfg_key: dotted key path, at most two levels ("a" or "a.b")
        :param cfg_value: directory entry to add/remove/match
        :param update_value: replacement entry for oper == "set"
        :return: the mutated cfg dict
        """
        def remove_sync_path(obj, key):
            # Remove entries whose path part (before "@", with "\" normalized
            # to "/") matches `key`; returns a new list.
            if not isinstance(obj, list):
                return []
            ret_obj = []
            for item in obj:
                if item.split("@")[0].replace("\\", "/") != key.split("@")[0].replace("\\", "/"):
                    ret_obj.append(item)
            return ret_obj
        # At most two levels of nesting are supported.
        keys = cfg_key.split(".")
        if keys:
            if len(keys) == 1:
                if cfg.get(keys[0]):
                    # Promote a scalar to a single-element list first.
                    if not isinstance(cfg[keys[0]], list):
                        cfg[keys[0]] = [cfg[keys[0]]]
                    if oper == "add":
                        cfg[keys[0]].append(cfg_value)
                    elif oper == "sub":
                        cfg[keys[0]].remove(cfg_value)
                        # Empty lists collapse to None.
                        if not cfg[keys[0]]:
                            cfg[keys[0]] = None
                    elif oper == "set":
                        cfg[keys[0]].remove(cfg_value)
                        if update_value:
                            cfg[keys[0]].append(update_value)
                else:
                    cfg[keys[0]] = cfg_value
            elif len(keys) == 2:
                if cfg.get(keys[0]):
                    if not cfg[keys[0]].get(keys[1]):
                        cfg[keys[0]][keys[1]] = []
                    # Promote a scalar to a single-element list first.
                    if not isinstance(cfg[keys[0]][keys[1]], list):
                        cfg[keys[0]][keys[1]] = [cfg[keys[0]][keys[1]]]
                    if oper == "add":
                        cfg[keys[0]][keys[1]].append(
                            cfg_value.replace("\\", "/"))
                    elif oper == "sub":
                        cfg[keys[0]][keys[1]] = remove_sync_path(
                            cfg[keys[0]][keys[1]], cfg_value)
                        # Empty lists collapse to None.
                        if not cfg[keys[0]][keys[1]]:
                            cfg[keys[0]][keys[1]] = None
                    elif oper == "set":
                        cfg[keys[0]][keys[1]] = remove_sync_path(
                            cfg[keys[0]][keys[1]], cfg_value)
                        if update_value:
                            cfg[keys[0]][keys[1]].append(
                                update_value.replace("\\", "/"))
                else:
                    cfg[keys[0]] = {}
                    cfg[keys[0]][keys[1]] = cfg_value.replace("\\", "/")
        return cfg
    @staticmethod
    def __sch(data):
        """
        Start a named background service in a worker thread (启动服务).
        `data["item"]` selects the service from a fixed whitelist.
        """
        commands = {
            "pttransfer": Downloader().transfer,
            "sync": Sync().transfer_sync,
            "rssdownload": Rss().rssdownload,
            "subscribe_search_all": Subscribe().subscribe_search_all,
        }
        sch_item = data.get("item")
        if sch_item and commands.get(sch_item):
            ThreadHelper().start_thread(commands.get(sch_item), ())
        return {"retmsg": "服务已启动", "item": sch_item}
    @staticmethod
    def __search(data):
        """
        Search resources from the WEB UI (WEB搜索资源).
        `unident` disables media identification; `media_type` is normalized
        to MediaType.MOVIE / MediaType.TV.
        """
        search_word = data.get("search_word")
        ident_flag = False if data.get("unident") else True
        filters = data.get("filters")
        tmdbid = data.get("tmdbid")
        media_type = data.get("media_type")
        if media_type:
            if media_type in MovieTypes:
                media_type = MediaType.MOVIE
            else:
                media_type = MediaType.TV
        if search_word:
            ret, ret_msg = search_medias_for_web(content=search_word,
                                                 ident_flag=ident_flag,
                                                 filters=filters,
                                                 tmdbid=tmdbid,
                                                 media_type=media_type)
            if ret != 0:
                return {"code": ret, "msg": ret_msg}
        return {"code": 0}
    @staticmethod
    def __download(data):
        """
        Add a download from WEB search results (从WEB添加下载):
        look up the cached search result by id, re-identify the media,
        and hand it to the downloader.
        """
        dl_id = data.get("id")
        dl_dir = data.get("dir")
        dl_setting = data.get("setting")
        results = Searcher().get_search_result_by_id(dl_id)
        for res in results:
            # TODO: a bad download link or media info is skipped silently,
            # so the UI still reports success (下载链接或媒体信息有问题,仍会显示添加成功)
            if not res.ENCLOSURE:
                continue
            media = Media().get_media_info(title=res.TORRENT_NAME, subtitle=res.DESCRIPTION)
            if not media:
                continue
            media.set_torrent_info(enclosure=res.ENCLOSURE,
                                   size=res.SIZE,
                                   site=res.SITE,
                                   page_url=res.PAGEURL,
                                   upload_volume_factor=float(
                                       res.UPLOAD_VOLUME_FACTOR),
                                   download_volume_factor=float(res.DOWNLOAD_VOLUME_FACTOR))
            # Send to the downloader
            _, ret, ret_msg = Downloader().download(media_info=media,
                                                    download_dir=dl_dir,
                                                    download_setting=dl_setting,
                                                    in_from=SearchType.WEB,
                                                    user_name=current_user.username)
            if not ret:
                return {"retcode": -1, "retmsg": ret_msg}
        return {"retcode": 0, "retmsg": ""}
@staticmethod
def __download_link(data):
"""
从WEB添加下载链接
"""
site = data.get("site")
enclosure = data.get("enclosure")
title = data.get("title")
description = data.get("description")
page_url = data.get("page_url")
size = data.get("size")
seeders = data.get("seeders")
uploadvolumefactor = data.get("uploadvolumefactor")
downloadvolumefactor = data.get("downloadvolumefactor")
dl_dir = data.get("dl_dir")
dl_setting = data.get("dl_setting")
if not title or not enclosure:
return {"code": -1, "msg": "种子信息有误"}
media = Media().get_media_info(title=title, subtitle=description)
media.site = site
media.enclosure = enclosure
media.page_url = page_url
media.size = size
media.upload_volume_factor = float(uploadvolumefactor)
media.download_volume_factor = float(downloadvolumefactor)
media.seeders = seeders
# 添加下载
_, ret, ret_msg = Downloader().download(media_info=media,
download_dir=dl_dir,
download_setting=dl_setting,
in_from=SearchType.WEB,
user_name=current_user.username)
if not ret:
return {"code": 1, "msg": ret_msg or "如连接正常,请检查下载任务是否存在"}
return {"code": 0, "msg": "下载成功"}
    @staticmethod
    def __download_torrent(data):
        """
        Add downloads from uploaded torrent files and/or torrent URLs.

        ``files`` is a list of upload descriptors (filename relative to the
        temp dir); ``urls`` is a list of torrent links or a single link.
        Returns a ``{"code", "msg"}`` dict; code 0 on completion, -1 on
        validation or fetch failure.
        """
        dl_dir = data.get("dl_dir")
        dl_setting = data.get("dl_setting")
        files = data.get("files") or []
        urls = data.get("urls") or []
        if not files and not urls:
            return {"code": -1, "msg": "没有种子文件或者种子链接"}
        # uploaded torrent files
        for file_item in files:
            if not file_item:
                continue
            file_name = file_item.get("upload", {}).get("filename")
            # assumes the upload was already stored under the temp dir — TODO confirm
            file_path = os.path.join(Config().get_temp_path(), file_name)
            media_info = Media().get_media_info(title=file_name)
            if media_info:
                media_info.site = "WEB"
            # hand off to the downloader
            Downloader().download(media_info=media_info,
                                  download_dir=dl_dir,
                                  download_setting=dl_setting,
                                  torrent_file=file_path,
                                  in_from=SearchType.WEB,
                                  user_name=current_user.username)
        # torrent links; accept a single string as a one-element list
        if urls and not isinstance(urls, list):
            urls = [urls]
        for url in urls:
            if not url:
                continue
            # resolve the site so we can send its cookie/UA/proxy with the fetch
            site_info = Sites().get_sites(siteurl=url)
            if not site_info:
                return {"code": -1, "msg": "根据链接地址未匹配到站点"}
            # fetch the torrent file and read its metadata
            file_path, _, _, _, retmsg = Torrent().get_torrent_info(
                url=url,
                cookie=site_info.get("cookie"),
                ua=site_info.get("ua"),
                proxy=site_info.get("proxy")
            )
            if not file_path:
                return {"code": -1, "msg": f"下载种子文件失败: {retmsg}"}
            media_info = Media().get_media_info(title=os.path.basename(file_path))
            if media_info:
                media_info.site = "WEB"
            # hand off to the downloader
            Downloader().download(media_info=media_info,
                                  download_dir=dl_dir,
                                  download_setting=dl_setting,
                                  torrent_file=file_path,
                                  in_from=SearchType.WEB,
                                  user_name=current_user.username)
        return {"code": 0, "msg": "添加下载完成!"}
@staticmethod
def __pt_start(data):
"""
开始下载
"""
tid = data.get("id")
if id:
Downloader().start_torrents(ids=tid)
return {"retcode": 0, "id": tid}
@staticmethod
def __pt_stop(data):
"""
停止下载
"""
tid = data.get("id")
if id:
Downloader().stop_torrents(ids=tid)
return {"retcode": 0, "id": tid}
@staticmethod
def __pt_remove(data):
"""
删除下载
"""
tid = data.get("id")
if id:
Downloader().delete_torrents(ids=tid, delete_file=True)
return {"retcode": 0, "id": tid}
@staticmethod
def __pt_info(data):
"""
查询具体种子的信息
"""
ids = data.get("ids")
torrents = Downloader().get_downloading_progress(ids=ids)
return {"retcode": 0, "torrents": torrents}
@staticmethod
def __del_unknown_path(data):
"""
删除路径
"""
tids = data.get("id")
if isinstance(tids, list):
for tid in tids:
if not tid:
continue
FileTransfer().delete_transfer_unknown(tid)
return {"retcode": 0}
else:
retcode = FileTransfer().delete_transfer_unknown(tids)
return {"retcode": retcode}
    def __rename(self, data):
        """
        Manually re-transfer a file from a transfer-log or unidentified record.

        Resolves the source path and destination from either ``logid``
        (transfer history) or ``unknown_id`` (unidentified record), then
        delegates to ``__manual_transfer``. Returns a ``{"retcode", "retmsg"}``
        dict: 0 on success, -1 on lookup/path errors, 2 on transfer failure.
        """
        path = dest_dir = None
        syncmod = ModuleConf.RMT_MODES.get(data.get("syncmod"))
        logid = data.get("logid")
        if logid:
            # look up the transfer-history record
            transinfo = FileTransfer().get_transfer_info_by_id(logid)
            if transinfo:
                path = os.path.join(
                    transinfo.SOURCE_PATH, transinfo.SOURCE_FILENAME)
                dest_dir = transinfo.DEST
            else:
                return {"retcode": -1, "retmsg": "未查询到转移日志记录"}
        else:
            # fall back to the unidentified-record id
            unknown_id = data.get("unknown_id")
            if unknown_id:
                inknowninfo = FileTransfer().get_unknown_info_by_id(unknown_id)
                if inknowninfo:
                    path = inknowninfo.PATH
                    dest_dir = inknowninfo.DEST
                else:
                    return {"retcode": -1, "retmsg": "未查询到未识别记录"}
        if not dest_dir:
            dest_dir = ""
        if not path:
            return {"retcode": -1, "retmsg": "输入路径有误"}
        tmdbid = data.get("tmdb")
        mtype = data.get("type")
        season = data.get("season")
        episode_format = data.get("episode_format")
        episode_details = data.get("episode_details")
        episode_part = data.get("episode_part")
        episode_offset = data.get("episode_offset")
        min_filesize = data.get("min_filesize")
        ignore_download_history = data.get("ignore_download_history")
        if mtype in MovieTypes:
            media_type = MediaType.MOVIE
        elif mtype in TvTypes:
            media_type = MediaType.TV
        else:
            media_type = MediaType.ANIME
        # if this manual fix targets a single media file and an episode_format
        # is given, widen the fix to the whole containing directory
        need_fix_all = False
        if os.path.splitext(path)[-1].lower() in RMT_MEDIAEXT and episode_format:
            path = os.path.dirname(path)
            need_fix_all = True
        # run the transfer
        succ_flag, ret_msg = self.__manual_transfer(inpath=path,
                                                    syncmod=syncmod,
                                                    outpath=dest_dir,
                                                    media_type=media_type,
                                                    episode_format=episode_format,
                                                    episode_details=episode_details,
                                                    episode_part=episode_part,
                                                    episode_offset=episode_offset,
                                                    need_fix_all=need_fix_all,
                                                    min_filesize=min_filesize,
                                                    tmdbid=tmdbid,
                                                    season=season,
                                                    ignore_download_history=ignore_download_history)
        if succ_flag:
            if not need_fix_all and not logid:
                # mark the unidentified record as handled
                FileTransfer().update_transfer_unknown_state(path)
            return {"retcode": 0, "retmsg": "转移成功"}
        else:
            return {"retcode": 2, "retmsg": ret_msg}
    def __rename_udf(self, data):
        """
        User-defined identification: transfer an arbitrary input path with
        explicit media type / TMDB id / season / episode hints.

        Returns a ``{"retcode", "retmsg"}`` dict: 0 on success, -1 when the
        input path does not exist, 2 on transfer failure.
        """
        inpath = data.get("inpath")
        if not os.path.exists(inpath):
            return {"retcode": -1, "retmsg": "输入路径不存在"}
        outpath = data.get("outpath")
        syncmod = ModuleConf.RMT_MODES.get(data.get("syncmod"))
        tmdbid = data.get("tmdb")
        mtype = data.get("type")
        season = data.get("season")
        episode_format = data.get("episode_format")
        episode_details = data.get("episode_details")
        episode_part = data.get("episode_part")
        episode_offset = data.get("episode_offset")
        min_filesize = data.get("min_filesize")
        ignore_download_history = data.get("ignore_download_history")
        if mtype in MovieTypes:
            media_type = MediaType.MOVIE
        elif mtype in TvTypes:
            media_type = MediaType.TV
        else:
            media_type = MediaType.ANIME
        # run the transfer
        succ_flag, ret_msg = self.__manual_transfer(inpath=inpath,
                                                    syncmod=syncmod,
                                                    outpath=outpath,
                                                    media_type=media_type,
                                                    episode_format=episode_format,
                                                    episode_details=episode_details,
                                                    episode_part=episode_part,
                                                    episode_offset=episode_offset,
                                                    min_filesize=min_filesize,
                                                    tmdbid=tmdbid,
                                                    season=season,
                                                    ignore_download_history=ignore_download_history)
        if succ_flag:
            return {"retcode": 0, "retmsg": "转移成功"}
        else:
            return {"retcode": 2, "retmsg": ret_msg}
    @staticmethod
    def __manual_transfer(inpath,
                          syncmod,
                          outpath=None,
                          media_type=None,
                          episode_format=None,
                          episode_details=None,
                          episode_part=None,
                          episode_offset=None,
                          min_filesize=None,
                          tmdbid=None,
                          season=None,
                          need_fix_all=False,
                          ignore_download_history=False
                          ):
        """
        Perform a manual file transfer.

        With a ``tmdbid`` the TMDB record is fetched first and passed to the
        transfer so identification is skipped; otherwise the transfer
        identifies the media itself. Returns ``(success_flag, message)``.
        """
        inpath = os.path.normpath(inpath)
        if outpath:
            outpath = os.path.normpath(outpath)
        if not os.path.exists(inpath):
            return False, "输入路径不存在"
        if tmdbid:
            # an explicit TMDB id was supplied: resolve it up front
            tmdb_info = Media().get_tmdb_info(mtype=media_type, tmdbid=tmdbid)
            if not tmdb_info:
                return False, "识别失败,无法查询到TMDB信息"
            # transfer using the resolved TMDB record
            succ_flag, ret_msg = FileTransfer().transfer_media(in_from=SyncType.MAN,
                                                               in_path=inpath,
                                                               rmt_mode=syncmod,
                                                               target_dir=outpath,
                                                               tmdb_info=tmdb_info,
                                                               media_type=media_type,
                                                               season=season,
                                                               episode=(
                                                                   EpisodeFormat(episode_format,
                                                                                 episode_details,
                                                                                 episode_part,
                                                                                 episode_offset),
                                                                   need_fix_all),
                                                               min_filesize=min_filesize,
                                                               udf_flag=True,
                                                               ignore_download_history=ignore_download_history)
        else:
            # transfer letting the pipeline identify the media
            succ_flag, ret_msg = FileTransfer().transfer_media(in_from=SyncType.MAN,
                                                               in_path=inpath,
                                                               rmt_mode=syncmod,
                                                               target_dir=outpath,
                                                               media_type=media_type,
                                                               episode=(
                                                                   EpisodeFormat(episode_format,
                                                                                 episode_details,
                                                                                 episode_part,
                                                                                 episode_offset),
                                                                   need_fix_all),
                                                               min_filesize=min_filesize,
                                                               udf_flag=True,
                                                               ignore_download_history=ignore_download_history)
        return succ_flag, ret_msg
    def delete_history(self, data):
        """
        Delete identification (transfer) records and, per ``flag``, the files.

        ``flag`` values: ``del_source`` removes the source file,
        ``del_dest`` removes the library file, ``del_all`` removes both.
        When a record has no explicit destination path/filename, the library
        location is reconstructed from the record's metadata and the matching
        directory/episode files are removed instead.
        """
        logids = data.get('logids') or []
        flag = data.get('flag')
        _filetransfer = FileTransfer()
        for logid in logids:
            # read the history record
            transinfo = _filetransfer.get_transfer_info_by_id(logid)
            if transinfo:
                # delete the record itself
                _filetransfer.delete_transfer_log_by_id(logid)
                # then delete files according to flag
                source_path = transinfo.SOURCE_PATH
                source_filename = transinfo.SOURCE_FILENAME
                media_info = {
                    "type": transinfo.TYPE,
                    "category": transinfo.CATEGORY,
                    "title": transinfo.TITLE,
                    "year": transinfo.YEAR,
                    "tmdbid": transinfo.TMDBID,
                    "season_episode": transinfo.SEASON_EPISODE
                }
                # drop the blacklist entry tied to this record
                _filetransfer.delete_transfer_blacklist("%s/%s" % (source_path, source_filename))
                dest = transinfo.DEST
                dest_path = transinfo.DEST_PATH
                dest_filename = transinfo.DEST_FILENAME
                if flag in ["del_source", "del_all"]:
                    # delete the source file
                    del_flag, del_msg = self.delete_media_file(source_path, source_filename)
                    if not del_flag:
                        log.error(del_msg)
                    else:
                        log.info(del_msg)
                        # fire the source-file-deleted event
                        EventManager().send_event(EventType.SourceFileDeleted, {
                            "media_info": media_info,
                            "path": source_path,
                            "filename": source_filename
                        })
                if flag in ["del_dest", "del_all"]:
                    # delete the library file
                    if dest_path and dest_filename:
                        del_flag, del_msg = self.delete_media_file(dest_path, dest_filename)
                        if not del_flag:
                            log.error(del_msg)
                        else:
                            log.info(del_msg)
                            # fire the library-file-deleted event
                            EventManager().send_event(EventType.LibraryFileDeleted, {
                                "media_info": media_info,
                                "path": dest_path,
                                "filename": dest_filename
                            })
                    else:
                        # no explicit destination: rebuild it from the record's metadata
                        meta_info = MetaInfo(title=source_filename)
                        meta_info.title = transinfo.TITLE
                        meta_info.category = transinfo.CATEGORY
                        meta_info.year = transinfo.YEAR
                        if transinfo.SEASON_EPISODE:
                            meta_info.begin_season = int(
                                str(transinfo.SEASON_EPISODE).replace("S", ""))
                        if transinfo.TYPE == MediaType.MOVIE.value:
                            meta_info.type = MediaType.MOVIE
                        else:
                            meta_info.type = MediaType.TV
                        # delete files at the reconstructed destination
                        dest_path = _filetransfer.get_dest_path_by_info(dest=dest, meta_info=meta_info)
                        if dest_path and dest_path.find(meta_info.title) != -1:
                            rm_parent_dir = False
                            if not meta_info.get_season_list():
                                # movie: remove the whole directory
                                try:
                                    shutil.rmtree(dest_path)
                                    # fire the library-file-deleted event
                                    EventManager().send_event(EventType.LibraryFileDeleted, {
                                        "media_info": media_info,
                                        "path": dest_path
                                    })
                                except Exception as e:
                                    ExceptionUtils.exception_traceback(e)
                            elif not meta_info.get_episode_string():
                                # TV show without episode numbers: remove the season directory
                                try:
                                    shutil.rmtree(dest_path)
                                    # fire the library-file-deleted event
                                    EventManager().send_event(EventType.LibraryFileDeleted, {
                                        "media_info": media_info,
                                        "path": dest_path
                                    })
                                except Exception as e:
                                    ExceptionUtils.exception_traceback(e)
                                rm_parent_dir = True
                            else:
                                # TV show with episode numbers: remove matching episode files
                                for dest_file in PathUtils.get_dir_files(dest_path):
                                    file_meta_info = MetaInfo(
                                        os.path.basename(dest_file))
                                    if file_meta_info.get_episode_list() and set(
                                            file_meta_info.get_episode_list()
                                    ).issubset(set(meta_info.get_episode_list())):
                                        try:
                                            os.remove(dest_file)
                                            # fire the library-file-deleted event
                                            EventManager().send_event(EventType.LibraryFileDeleted, {
                                                "media_info": media_info,
                                                "path": os.path.dirname(dest_file),
                                                "filename": os.path.basename(dest_file)
                                            })
                                        except Exception as e:
                                            ExceptionUtils.exception_traceback(
                                                e)
                                rm_parent_dir = True
                            if rm_parent_dir \
                                    and not PathUtils.get_dir_files(os.path.dirname(dest_path), exts=RMT_MEDIAEXT):
                                # parent holds no media files any more: remove it too
                                try:
                                    shutil.rmtree(os.path.dirname(dest_path))
                                except Exception as e:
                                    ExceptionUtils.exception_traceback(e)
        return {"retcode": 0}
@staticmethod
def delete_media_file(filedir, filename):
"""
删除媒体文件,空目录也会被删除
"""
filedir = os.path.normpath(filedir).replace("\\", "/")
file = os.path.join(filedir, filename)
try:
if not os.path.exists(file):
return False, f"{file} 不存在"
os.remove(file)
nfoname = f"{os.path.splitext(filename)[0]}.nfo"
nfofile = os.path.join(filedir, nfoname)
if os.path.exists(nfofile):
os.remove(nfofile)
# 检查空目录并删除
if re.findall(r"^S\d{2}|^Season", os.path.basename(filedir), re.I):
# 当前是季文件夹,判断并删除
seaon_dir = filedir
if seaon_dir.count('/') > 1 and not PathUtils.get_dir_files(seaon_dir, exts=RMT_MEDIAEXT):
shutil.rmtree(seaon_dir)
# 媒体文件夹
media_dir = os.path.dirname(seaon_dir)
else:
media_dir = filedir
# 检查并删除媒体文件夹,非根目录且目录大于二级,且没有媒体文件时才会删除
if media_dir != '/' \
and media_dir.count('/') > 1 \
and not re.search(r'[a-zA-Z]:/$', media_dir) \
and not PathUtils.get_dir_files(media_dir, exts=RMT_MEDIAEXT):
shutil.rmtree(media_dir)
return True, f"{file} 删除成功"
except Exception as e:
ExceptionUtils.exception_traceback(e)
return True, f"{file} 删除失败"
@staticmethod
def __version():
"""
去除检查新版本
"""
return {"code": -1, "version": "3.3.19", "url": ""}
    @staticmethod
    def __update_site(data):
        """
        Create or update a site record.

        Rejects duplicate names; when updating an existing site whose name
        changed, historical per-site statistics are renamed to match.
        """
        _sites = Sites()
        def __is_site_duplicate(query_name, query_tid):
            # duplicate check: another site (different id) already uses this name
            for site in _sites.get_sites_by_name(name=query_name):
                if str(site.get("id")) != str(query_tid):
                    return True
            return False
        tid = data.get('site_id')
        name = data.get('site_name')
        site_pri = data.get('site_pri')
        rssurl = data.get('site_rssurl')
        signurl = data.get('site_signurl')
        cookie = data.get('site_cookie')
        apikey = data.get('site_apikey')
        note = data.get('site_note')
        if isinstance(note, dict):
            note = json.dumps(note)
        rss_uses = data.get('site_include')
        if __is_site_duplicate(name, tid):
            return {"code": 400, "msg": "站点名称重复"}
        if tid:
            sites = _sites.get_sites(siteid=tid)
            # the site being updated must exist
            if not sites:
                return {"code": 400, "msg": "站点不存在"}
            old_name = sites.get('name')
            ret = _sites.update_site(tid=tid,
                                     name=name,
                                     site_pri=site_pri,
                                     rssurl=rssurl,
                                     signurl=signurl,
                                     cookie=cookie,
                                     apikey=apikey,
                                     note=note,
                                     rss_uses=rss_uses)
            if ret and (name != old_name):
                # keep historical site statistics consistent with the new name
                SiteUserInfo().update_site_name(name, old_name)
        else:
            ret = _sites.add_site(name=name,
                                  site_pri=site_pri,
                                  rssurl=rssurl,
                                  signurl=signurl,
                                  cookie=cookie,
                                  apikey=apikey,
                                  note=note,
                                  rss_uses=rss_uses)
        if ret:
            return {"code": "200"}
        else:
            return {"code": "400", "msg": "更新数据库失败,请重试"}
@staticmethod
def __get_site(data):
"""
查询单个站点信息
"""
tid = data.get("id")
site_free = False
site_2xfree = False
site_hr = False
if tid:
ret = Sites().get_sites(siteid=tid)
if ret.get("rssurl"):
site_attr = SiteConf().get_grap_conf(ret.get("rssurl"))
if site_attr.get("FREE"):
site_free = True
if site_attr.get("2XFREE"):
site_2xfree = True
if site_attr.get("HR"):
site_hr = True
else:
ret = []
return {"code": 0, "site": ret, "site_free": site_free, "site_2xfree": site_2xfree, "site_hr": site_hr}
@staticmethod
def __get_sites(data):
"""
查询多个站点信息
"""
rss = True if data.get("rss") else False
brush = True if data.get("brush") else False
statistic = True if data.get("statistic") else False
basic = True if data.get("basic") else False
if basic:
sites = Sites().get_site_dict(rss=rss,
brush=brush,
statistic=statistic)
else:
sites = Sites().get_sites(rss=rss,
brush=brush,
statistic=statistic)
return {"code": 0, "sites": sites}
@staticmethod
def __del_site(data):
"""
删除单个站点信息
"""
tid = data.get("id")
if tid:
ret = Sites().delete_site(tid)
return {"code": ret}
else:
return {"code": 0}
    def __restart(self):
        """
        Restart the application.
        """
        # exits the main process via restart_server
        self.restart_server()
        return {"code": 0}
    def update_system(self):
        """
        Upgrade the application in place, then restart it.

        On Synology, delegates to the package's built-in ``nastool update``
        command; elsewhere it updates the git checkout (with the configured
        proxy), reinstalls requirements, fixes ownership and restarts.
        """
        # upgrade
        if SystemUtils.is_synology():
            # only run when no update is already in progress
            if SystemUtils.execute('/bin/ps -w -x | grep -v grep | grep -w "nastool update" | wc -l') == '0':
                # run the Synology package's built-in update command
                os.system('nastool update')
                # restart
                self.restart_server()
        else:
            # clear any previously configured git proxy
            os.system("sudo git config --global --unset http.proxy")
            os.system("sudo git config --global --unset https.proxy")
            # apply the configured proxy to git, if any
            proxy = Config().get_proxies() or {}
            http_proxy = proxy.get("http")
            https_proxy = proxy.get("https")
            if http_proxy or https_proxy:
                os.system(
                    f"sudo git config --global http.proxy {http_proxy or https_proxy}")
                os.system(
                    f"sudo git config --global https.proxy {https_proxy or http_proxy}")
            # discard local changes and untracked files
            os.system("sudo git clean -dffx")
            # fetch and hard-reset to the configured branch
            branch = os.getenv("NASTOOL_VERSION", "master")
            os.system(f"sudo git fetch --depth 1 origin {branch}")
            os.system(f"sudo git reset --hard origin/{branch}")
            os.system("sudo git submodule update --init --recursive")
            # install dependencies
            os.system('sudo pip install -r /nas-tools/requirements.txt')
            # fix ownership
            os.system('sudo chown -R nt:nt /nas-tools')
            # restart
            self.restart_server()
        return {"code": 0}
@staticmethod
def __reset_db_version():
"""
重置数据库版本
"""
try:
DbHelper().drop_table("alembic_version")
return {"code": 0}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": str(e)}
    @staticmethod
    def __logout():
        """
        Log out the current web session.
        """
        logout_user()
        return {"code": 0}
def __update_config(self, data):
"""
更新配置信息
"""
cfg = Config().get_config()
cfgs = dict(data).items()
# 仅测试不保存
config_test = False
# 修改配置
for key, value in cfgs:
if key == "test" and value:
config_test = True
continue
# 生效配置
cfg = self.set_config_value(cfg, key, value)
# 保存配置
if not config_test:
Config().save_config(cfg)
return {"code": 0}
    @staticmethod
    def __add_or_edit_sync_path(data):
        """
        Create or update a sync-directory entry.

        Validates the source path, normalizes separators, rejects a
        destination nested inside the source, and blocks cross-filesystem
        hard links. Editing is implemented as delete-then-insert.
        """
        sid = data.get("sid")
        source = data.get("from")
        dest = data.get("to")
        unknown = data.get("unknown")
        mode = data.get("syncmod")
        compatibility = data.get("compatibility")
        rename = data.get("rename")
        enabled = data.get("enabled")
        locating = data.get("locating")
        _sync = Sync()
        # validate the source directory
        if not source:
            return {"code": 1, "msg": f'源目录不能为空'}
        if not os.path.exists(source):
            return {"code": 1, "msg": f'{source}目录不存在'}
        # windows paths use \, linux paths use /
        source = os.path.normpath(source)
        # destination is optional
        if dest:
            dest = os.path.normpath(dest)
            if PathUtils.is_path_in_path(source, dest):
                return {"code": 1, "msg": "目的目录不可包含在源目录中"}
        if unknown:
            unknown = os.path.normpath(unknown)
        # hard links cannot cross filesystems
        # NOTE(review): os.path.commonprefix is a plain string prefix, not a
        # path-aware check — verify it detects cross-drive cases correctly
        if mode == "link" and dest:
            common_path = os.path.commonprefix([source, dest])
            if not common_path or common_path == "/":
                return {"code": 1, "msg": "硬链接不能跨盘"}
        # edit = delete then re-insert
        if sid:
            _sync.delete_sync_path(sid)
        # enabling this path disables other sync paths with the same source
        if enabled == 1:
            _sync.check_source(source=source)
        # persist to database
        _sync.insert_sync_path(source=source,
                               dest=dest,
                               unknown=unknown,
                               mode=mode,
                               compatibility=compatibility,
                               rename=rename,
                               enabled=enabled,
                               locating=locating)
        return {"code": 0, "msg": ""}
@staticmethod
def get_sync_path(data=None):
"""
查询同步目录
"""
if data:
sync_path = Sync().get_sync_path_conf(sid=data.get("sid"))
else:
sync_path = Sync().get_sync_path_conf()
return {"code": 0, "result": sync_path}
@staticmethod
def __delete_sync_path(data):
"""
移出同步目录
"""
sid = data.get("sid")
Sync().delete_sync_path(sid)
return {"code": 0}
@staticmethod
def __check_sync_path(data):
"""
维护同步目录
"""
flag = data.get("flag")
sid = data.get("sid")
checked = data.get("checked")
_sync = Sync()
if flag == "compatibility":
_sync.check_sync_paths(sid=sid, compatibility=1 if checked else 0)
return {"code": 0}
elif flag == "rename":
_sync.check_sync_paths(sid=sid, rename=1 if checked else 0)
return {"code": 0}
elif flag == "enable":
# 若启用,则关闭其他相同源目录的同步目录
if checked:
_sync.check_source(sid=sid)
_sync.check_sync_paths(sid=sid, enabled=1 if checked else 0)
return {"code": 0}
elif flag == "locating":
_sync.check_sync_paths(sid=sid, locating=1 if checked else 0)
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __remove_rss_media(data):
"""
移除RSS订阅
"""
name = data.get("name")
mtype = data.get("type")
year = data.get("year")
season = data.get("season")
rssid = data.get("rssid")
page = data.get("page")
tmdbid = data.get("tmdbid")
if not str(tmdbid).isdigit():
tmdbid = None
if name:
name = MetaInfo(title=name).get_name()
if mtype:
if mtype in MovieTypes:
Subscribe().delete_subscribe(mtype=MediaType.MOVIE,
title=name,
year=year,
rssid=rssid,
tmdbid=tmdbid)
else:
Subscribe().delete_subscribe(mtype=MediaType.TV,
title=name,
season=season,
rssid=rssid,
tmdbid=tmdbid)
return {"code": 0, "page": page, "name": name}
    @staticmethod
    def __add_rss_media(data):
        """
        Add an RSS subscription.

        When ``season`` is a list, one subscription is created per season
        (stopping at the first failure); otherwise a single subscription is
        created. The resolved subscription id is returned when available.
        """
        _subscribe = Subscribe()
        channel = RssType.Manual if data.get("in_form") == "manual" else RssType.Auto
        name = data.get("name")
        year = data.get("year")
        keyword = data.get("keyword")
        season = data.get("season")
        fuzzy_match = data.get("fuzzy_match")
        mediaid = data.get("mediaid")
        rss_sites = data.get("rss_sites")
        search_sites = data.get("search_sites")
        over_edition = data.get("over_edition")
        filter_restype = data.get("filter_restype")
        filter_pix = data.get("filter_pix")
        filter_team = data.get("filter_team")
        filter_rule = data.get("filter_rule")
        filter_include = data.get("filter_include")
        filter_exclude = data.get("filter_exclude")
        save_path = data.get("save_path")
        download_setting = data.get("download_setting")
        total_ep = data.get("total_ep")
        current_ep = data.get("current_ep")
        rssid = data.get("rssid")
        page = data.get("page")
        mtype = MediaType.MOVIE if data.get(
            "type") in MovieTypes else MediaType.TV
        media_info = None
        if isinstance(season, list):
            # multi-season request: subscribe each season in turn
            code = 0
            msg = ""
            for sea in season:
                code, msg, media_info = _subscribe.add_rss_subscribe(mtype=mtype,
                                                                     name=name,
                                                                     year=year,
                                                                     channel=channel,
                                                                     keyword=keyword,
                                                                     season=sea,
                                                                     fuzzy_match=fuzzy_match,
                                                                     mediaid=mediaid,
                                                                     rss_sites=rss_sites,
                                                                     search_sites=search_sites,
                                                                     over_edition=over_edition,
                                                                     filter_restype=filter_restype,
                                                                     filter_pix=filter_pix,
                                                                     filter_team=filter_team,
                                                                     filter_rule=filter_rule,
                                                                     filter_include=filter_include,
                                                                     filter_exclude=filter_exclude,
                                                                     save_path=save_path,
                                                                     download_setting=download_setting,
                                                                     rssid=rssid)
                if code != 0:
                    break
        else:
            code, msg, media_info = _subscribe.add_rss_subscribe(mtype=mtype,
                                                                 name=name,
                                                                 year=year,
                                                                 channel=channel,
                                                                 keyword=keyword,
                                                                 season=season,
                                                                 fuzzy_match=fuzzy_match,
                                                                 mediaid=mediaid,
                                                                 rss_sites=rss_sites,
                                                                 search_sites=search_sites,
                                                                 over_edition=over_edition,
                                                                 filter_restype=filter_restype,
                                                                 filter_pix=filter_pix,
                                                                 filter_team=filter_team,
                                                                 filter_rule=filter_rule,
                                                                 filter_include=filter_include,
                                                                 filter_exclude=filter_exclude,
                                                                 save_path=save_path,
                                                                 download_setting=download_setting,
                                                                 total_ep=total_ep,
                                                                 current_ep=current_ep,
                                                                 rssid=rssid)
        if not rssid and media_info:
            # look up the id of the subscription we just created
            rssid = _subscribe.get_subscribe_id(mtype=mtype,
                                                title=name,
                                                tmdbid=media_info.tmdb_id)
        return {"code": code, "msg": msg, "page": page, "name": name, "rssid": rssid}
    @staticmethod
    def re_identification(data):
        """
        Re-run identification/transfer for unidentified records
        (``flag == "unidentification"``) or transfer-history records
        (``flag == "history"``).

        Returns ``{"retcode", "retmsg"}``: 0 on full success, -1 on lookup
        errors, 2 when any item failed (messages joined with "、").
        """
        flag = data.get("flag")
        ids = data.get("ids")
        ret_flag = True
        ret_msg = []
        _filetransfer = FileTransfer()
        if flag == "unidentification":
            for wid in ids:
                # resolve path/dest/mode from the unidentified record
                unknowninfo = _filetransfer.get_unknown_info_by_id(wid)
                if unknowninfo:
                    path = unknowninfo.PATH
                    dest_dir = unknowninfo.DEST
                    rmt_mode = ModuleConf.get_enum_item(
                        RmtMode, unknowninfo.MODE) if unknowninfo.MODE else None
                else:
                    return {"retcode": -1, "retmsg": "未查询到未识别记录"}
                if not dest_dir:
                    dest_dir = ""
                if not path:
                    return {"retcode": -1, "retmsg": "未识别路径有误"}
                succ_flag, msg = _filetransfer.transfer_media(in_from=SyncType.MAN,
                                                              rmt_mode=rmt_mode,
                                                              in_path=path,
                                                              target_dir=dest_dir)
                if succ_flag:
                    # mark the unidentified record as handled
                    _filetransfer.update_transfer_unknown_state(path)
                else:
                    ret_flag = False
                    if msg not in ret_msg:
                        ret_msg.append(msg)
        elif flag == "history":
            for wid in ids:
                # resolve path/dest/mode from the transfer-history record
                transinfo = _filetransfer.get_transfer_info_by_id(wid)
                if transinfo:
                    path = os.path.join(
                        transinfo.SOURCE_PATH, transinfo.SOURCE_FILENAME)
                    dest_dir = transinfo.DEST
                    rmt_mode = ModuleConf.get_enum_item(
                        RmtMode, transinfo.MODE) if transinfo.MODE else None
                else:
                    return {"retcode": -1, "retmsg": "未查询到转移日志记录"}
                if not dest_dir:
                    dest_dir = ""
                if not path:
                    return {"retcode": -1, "retmsg": "未识别路径有误"}
                succ_flag, msg = _filetransfer.transfer_media(in_from=SyncType.MAN,
                                                              rmt_mode=rmt_mode,
                                                              in_path=path,
                                                              target_dir=dest_dir)
                if not succ_flag:
                    ret_flag = False
                    if msg not in ret_msg:
                        ret_msg.append(msg)
        if ret_flag:
            return {"retcode": 0, "retmsg": "转移成功"}
        else:
            return {"retcode": 2, "retmsg": "、".join(ret_msg)}
    @staticmethod
    def __media_info(data):
        """
        Query media details for the detail dialog.

        Prefers data from an existing subscription (``rssid``); when the
        subscription lacks overview/poster, falls back to TMDB (by
        ``mediaid`` or by title+year search). For TV, season entries are
        included. Returns a ``{"code", ...}`` dict with display fields.
        """
        mediaid = data.get("id")
        mtype = data.get("type")
        title = data.get("title")
        year = data.get("year")
        page = data.get("page")
        rssid = data.get("rssid")
        seasons = []
        link_url = ""
        vote_average = 0
        poster_path = ""
        release_date = ""
        overview = ""
        # media type
        if mtype in MovieTypes:
            media_type = MediaType.MOVIE
        else:
            media_type = MediaType.TV
        # try subscription data first
        _subcribe = Subscribe()
        _media = Media()
        rssid_ok = False
        if rssid:
            rssid = str(rssid)
            if media_type == MediaType.MOVIE:
                rssinfo = _subcribe.get_subscribe_movies(rid=rssid)
            else:
                rssinfo = _subcribe.get_subscribe_tvs(rid=rssid)
            if not rssinfo:
                return {
                    "code": 1,
                    "retmsg": "无法查询到订阅信息",
                    "rssid": rssid,
                    "type_str": media_type.value
                }
            overview = rssinfo[rssid].get("overview")
            poster_path = rssinfo[rssid].get("poster")
            title = rssinfo[rssid].get("name")
            vote_average = rssinfo[rssid].get("vote")
            year = rssinfo[rssid].get("year")
            release_date = rssinfo[rssid].get("release_date")
            link_url = _media.get_detail_url(mtype=media_type,
                                             tmdbid=rssinfo[rssid].get("tmdbid"))
            if overview and poster_path:
                rssid_ok = True
        # subscription data incomplete: fall back to TMDB
        if not rssid_ok:
            if mediaid:
                media = WebUtils.get_mediainfo_from_id(
                    mtype=media_type, mediaid=mediaid)
            else:
                media = _media.get_media_info(
                    title=f"{title} {year}", mtype=media_type)
            if not media or not media.tmdb_info:
                return {
                    "code": 1,
                    "retmsg": "无法查询到TMDB信息",
                    "rssid": rssid,
                    "type_str": media_type.value
                }
            if not mediaid:
                mediaid = media.tmdb_id
            link_url = media.get_detail_url()
            overview = media.overview
            poster_path = media.get_poster_image()
            title = media.title
            vote_average = round(float(media.vote_average or 0), 1)
            year = media.year
            if media_type != MediaType.MOVIE:
                release_date = media.tmdb_info.get('first_air_date')
                seasons = [{
                    "text": "第%s季" % cn2an.an2cn(season.get("season_number"), mode='low'),
                    "num": season.get("season_number")} for season in
                    _media.get_tmdb_tv_seasons(tv_info=media.tmdb_info)]
            else:
                release_date = media.tmdb_info.get('release_date')
        # resolve the subscription id if we did not get one
        if not rssid:
            rssid = _subcribe.get_subscribe_id(mtype=media_type,
                                               title=title,
                                               tmdbid=mediaid)
        return {
            "code": 0,
            "type": mtype,
            "type_str": media_type.value,
            "page": page,
            "title": title,
            "vote_average": vote_average,
            "poster_path": poster_path,
            "release_date": release_date,
            "year": year,
            "overview": overview,
            "link_url": link_url,
            "tmdbid": mediaid,
            "rssid": rssid,
            "seasons": seasons
        }
    @staticmethod
    def __test_connection(data):
        """
        Connectivity test for configured modules.

        ``command`` may be a list of expressions (all evaluated in order,
        stopping at the first falsy result) or a single string; a
        "module|ClassName" string is imported dynamically and that class's
        ``get_status()`` result is used. Returns code 0 on success, 1 on
        failure.
        """
        # SECURITY: eval() executes arbitrary Python on the server — the
        # command value must only ever come from trusted, internal callers.
        command = data.get("command")
        ret = None
        if command:
            try:
                module_obj = None
                if isinstance(command, list):
                    for cmd_str in command:
                        ret = eval(cmd_str)
                        if not ret:
                            break
                else:
                    if command.find("|") != -1:
                        module = command.split("|")[0]
                        class_name = command.split("|")[1]
                        module_obj = getattr(
                            importlib.import_module(module), class_name)()
                        if hasattr(module_obj, "init_config"):
                            module_obj.init_config()
                        ret = module_obj.get_status()
                    else:
                        ret = eval(command)
                # reload configuration after the probe
                Config().init_config()
                if module_obj:
                    if hasattr(module_obj, "init_config"):
                        module_obj.init_config()
            except Exception as e:
                ret = None
                ExceptionUtils.exception_traceback(e)
            return {"code": 0 if ret else 1}
        return {"code": 0}
@staticmethod
def __user_manager(data):
"""
用户管理
"""
oper = data.get("oper")
name = data.get("name")
if oper == "add":
password = generate_password_hash(str(data.get("password")))
pris = data.get("pris")
if isinstance(pris, list):
pris = ",".join(pris)
ret = ProUser().add_user(name, password, pris)
else:
ret = ProUser().delete_user(name)
if ret == 1 or ret:
return {"code": 0, "success": False}
return {"code": -1, "success": False, 'message': '操作失败'}
@staticmethod
def __refresh_rss(data):
"""
重新搜索RSS
"""
mtype = data.get("type")
rssid = data.get("rssid")
page = data.get("page")
if mtype == "MOV":
ThreadHelper().start_thread(Subscribe().subscribe_search_movie, (rssid,))
else:
ThreadHelper().start_thread(Subscribe().subscribe_search_tv, (rssid,))
return {"code": 0, "page": page}
@staticmethod
def get_system_message(lst_time):
messages = MessageCenter().get_system_messages(lst_time=lst_time)
if messages:
lst_time = messages[0].get("time")
return {
"code": 0,
"message": messages,
"lst_time": lst_time
}
@staticmethod
def __delete_tmdb_cache(data):
"""
删除tmdb缓存
"""
if MetaHelper().delete_meta_data(data.get("cache_key")):
MetaHelper().save_meta_data()
return {"code": 0}
    @staticmethod
    def __movie_calendar_data(data):
        """
        Resolve a movie's release-calendar entry.

        Ids prefixed "DB:" are looked up on Douban, others on TMDB by
        TMDB id. Returns a ``{"code": 0, ...}`` display dict on success or
        ``{"code": 1, "retmsg": ...}`` on failure.
        """
        tid = data.get("id")
        rssid = data.get("rssid")
        if tid and tid.startswith("DB:"):
            # Douban id
            doubanid = tid.replace("DB:", "")
            douban_info = DouBan().get_douban_detail(
                doubanid=doubanid, mtype=MediaType.MOVIE)
            if not douban_info:
                return {"code": 1, "retmsg": "无法查询到豆瓣信息"}
            poster_path = douban_info.get("cover_url") or ""
            title = douban_info.get("title")
            rating = douban_info.get("rating", {}) or {}
            vote_average = rating.get("value") or "无"
            release_date = douban_info.get("pubdate")
            if release_date:
                # strip the parenthesized region suffix from the first pubdate
                release_date = re.sub(
                    r"\(.*\)", "", douban_info.get("pubdate")[0])
            if not release_date:
                return {"code": 1, "retmsg": "上映日期不正确"}
            else:
                return {"code": 0,
                        "type": "电影",
                        "title": title,
                        "start": release_date,
                        "id": tid,
                        "year": release_date[0:4] if release_date else "",
                        "poster": poster_path,
                        "vote_average": vote_average,
                        "rssid": rssid
                        }
        else:
            # TMDB id
            if tid:
                tmdb_info = Media().get_tmdb_info(mtype=MediaType.MOVIE, tmdbid=tid)
            else:
                return {"code": 1, "retmsg": "没有TMDBID信息"}
            if not tmdb_info:
                return {"code": 1, "retmsg": "无法查询到TMDB信息"}
            poster_path = Config().get_tmdbimage_url(tmdb_info.get('poster_path')) \
                if tmdb_info.get('poster_path') else ""
            title = tmdb_info.get('title')
            vote_average = tmdb_info.get("vote_average")
            release_date = tmdb_info.get('release_date')
            if not release_date:
                return {"code": 1, "retmsg": "上映日期不正确"}
            else:
                return {"code": 0,
                        "type": "电影",
                        "title": title,
                        "start": release_date,
                        "id": tid,
                        "year": release_date[0:4] if release_date else "",
                        "poster": poster_path,
                        "vote_average": vote_average,
                        "rssid": rssid
                        }
    @staticmethod
    def __tv_calendar_data(data):
        """
        Resolve a TV show's air-date calendar entries.

        Ids prefixed "DB:" are looked up on Douban (one event); others on
        TMDB by season, yielding one event per episode. Returns
        ``{"code": 0, "events": [...]}`` or ``{"code": 1, "retmsg": ...}``.
        """
        tid = data.get("id")
        season = data.get("season")
        name = data.get("name")
        rssid = data.get("rssid")
        if tid and tid.startswith("DB:"):
            # Douban id: single event from the show's pubdate
            doubanid = tid.replace("DB:", "")
            douban_info = DouBan().get_douban_detail(doubanid=doubanid, mtype=MediaType.TV)
            if not douban_info:
                return {"code": 1, "retmsg": "无法查询到豆瓣信息"}
            poster_path = douban_info.get("cover_url") or ""
            title = douban_info.get("title")
            rating = douban_info.get("rating", {}) or {}
            vote_average = rating.get("value") or "无"
            # strip the parenthesized region suffix from the first pubdate
            release_date = re.sub(r"\(.*\)", "", douban_info.get("pubdate")[0])
            if not release_date:
                return {"code": 1, "retmsg": "上映日期不正确"}
            else:
                return {
                    "code": 0,
                    "events": [{
                        "type": "电视剧",
                        "title": title,
                        "start": release_date,
                        "id": tid,
                        "year": release_date[0:4] if release_date else "",
                        "poster": poster_path,
                        "vote_average": vote_average,
                        "rssid": rssid
                    }]
                }
        else:
            # TMDB id: one event per episode of the requested season
            if tid:
                tmdb_info = Media().get_tmdb_tv_season_detail(tmdbid=tid, season=season)
            else:
                return {"code": 1, "retmsg": "没有TMDBID信息"}
            if not tmdb_info:
                return {"code": 1, "retmsg": "无法查询到TMDB信息"}
            episode_events = []
            air_date = tmdb_info.get("air_date")
            # fall back to the show-level poster when the season has none
            if not tmdb_info.get("poster_path"):
                tv_tmdb_info = Media().get_tmdb_info(mtype=MediaType.TV, tmdbid=tid)
                if tv_tmdb_info:
                    poster_path = Config().get_tmdbimage_url(tv_tmdb_info.get('poster_path'))
                else:
                    poster_path = ""
            else:
                poster_path = Config().get_tmdbimage_url(tmdb_info.get('poster_path'))
            year = air_date[0:4] if air_date else ""
            for episode in tmdb_info.get("episodes"):
                episode_events.append({
                    "type": "剧集",
                    "title": "%s 第%s季第%s集" % (
                        name,
                        season,
                        episode.get("episode_number")
                    ) if season != 1 else "%s 第%s集" % (
                        name,
                        episode.get("episode_number")
                    ),
                    "start": episode.get("air_date"),
                    "id": tid,
                    "year": year,
                    "poster": poster_path,
                    "vote_average": episode.get("vote_average") or "无",
                    "rssid": rssid
                })
            return {"code": 0, "events": episode_events}
@staticmethod
def __rss_detail(data):
rid = data.get("rssid")
mtype = data.get("rsstype")
if mtype in MovieTypes:
rssdetail = Subscribe().get_subscribe_movies(rid=rid)
if not rssdetail:
return {"code": 1}
rssdetail = list(rssdetail.values())[0]
rssdetail["type"] = "MOV"
else:
rssdetail = Subscribe().get_subscribe_tvs(rid=rid)
if not rssdetail:
return {"code": 1}
rssdetail = list(rssdetail.values())[0]
rssdetail["type"] = "TV"
return {"code": 0, "detail": rssdetail}
@staticmethod
def __modify_tmdb_cache(data):
"""
修改TMDB缓存的标题
"""
if MetaHelper().modify_meta_data(data.get("key"), data.get("title")):
MetaHelper().save_meta_data(force=True)
return {"code": 0}
    @staticmethod
    def truncate_blacklist():
        """
        Clear all file-transfer blacklist records.
        """
        FileTransfer().truncate_transfer_blacklist()
        return {"code": 0}
@staticmethod
def truncate_rsshistory():
"""
清空RSS历史记录
"""
RssHelper().truncate_rss_history()
Subscribe().truncate_rss_episodes()
return {"code": 0}
@staticmethod
def __add_brushtask(data):
"""
新增刷流任务
"""
# 输入值
brushtask_id = data.get("brushtask_id")
brushtask_name = data.get("brushtask_name")
brushtask_site = data.get("brushtask_site")
brushtask_interval = data.get("brushtask_interval")
brushtask_downloader = data.get("brushtask_downloader")
brushtask_totalsize = data.get("brushtask_totalsize")
brushtask_state = data.get("brushtask_state")
brushtask_rssurl = data.get("brushtask_rssurl")
brushtask_label = data.get("brushtask_label")
brushtask_up_limit = data.get("brushtask_up_limit")
brushtask_dl_limit = data.get("brushtask_dl_limit")
brushtask_savepath = data.get("brushtask_savepath")
brushtask_transfer = 'Y' if data.get("brushtask_transfer") else 'N'
brushtask_sendmessage = 'Y' if data.get(
"brushtask_sendmessage") else 'N'
brushtask_free = data.get("brushtask_free")
brushtask_hr = data.get("brushtask_hr")
brushtask_torrent_size = data.get("brushtask_torrent_size")
brushtask_include = data.get("brushtask_include")
brushtask_exclude = data.get("brushtask_exclude")
brushtask_dlcount = data.get("brushtask_dlcount")
brushtask_current_site_count = data.get("brushtask_current_site_count")
brushtask_current_site_dlcount = data.get("dl")
brushtask_peercount = data.get("brushtask_peercount")
brushtask_seedtime = data.get("brushtask_seedtime")
brushtask_seedratio = data.get("brushtask_seedratio")
brushtask_seedsize = data.get("brushtask_seedsize")
brushtask_dltime = data.get("brushtask_dltime")
brushtask_avg_upspeed = data.get("brushtask_avg_upspeed")
brushtask_iatime = data.get("brushtask_iatime")
brushtask_pubdate = data.get("brushtask_pubdate")
brushtask_upspeed = data.get("brushtask_upspeed")
brushtask_downspeed = data.get("brushtask_downspeed")
# 选种规则
rss_rule = {
"free": brushtask_free,
"hr": brushtask_hr,
"size": brushtask_torrent_size,
"include": brushtask_include,
"exclude": brushtask_exclude,
"dlcount": brushtask_dlcount,
"current_site_count": brushtask_current_site_count,
"current_site_dlcount": brushtask_current_site_dlcount,
"peercount": brushtask_peercount,
"pubdate": brushtask_pubdate,
"upspeed": brushtask_upspeed,
"downspeed": brushtask_downspeed
}
# 删除规则
remove_rule = {
"time": brushtask_seedtime,
"ratio": brushtask_seedratio,
"uploadsize": brushtask_seedsize,
"dltime": brushtask_dltime,
"avg_upspeed": brushtask_avg_upspeed,
"iatime": brushtask_iatime
}
# 添加记录
item = {
"name": brushtask_name,
"site": brushtask_site,
"free": brushtask_free,
"rssurl": brushtask_rssurl,
"interval": brushtask_interval,
"downloader": brushtask_downloader,
"seed_size": brushtask_totalsize,
"label": brushtask_label,
"up_limit": brushtask_up_limit,
"dl_limit": brushtask_dl_limit,
"savepath": brushtask_savepath,
"transfer": brushtask_transfer,
"state": brushtask_state,
"rss_rule": rss_rule,
"remove_rule": remove_rule,
"sendmessage": brushtask_sendmessage
}
BrushTask().update_brushtask(brushtask_id, item)
return {"code": 0}
@staticmethod
def __del_brushtask(data):
"""
删除刷流任务
"""
brush_id = data.get("id")
if brush_id:
BrushTask().delete_brushtask(brush_id)
return {"code": 0}
return {"code": 1}
@staticmethod
def __brushtask_detail(data):
"""
查询刷流任务详情
"""
brush_id = data.get("id")
brushtask = BrushTask().get_brushtask_info(brush_id)
if not brushtask:
return {"code": 1, "task": {}}
return {"code": 0, "task": brushtask}
@staticmethod
def __update_brushtask_state(data):
"""
批量暂停/开始刷流任务
"""
try:
state = data.get("state")
task_ids = data.get("ids")
_brushtask = BrushTask()
if state is not None:
if task_ids:
for tid in task_ids:
_brushtask.update_brushtask_state(state=state, brushtask_id=tid)
else:
_brushtask.update_brushtask_state(state=state)
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": "刷流任务设置失败"}
@staticmethod
def __brushtask_enable():
"""
刷流任务可用状态
"""
isBeyondOneMonth = SiteUserInfo().is_min_join_date_beyond_one_month()
return {"code": 0, "isBeyondOneMonth": isBeyondOneMonth}
def __name_test(self, data):
"""
名称识别测试
"""
name = data.get("name")
subtitle = data.get("subtitle")
if not name:
return {"code": -1}
media_info = Media().get_media_info(title=name, subtitle=subtitle)
if not media_info:
return {"code": 0, "data": {"name": "无法识别"}}
return {"code": 0, "data": self.mediainfo_dict(media_info)}
    @staticmethod
    def mediainfo_dict(media_info):
        """
        Flatten a MediaInfo object into the plain dict shape the frontend expects.

        Returns an empty dict when media_info is falsy. The TMDB deep links for
        season/episode are only built when a TMDB id is present.
        """
        if not media_info:
            return {}
        tmdb_id = media_info.tmdb_id
        tmdb_link = media_info.get_detail_url()
        tmdb_S_E_link = ""
        if tmdb_id:
            # Append /season/N and then /episode/M to the detail URL when known
            if media_info.get_season_string():
                tmdb_S_E_link = "%s/season/%s" % (tmdb_link,
                                                  media_info.get_season_seq())
                if media_info.get_episode_string():
                    tmdb_S_E_link = "%s/episode/%s" % (
                        tmdb_S_E_link, media_info.get_episode_seq())
        return {
            "type": media_info.type.value if media_info.type else "",
            "name": media_info.get_name(),
            "title": media_info.title,
            "year": media_info.year,
            "season_episode": media_info.get_season_episode_string(),
            "part": media_info.part,
            "tmdbid": tmdb_id,
            "tmdblink": tmdb_link,
            "tmdb_S_E_link": tmdb_S_E_link,
            "category": media_info.category,
            "restype": media_info.resource_type,
            "effect": media_info.resource_effect,
            "pix": media_info.resource_pix,
            "team": media_info.resource_team,
            "customization": media_info.customization,
            "video_codec": media_info.video_encode,
            "audio_codec": media_info.audio_encode,
            "org_string": media_info.org_string,
            "rev_string": media_info.rev_string,
            "ignored_words": media_info.ignored_words,
            "replaced_words": media_info.replaced_words,
            "offset_words": media_info.offset_words
        }
@staticmethod
def __rule_test(data):
title = data.get("title")
subtitle = data.get("subtitle")
size = data.get("size")
rulegroup = data.get("rulegroup")
if not title:
return {"code": -1}
meta_info = MetaInfo(title=title, subtitle=subtitle)
meta_info.size = float(size) * 1024 ** 3 if size else 0
match_flag, res_order, match_msg = \
Filter().check_torrent_filter(meta_info=meta_info,
filter_args={"rule": rulegroup})
return {
"code": 0,
"flag": match_flag,
"text": "匹配" if match_flag else "未匹配",
"order": 100 - res_order if res_order else 0
}
@staticmethod
def __net_test(data):
target = data
if target == "image.tmdb.org":
target = target + "/t/p/w500/wwemzKWzjKYJFfCeiB57q3r4Bcm.png"
if target == "qyapi.weixin.qq.com":
target = target + "/cgi-bin/message/send"
target = "https://" + target
start_time = datetime.datetime.now()
if target.find("themoviedb") != -1 \
or target.find("telegram") != -1 \
or target.find("fanart") != -1 \
or target.find("tmdb") != -1:
res = RequestUtils(proxies=Config().get_proxies(), timeout=5).get_res(target)
else:
res = RequestUtils(timeout=5).get_res(target)
seconds = int((datetime.datetime.now() -
start_time).microseconds / 1000)
if not res:
return {"res": False, "time": "%s 毫秒" % seconds}
elif res.ok:
return {"res": True, "time": "%s 毫秒" % seconds}
else:
return {"res": False, "time": "%s 毫秒" % seconds}
@staticmethod
def __get_site_activity(data):
"""
查询site活动[上传,下载,魔力值]
:param data: {"name":site_name}
:return:
"""
if not data or "name" not in data:
return {"code": 1, "msg": "查询参数错误"}
resp = {"code": 0}
resp.update(
{"dataset": SiteUserInfo().get_pt_site_activity_history(data["name"])})
return resp
@staticmethod
def __get_site_history(data):
"""
查询site 历史[上传,下载]
:param data: {"days":累计时间}
:return:
"""
if not data or "days" not in data or not isinstance(data["days"], int):
return {"code": 1, "msg": "查询参数错误"}
resp = {"code": 0}
_, _, site, upload, download = SiteUserInfo().get_pt_site_statistics_history(
data["days"] + 1, data.get("end_day", None)
)
# 调整为dataset组织数据
dataset = [["site", "upload", "download"]]
dataset.extend([[site, upload, download]
for site, upload, download in zip(site, upload, download)])
resp.update({"dataset": dataset})
return resp
@staticmethod
def __get_site_seeding_info(data):
"""
查询site 做种分布信息 大小,做种数
:param data: {"name":site_name}
:return:
"""
if not data or "name" not in data:
return {"code": 1, "msg": "查询参数错误"}
resp = {"code": 0}
seeding_info = SiteUserInfo().get_pt_site_seeding_info(
data["name"]).get("seeding_info", [])
# 调整为dataset组织数据
dataset = [["seeders", "size"]]
dataset.extend(seeding_info)
resp.update({"dataset": dataset})
return resp
@staticmethod
def __add_filtergroup(data):
"""
新增规则组
"""
name = data.get("name")
default = data.get("default")
if not name:
return {"code": -1}
Filter().add_group(name, default)
return {"code": 0}
@staticmethod
def __restore_filtergroup(data):
"""
恢复初始规则组
"""
groupids = data.get("groupids")
init_rulegroups = data.get("init_rulegroups")
_filter = Filter()
for groupid in groupids:
try:
_filter.delete_filtergroup(groupid)
except Exception as err:
ExceptionUtils.exception_traceback(err)
for init_rulegroup in init_rulegroups:
if str(init_rulegroup.get("id")) == groupid:
for sql in init_rulegroup.get("sql"):
DbHelper().excute(sql)
return {"code": 0}
@staticmethod
def __set_default_filtergroup(data):
groupid = data.get("id")
if not groupid:
return {"code": -1}
Filter().set_default_filtergroup(groupid)
return {"code": 0}
@staticmethod
def __del_filtergroup(data):
groupid = data.get("id")
Filter().delete_filtergroup(groupid)
return {"code": 0}
@staticmethod
def __add_filterrule(data):
rule_id = data.get("rule_id")
item = {
"group": data.get("group_id"),
"name": data.get("rule_name"),
"pri": data.get("rule_pri"),
"include": data.get("rule_include"),
"exclude": data.get("rule_exclude"),
"size": data.get("rule_sizelimit"),
"free": data.get("rule_free")
}
Filter().add_filter_rule(ruleid=rule_id, item=item)
return {"code": 0}
@staticmethod
def __del_filterrule(data):
ruleid = data.get("id")
Filter().delete_filterrule(ruleid)
return {"code": 0}
@staticmethod
def __filterrule_detail(data):
rid = data.get("ruleid")
groupid = data.get("groupid")
ruleinfo = Filter().get_rules(groupid=groupid, ruleid=rid)
if ruleinfo:
ruleinfo['include'] = "\n".join(ruleinfo.get("include"))
ruleinfo['exclude'] = "\n".join(ruleinfo.get("exclude"))
return {"code": 0, "info": ruleinfo}
    def get_recommend(self, data):
        """
        Dispatch a recommendation/discovery query by type+subtype and return a
        list of media cards, each annotated with library/subscription status.

        :param data: {"type", "subtype", "page", and per-branch extras
                      (tmdbid/personid/keyword/params/...)}
        :return: {"code": 0, "Items": [...]}
        """
        Type = data.get("type")
        SubType = data.get("subtype")
        CurrentPage = data.get("page")
        if not CurrentPage:
            CurrentPage = 1
        else:
            CurrentPage = int(CurrentPage)
        res_list = []
        if Type in ['MOV', 'TV', 'ALL']:
            if SubType == "hm":
                # TMDB hot movies
                res_list = Media().get_tmdb_hot_movies(CurrentPage)
            elif SubType == "ht":
                # TMDB hot TV shows
                res_list = Media().get_tmdb_hot_tvs(CurrentPage)
            elif SubType == "nm":
                # TMDB newest movies
                res_list = Media().get_tmdb_new_movies(CurrentPage)
            elif SubType == "nt":
                # TMDB newest TV shows
                res_list = Media().get_tmdb_new_tvs(CurrentPage)
            elif SubType == "dbom":
                # Douban: now in theaters
                res_list = DouBan().get_douban_online_movie(CurrentPage)
            elif SubType == "dbhm":
                # Douban: hot movies
                res_list = DouBan().get_douban_hot_movie(CurrentPage)
            elif SubType == "dbht":
                # Douban: hot TV shows
                res_list = DouBan().get_douban_hot_tv(CurrentPage)
            elif SubType == "dbdh":
                # Douban: hot animation
                res_list = DouBan().get_douban_hot_anime(CurrentPage)
            elif SubType == "dbnm":
                # Douban: newest movies
                res_list = DouBan().get_douban_new_movie(CurrentPage)
            elif SubType == "dbtop":
                # Douban: top-250 movies
                res_list = DouBan().get_douban_top250_movie(CurrentPage)
            elif SubType == "dbzy":
                # Douban: hot variety shows
                res_list = DouBan().get_douban_hot_show(CurrentPage)
            elif SubType == "dbct":
                # Douban: Chinese weekly TV chart
                res_list = DouBan().get_douban_chinese_weekly_tv(CurrentPage)
            elif SubType == "dbgt":
                # Douban: global weekly TV chart
                res_list = DouBan().get_douban_weekly_tv_global(CurrentPage)
            elif SubType == "sim":
                # "Similar to" recommendations
                TmdbId = data.get("tmdbid")
                res_list = self.__media_similar({
                    "tmdbid": TmdbId,
                    "page": CurrentPage,
                    "type": Type
                }).get("data")
            elif SubType == "more":
                # "More like this" recommendations
                TmdbId = data.get("tmdbid")
                res_list = self.__media_recommendations({
                    "tmdbid": TmdbId,
                    "page": CurrentPage,
                    "type": Type
                }).get("data")
            elif SubType == "person":
                # Works of a person
                PersonId = data.get("personid")
                res_list = self.__person_medias({
                    "personid": PersonId,
                    "type": None if Type == 'ALL' else Type,
                    "page": CurrentPage
                }).get("data")
            elif SubType == "bangumi":
                # Bangumi daily broadcast calendar
                Week = data.get("week")
                res_list = Bangumi().get_bangumi_calendar(page=CurrentPage, week=Week)
        elif Type == "SEARCH":
            # Keyword search
            Keyword = data.get("keyword")
            Source = data.get("source")
            medias = WebUtils.search_media_infos(
                keyword=Keyword, source=Source, page=CurrentPage)
            res_list = [media.to_dict() for media in medias]
        elif Type == "DOWNLOADED":
            # Recently downloaded
            res_list = self.get_downloaded({
                "page": CurrentPage
            }).get("Items")
        elif Type == "TRENDING":
            # TMDB trending (weekly)
            res_list = Media().get_tmdb_trending_all_week(page=CurrentPage)
        elif Type == "DISCOVER":
            # TMDB discover
            mtype = MediaType.MOVIE if SubType in MovieTypes else MediaType.TV
            # Filter params e.g. with_genres, with_original_language
            params = data.get("params") or {}
            res_list = Media().get_tmdb_discover(mtype=mtype, page=CurrentPage, params=params)
        elif Type == "DOUBANTAG":
            # Douban discover
            mtype = MediaType.MOVIE if SubType in MovieTypes else MediaType.TV
            # Query params
            params = data.get("params") or {}
            # Sort order (defaults to "R")
            sort = params.get("sort") or "R"
            # Selected tags
            tags = params.get("tags") or ""
            # Run the filtered discover query
            res_list = DouBan().get_douban_disover(mtype=mtype,
                                                   sort=sort,
                                                   tags=tags,
                                                   page=CurrentPage)
        # Annotate each card with library-exists / subscription status
        for res in res_list:
            fav, rssid, item_url = self.get_media_exists_info(mtype=res.get("type"),
                                                              title=res.get("title"),
                                                              year=res.get("year"),
                                                              mediaid=res.get("id"))
            res.update({
                'fav': fav,
                'rssid': rssid
            })
        return {"code": 0, "Items": res_list}
@staticmethod
def get_downloaded(data):
page = data.get("page")
Items = Downloader().get_download_history(page=page)
if Items:
return {"code": 0, "Items": [{
'id': item.TMDBID,
'orgid': item.TMDBID,
'tmdbid': item.TMDBID,
'title': item.TITLE,
'type': 'MOV' if item.TYPE == "电影" else "TV",
'media_type': item.TYPE,
'year': item.YEAR,
'vote': item.VOTE,
'image': item.POSTER,
'overview': item.TORRENT,
"date": item.DATE,
"site": item.SITE
} for item in Items]}
else:
return {"code": 0, "Items": []}
    @staticmethod
    def parse_brush_rule_string(rules: dict):
        """
        Render a brush task's selection/removal rule dict as HTML badge markup
        for the task list page.

        Rule values come in "op#value" form where op is gt/lt/bw (greater /
        less / between); "bw" values use a comma that is shown as a dash.
        Returns "" for an empty rules dict.
        """
        if not rules:
            return ""
        rule_filter_string = {"gt": ">", "lt": "<", "bw": ""}
        rule_htmls = []
        if rules.get("size"):
            sizes = rules.get("size").split("#")
            if sizes[0]:
                if sizes[1]:
                    sizes[1] = sizes[1].replace(",", "-")
                rule_htmls.append(
                    '<span class="badge badge-outline text-blue me-1 mb-1" title="种子大小">种子大小: %s %sGB</span>'
                    % (rule_filter_string.get(sizes[0]), sizes[1]))
        if rules.get("pubdate"):
            pubdates = rules.get("pubdate").split("#")
            if pubdates[0]:
                if pubdates[1]:
                    pubdates[1] = pubdates[1].replace(",", "-")
                rule_htmls.append(
                    '<span class="badge badge-outline text-blue me-1 mb-1" title="发布时间">发布时间: %s %s小时</span>'
                    % (rule_filter_string.get(pubdates[0]), pubdates[1]))
        if rules.get("upspeed"):
            rule_htmls.append('<span class="badge badge-outline text-blue me-1 mb-1" title="上传限速">上传限速: %sB/s</span>'
                              % StringUtils.str_filesize(int(rules.get("upspeed")) * 1024))
        if rules.get("downspeed"):
            rule_htmls.append('<span class="badge badge-outline text-blue me-1 mb-1" title="下载限速">下载限速: %sB/s</span>'
                              % StringUtils.str_filesize(int(rules.get("downspeed")) * 1024))
        if rules.get("include"):
            rule_htmls.append(
                '<span class="badge badge-outline text-green me-1 mb-1 text-wrap text-start" title="包含规则">包含: %s</span>'
                % rules.get("include"))
        if rules.get("hr"):
            rule_htmls.append(
                '<span class="badge badge-outline text-red me-1 mb-1" title="排除HR">排除: HR</span>')
        if rules.get("exclude"):
            rule_htmls.append(
                '<span class="badge badge-outline text-red me-1 mb-1 text-wrap text-start" title="排除规则">排除: %s</span>'
                % rules.get("exclude"))
        if rules.get("dlcount"):
            rule_htmls.append('<span class="badge badge-outline text-blue me-1 mb-1" title="同时下载数量限制">同时下载: %s</span>'
                              % rules.get("dlcount"))
        if rules.get("peercount"):
            # peercount appears in three historical formats: "#" (unset),
            # "op#value", or a bare integer from older versions
            peer_counts = None
            if rules.get("peercount") == "#":
                peer_counts = None
            elif "#" in rules.get("peercount"):
                peer_counts = rules.get("peercount").split("#")
                peer_counts[1] = peer_counts[1].replace(",", "-") if (len(peer_counts) >= 2 and peer_counts[1]) else \
                    peer_counts[1]
            else:
                try:
                    # Legacy bare-integer format compatibility
                    peer_counts = ["lt", int(rules.get("peercount"))]
                except Exception as err:
                    ExceptionUtils.exception_traceback(err)
                    pass
            if peer_counts:
                rule_htmls.append(
                    '<span class="badge badge-outline text-blue me-1 mb-1" title="当前做种人数限制">做种人数: %s %s</span>'
                    % (rule_filter_string.get(peer_counts[0]), peer_counts[1]))
        if rules.get("time"):
            times = rules.get("time").split("#")
            if times[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="做种时间">做种时间: %s %s小时</span>'
                    % (rule_filter_string.get(times[0]), times[1]))
        if rules.get("ratio"):
            ratios = rules.get("ratio").split("#")
            if ratios[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="分享率">分享率: %s %s</span>'
                    % (rule_filter_string.get(ratios[0]), ratios[1]))
        if rules.get("uploadsize"):
            uploadsizes = rules.get("uploadsize").split("#")
            if uploadsizes[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="上传量">上传量: %s %sGB</span>'
                    % (rule_filter_string.get(uploadsizes[0]), uploadsizes[1]))
        if rules.get("dltime"):
            dltimes = rules.get("dltime").split("#")
            if dltimes[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="下载耗时">下载耗时: %s %s小时</span>'
                    % (rule_filter_string.get(dltimes[0]), dltimes[1]))
        if rules.get("avg_upspeed"):
            avg_upspeeds = rules.get("avg_upspeed").split("#")
            if avg_upspeeds[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="平均上传速度">平均上传速度: %s %sKB/S</span>'
                    % (rule_filter_string.get(avg_upspeeds[0]), avg_upspeeds[1]))
        if rules.get("iatime"):
            iatimes = rules.get("iatime").split("#")
            if iatimes[0]:
                rule_htmls.append(
                    '<span class="badge badge-outline text-orange me-1 mb-1" title="未活动时间">未活动时间: %s %s小时</span>'
                    % (rule_filter_string.get(iatimes[0]), iatimes[1]))
        return "<br>".join(rule_htmls)
    @staticmethod
    def __clear_tmdb_cache():
        """
        Clear the TMDB metadata cache: in-memory data plus the on-disk file.

        NOTE(review): a failure (e.g. cache file absent) still returns code 0
        with the error text in "msg" — presumably so a missing file counts as
        success; confirm before changing to a non-zero code.
        """
        try:
            MetaHelper().clear_meta_data()
            os.remove(MetaHelper().get_meta_data_path())
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 0, "msg": str(e)}
        return {"code": 0}
@staticmethod
def __check_site_attr(data):
"""
检查站点标识
"""
site_attr = SiteConf().get_grap_conf(data.get("url"))
site_free = site_2xfree = site_hr = False
if site_attr.get("FREE"):
site_free = True
if site_attr.get("2XFREE"):
site_2xfree = True
if site_attr.get("HR"):
site_hr = True
return {"code": 0, "site_free": site_free, "site_2xfree": site_2xfree, "site_hr": site_hr}
@staticmethod
def refresh_process(data):
"""
刷新进度条
"""
detail = ProgressHelper().get_process(data.get("type"))
if detail:
return {"code": 0, "value": detail.get("value"), "text": detail.get("text")}
else:
return {"code": 1, "value": 0, "text": "正在处理..."}
@staticmethod
def __restory_backup(data):
"""
解压恢复备份文件
"""
filename = data.get("file_name")
if filename:
config_path = Config().get_config_path()
temp_path = Config().get_temp_path()
file_path = os.path.join(temp_path, filename)
try:
shutil.unpack_archive(file_path, config_path, format='zip')
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": str(e)}
finally:
if os.path.exists(file_path):
os.remove(file_path)
return {"code": 1, "msg": "文件不存在"}
@staticmethod
def __get_resume(data):
"""
获得继续观看
"""
num = data.get("num") or 12
# 实测,plex 似乎无法按照数目返回,此处手动切片
return {"code": 0, "list": MediaServer().get_resume(num)[0:num]}
@staticmethod
def __start_mediasync(data):
"""
开始媒体库同步
"""
librarys = data.get("librarys") or []
SystemConfig().set(key=SystemConfigKey.SyncLibrary, value=librarys)
ThreadHelper().start_thread(MediaServer().sync_mediaserver, ())
return {"code": 0}
@staticmethod
def __start_mediaDisplayModuleSync(data):
"""
开始媒体库同步
"""
selectedData = data.get("selected") or []
unselectedData = data.get("unselected") or []
try:
selectedModules = [ast.literal_eval(item) for item in selectedData]
if selectedModules:
for module in selectedModules:
module["selected"] = True
unselectedModules = [ast.literal_eval(item) for item in unselectedData]
if unselectedModules:
for module in unselectedModules:
module["selected"] = False
modules = selectedModules + unselectedModules
sorted_modules = sorted(modules, key=lambda x: x["id"])
sorted_modules_str = json.dumps(sorted_modules, ensure_ascii=False, indent=4)
log.debug(f"【我的媒体库】元数据: {sorted_modules_str}")
SystemConfig().set(key=SystemConfigKey.LibraryDisplayModule, value=sorted_modules)
return {"code": 0}
except Exception as e:
return {"code": 1}
@staticmethod
def __mediasync_state():
"""
获取媒体库同步数据情况
"""
status = MediaServer().get_mediasync_status()
if not status:
return {"code": 0, "text": "未同步"}
else:
return {"code": 0, "text": "电影:%s,电视剧:%s,同步时间:%s" %
(status.get("movie_count"),
status.get("tv_count"),
status.get("time"))}
    @staticmethod
    def __get_tvseason_list(data):
        """
        List the seasons of a TV show for the subscribe dialog.

        When the submitted title already names a season, only that season is
        offered; otherwise all seasons are fetched from TMDB (resolving
        non-numeric ids, e.g. douban ids, to a TMDB entry first).
        """
        tmdbid = data.get("tmdbid")
        title = data.get("title")
        if title:
            title_season = MetaInfo(title=title).begin_season
        else:
            title_season = None
        if not str(tmdbid).isdigit():
            # Non-numeric id: resolve to a media object, then read its seasons
            media_info = WebUtils.get_mediainfo_from_id(mtype=MediaType.TV,
                                                        mediaid=tmdbid)
            season_infos = Media().get_tmdb_tv_seasons(media_info.tmdb_info)
        else:
            season_infos = Media().get_tmdb_tv_seasons_byid(tmdbid=tmdbid)
        if title_season:
            seasons = [
                {
                    "text": "第%s季" % title_season,
                    "num": title_season
                }
            ]
        else:
            # cn2an renders the season number in Chinese numerals for display
            seasons = [
                {
                    "text": "第%s季" % cn2an.an2cn(season.get("season_number"), mode='low'),
                    "num": season.get("season_number")
                }
                for season in season_infos
            ]
        return {"code": 0, "seasons": seasons}
@staticmethod
def __get_userrss_task(data):
"""
获取自定义订阅详情
"""
taskid = data.get("id")
return {"code": 0, "detail": RssChecker().get_rsstask_info(taskid=taskid)}
@staticmethod
def __delete_userrss_task(data):
"""
删除自定义订阅
"""
if RssChecker().delete_userrss_task(data.get("id")):
return {"code": 0}
else:
return {"code": 1}
    @staticmethod
    def __update_userrss_task(data):
        """
        Create or update a custom RSS task.

        The form submits address/parser pairs as numbered keys
        ("address_1", "parser_1", ...); they are collected into two lists
        ordered by that numeric suffix. "uses" selects the task mode:
        "D" (download) adds recognization settings, "R" (subscribe) adds
        over-edition/sites/filter settings; anything else is rejected.
        """
        uses = data.get("uses")
        address_parser = data.get("address_parser")
        if not address_parser:
            return {"code": 1}
        # Strip the "address_"/"parser_" prefixes, sort by the numeric
        # suffix, and keep only the values — preserving the form order.
        address = list(dict(sorted(
            {k.replace("address_", ""): y for k, y in address_parser.items() if k.startswith("address_")}.items(),
            key=lambda x: int(x[0])
        )).values())
        parser = list(dict(sorted(
            {k.replace("parser_", ""): y for k, y in address_parser.items() if k.startswith("parser_")}.items(),
            key=lambda x: int(x[0])
        )).values())
        params = {
            "id": data.get("id"),
            "name": data.get("name"),
            "address": address,
            "parser": parser,
            "interval": data.get("interval"),
            "uses": uses,
            "include": data.get("include"),
            "exclude": data.get("exclude"),
            "filter_rule": data.get("rule"),
            "state": data.get("state"),
            "save_path": data.get("save_path"),
            "download_setting": data.get("download_setting"),
            "note": {"proxy": data.get("proxy")},
        }
        if uses == "D":
            params.update({
                "recognization": data.get("recognization")
            })
        elif uses == "R":
            params.update({
                "over_edition": data.get("over_edition"),
                "sites": data.get("sites"),
                "filter_args": {
                    "restype": data.get("restype"),
                    "pix": data.get("pix"),
                    "team": data.get("team")
                }
            })
        else:
            return {"code": 1}
        if RssChecker().update_userrss_task(params):
            return {"code": 0}
        else:
            return {"code": 1}
@staticmethod
def __check_userrss_task(data):
"""
检测自定义订阅
"""
try:
flag_dict = {"enable": True, "disable": False}
taskids = data.get("ids")
state = flag_dict.get(data.get("flag"))
_rsschecker = RssChecker()
if state is not None:
if taskids:
for taskid in taskids:
_rsschecker.check_userrss_task(tid=taskid, state=state)
else:
_rsschecker.check_userrss_task(state=state)
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": "自定义订阅状态设置失败"}
@staticmethod
def __get_rssparser(data):
"""
获取订阅解析器详情
"""
pid = data.get("id")
return {"code": 0, "detail": RssChecker().get_userrss_parser(pid=pid)}
@staticmethod
def __delete_rssparser(data):
"""
删除订阅解析器
"""
if RssChecker().delete_userrss_parser(data.get("id")):
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __update_rssparser(data):
"""
新增或更新订阅解析器
"""
params = {
"id": data.get("id"),
"name": data.get("name"),
"type": data.get("type"),
"format": data.get("format"),
"params": data.get("params")
}
if RssChecker().update_userrss_parser(params):
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __run_userrss(data):
RssChecker().check_task_rss(data.get("id"))
return {"code": 0}
@staticmethod
def __run_brushtask(data):
BrushTask().check_task_rss(data.get("id"))
return {"code": 0}
@staticmethod
def list_site_resources(data):
resources = Indexer().list_resources(url=data.get("site"),
page=data.get("page"),
keyword=data.get("keyword"))
if not resources:
return {"code": 1, "msg": "获取站点资源出现错误,无法连接到站点!"}
else:
return {"code": 0, "data": resources}
@staticmethod
def __list_rss_articles(data):
task_info = RssChecker().get_rsstask_info(taskid=data.get("id"))
uses = task_info.get("uses")
address_count = len(task_info.get("address"))
articles = RssChecker().get_rss_articles(data.get("id"))
count = len(articles)
if articles:
return {"code": 0, "data": articles, "count": count, "uses": uses, "address_count": address_count}
else:
return {"code": 1, "msg": "未获取到报文"}
def __rss_article_test(self, data):
taskid = data.get("taskid")
title = data.get("title")
if not taskid:
return {"code": -1}
if not title:
return {"code": -1}
media_info, match_flag, exist_flag = RssChecker(
).test_rss_articles(taskid=taskid, title=title)
if not media_info:
return {"code": 0, "data": {"name": "无法识别"}}
media_dict = self.mediainfo_dict(media_info)
media_dict.update({"match_flag": match_flag, "exist_flag": exist_flag})
return {"code": 0, "data": media_dict}
@staticmethod
def __list_rss_history(data):
downloads = []
historys = RssChecker().get_userrss_task_history(data.get("id"))
count = len(historys)
for history in historys:
params = {
"title": history.TITLE,
"downloader": history.DOWNLOADER,
"date": history.DATE
}
downloads.append(params)
if downloads:
return {"code": 0, "data": downloads, "count": count}
else:
return {"code": 1, "msg": "无下载记录"}
@staticmethod
def __rss_articles_check(data):
if not data.get("articles"):
return {"code": 2}
res = RssChecker().check_rss_articles(
taskid=data.get("taskid"),
flag=data.get("flag"),
articles=data.get("articles")
)
if res:
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __rss_articles_download(data):
if not data.get("articles"):
return {"code": 2}
res = RssChecker().download_rss_articles(
taskid=data.get("taskid"), articles=data.get("articles"))
if res:
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __add_custom_word_group(data):
try:
tmdb_id = data.get("tmdb_id")
tmdb_type = data.get("tmdb_type")
_wordshelper = WordsHelper()
_media = Media()
if tmdb_type == "tv":
if not _wordshelper.is_custom_word_group_existed(tmdbid=tmdb_id, gtype=2):
tmdb_info = _media.get_tmdb_info(mtype=MediaType.TV, tmdbid=tmdb_id)
if not tmdb_info:
return {"code": 1, "msg": "添加失败,无法查询到TMDB信息"}
_wordshelper.insert_custom_word_groups(title=tmdb_info.get("name"),
year=tmdb_info.get(
"first_air_date")[0:4],
gtype=2,
tmdbid=tmdb_id,
season_count=tmdb_info.get("number_of_seasons"))
return {"code": 0, "msg": ""}
else:
return {"code": 1, "msg": "识别词组(TMDB ID)已存在"}
elif tmdb_type == "movie":
if not _wordshelper.is_custom_word_group_existed(tmdbid=tmdb_id, gtype=1):
tmdb_info = _media.get_tmdb_info(mtype=MediaType.MOVIE, tmdbid=tmdb_id)
if not tmdb_info:
return {"code": 1, "msg": "添加失败,无法查询到TMDB信息"}
_wordshelper.insert_custom_word_groups(title=tmdb_info.get("title"),
year=tmdb_info.get(
"release_date")[0:4],
gtype=1,
tmdbid=tmdb_id,
season_count=0)
return {"code": 0, "msg": ""}
else:
return {"code": 1, "msg": "识别词组(TMDB ID)已存在"}
else:
return {"code": 1, "msg": "无法识别媒体类型"}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": str(e)}
@staticmethod
def __delete_custom_word_group(data):
try:
gid = data.get("gid")
WordsHelper().delete_custom_word_group(gid=gid)
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": str(e)}
    @staticmethod
    def __add_or_edit_custom_word(data):
        """
        Create or update one custom identify word.

        Word types ("type"): "1" block, "2" replace, "3" replace + episode
        offset, "4" episode offset only. Editing ("id" present) is implemented
        as delete-then-insert. Duplicate detection depends on type: by
        replaced-word for 1/2/3, by front/back locator pair for 4.
        """
        try:
            wid = data.get("id")
            gid = data.get("gid")
            group_type = data.get("group_type")
            replaced = data.get("new_replaced")
            replace = data.get("new_replace")
            front = data.get("new_front")
            back = data.get("new_back")
            offset = data.get("new_offset")
            whelp = data.get("new_help")
            wtype = data.get("type")
            season = data.get("season")
            enabled = data.get("enabled")
            regex = data.get("regex")
            _wordshelper = WordsHelper()
            # Episode-offset expression must reference EP and use only
            # +-*/ and digits besides it
            if wtype in ["3", "4"]:
                if not re.findall(r'EP', offset):
                    return {"code": 1, "msg": "偏移集数格式有误"}
                if re.findall(r'(?!-|\+|\*|/|[0-9]).', re.sub(r'EP', "", offset)):
                    return {"code": 1, "msg": "偏移集数格式有误"}
            if wid:
                _wordshelper.delete_custom_word(wid=wid)
            # Movie groups have no season; -2 is the sentinel
            if group_type == "1":
                season = -2
            # Type 1: block word
            if wtype == "1":
                if not _wordshelper.is_custom_words_existed(replaced=replaced):
                    _wordshelper.insert_custom_word(replaced=replaced,
                                                    replace="",
                                                    front="",
                                                    back="",
                                                    offset="",
                                                    wtype=wtype,
                                                    gid=gid,
                                                    season=season,
                                                    enabled=enabled,
                                                    regex=regex,
                                                    whelp=whelp if whelp else "")
                    return {"code": 0, "msg": ""}
                else:
                    return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
            # Type 2: replace word
            elif wtype == "2":
                if not _wordshelper.is_custom_words_existed(replaced=replaced):
                    _wordshelper.insert_custom_word(replaced=replaced,
                                                    replace=replace,
                                                    front="",
                                                    back="",
                                                    offset="",
                                                    wtype=wtype,
                                                    gid=gid,
                                                    season=season,
                                                    enabled=enabled,
                                                    regex=regex,
                                                    whelp=whelp if whelp else "")
                    return {"code": 0, "msg": ""}
                else:
                    return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
            # Type 4: episode offset
            elif wtype == "4":
                if not _wordshelper.is_custom_words_existed(front=front, back=back):
                    _wordshelper.insert_custom_word(replaced="",
                                                    replace="",
                                                    front=front,
                                                    back=back,
                                                    offset=offset,
                                                    wtype=wtype,
                                                    gid=gid,
                                                    season=season,
                                                    enabled=enabled,
                                                    regex=regex,
                                                    whelp=whelp if whelp else "")
                    return {"code": 0, "msg": ""}
                else:
                    return {"code": 1, "msg": "识别词已存在\n(前后定位词:%s@%s)" % (front, back)}
            # Type 3: replace + episode offset
            elif wtype == "3":
                if not _wordshelper.is_custom_words_existed(replaced=replaced):
                    _wordshelper.insert_custom_word(replaced=replaced,
                                                    replace=replace,
                                                    front=front,
                                                    back=back,
                                                    offset=offset,
                                                    wtype=wtype,
                                                    gid=gid,
                                                    season=season,
                                                    enabled=enabled,
                                                    regex=regex,
                                                    whelp=whelp if whelp else "")
                    return {"code": 0, "msg": ""}
                else:
                    return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
            else:
                return {"code": 1, "msg": ""}
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 1, "msg": str(e)}
@staticmethod
def __get_custom_word(data):
try:
wid = data.get("wid")
word_info = WordsHelper().get_custom_words(wid=wid)
if word_info:
word_info = word_info[0]
word = {"id": word_info.ID,
"replaced": word_info.REPLACED,
"replace": word_info.REPLACE,
"front": word_info.FRONT,
"back": word_info.BACK,
"offset": word_info.OFFSET,
"type": word_info.TYPE,
"group_id": word_info.GROUP_ID,
"season": word_info.SEASON,
"enabled": word_info.ENABLED,
"regex": word_info.REGEX,
"help": word_info.HELP, }
else:
word = {}
return {"code": 0, "data": word}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": "查询识别词失败"}
@staticmethod
def __delete_custom_words(data):
try:
_wordshelper = WordsHelper()
ids_info = data.get("ids_info")
if not ids_info:
_wordshelper.delete_custom_word()
else:
ids = [id_info.split("_")[1] for id_info in ids_info]
for wid in ids:
_wordshelper.delete_custom_word(wid=wid)
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": str(e)}
@staticmethod
def __check_custom_words(data):
try:
flag_dict = {"enable": 1, "disable": 0}
ids_info = data.get("ids_info")
enabled = flag_dict.get(data.get("flag"))
_wordshelper = WordsHelper()
if not ids_info:
_wordshelper.check_custom_word(enabled=enabled)
else:
ids = [id_info.split("_")[1] for id_info in ids_info]
for wid in ids:
_wordshelper.check_custom_word(wid=wid, enabled=enabled)
return {"code": 0, "msg": ""}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": 1, "msg": "识别词状态设置失败"}
    @staticmethod
    def __export_custom_words(data):
        """
        Export custom word groups + words as a shareable base64 code.

        The selection ("ids_info", "@"-separated "<groupid>_<wordid>" pairs)
        limits what is exported; without it everything goes. Group id "-1" is
        the virtual "general" group. The payload is a JSON dict of groups with
        nested words, joined to the user note by the "@@@@@@" separator, then
        base64-encoded.
        """
        try:
            note = data.get("note")
            ids_info = data.get("ids_info")
            group_ids = []
            word_ids = []
            group_infos = []
            word_infos = []
            _wordshelper = WordsHelper()
            if ids_info:
                ids_info = ids_info.split("@")
                for id_info in ids_info:
                    wid = id_info.split("_")
                    group_ids.append(wid[0])
                    word_ids.append(wid[1])
                for group_id in group_ids:
                    if group_id != "-1":
                        group_info = _wordshelper.get_custom_word_groups(gid=group_id)
                        if group_info:
                            group_infos.append(group_info[0])
                for word_id in word_ids:
                    word_info = _wordshelper.get_custom_words(wid=word_id)
                    if word_info:
                        word_infos.append(word_info[0])
            else:
                group_infos = _wordshelper.get_custom_word_groups()
                word_infos = _wordshelper.get_custom_words()
            export_dict = {}
            # The virtual "general" group is included when no selection was
            # made or when it was explicitly selected
            if not group_ids or "-1" in group_ids:
                export_dict["-1"] = {"id": -1,
                                     "title": "通用",
                                     "type": 1,
                                     "words": {}, }
            for group_info in group_infos:
                export_dict[str(group_info.ID)] = {"id": group_info.ID,
                                                   "title": group_info.TITLE,
                                                   "year": group_info.YEAR,
                                                   "type": group_info.TYPE,
                                                   "tmdbid": group_info.TMDBID,
                                                   "season_count": group_info.SEASON_COUNT,
                                                   "words": {}, }
            for word_info in word_infos:
                export_dict[str(word_info.GROUP_ID)]["words"][str(word_info.ID)] = {"id": word_info.ID,
                                                                                    "replaced": word_info.REPLACED,
                                                                                    "replace": word_info.REPLACE,
                                                                                    "front": word_info.FRONT,
                                                                                    "back": word_info.BACK,
                                                                                    "offset": word_info.OFFSET,
                                                                                    "type": word_info.TYPE,
                                                                                    "season": word_info.SEASON,
                                                                                    "regex": word_info.REGEX,
                                                                                    "help": word_info.HELP, }
            export_string = json.dumps(export_dict) + "@@@@@@" + str(note)
            string = base64.b64encode(
                export_string.encode("utf-8")).decode('utf-8')
            return {"code": 0, "string": string}
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 1, "msg": str(e)}
    @staticmethod
    def __analyse_import_custom_words_code(data):
        """
        Decode an import string produced by __export_custom_words and return
        its groups for preview.

        The code is base64("json@@@@@@note"); returns
        {"code": 0, "groups": [...], "note_string": ...} or
        {"code": 1, "msg": ...} on error.
        """
        try:
            import_code = data.get('import_code')
            string = base64.b64decode(import_code.encode(
                "utf-8")).decode('utf-8').split("@@@@@@")
            note_string = string[1]
            import_dict = json.loads(string[0])
            groups = []
            for group in import_dict.values():
                wid = group.get('id')
                title = group.get("title")
                year = group.get("year")
                wtype = group.get("type")
                tmdbid = group.get("tmdbid")
                season_count = group.get("season_count") or ""
                words = group.get("words")
                if tmdbid:
                    # type 1 is movie, everything else TV, per the export format
                    link = "https://www.themoviedb.org/%s/%s" % (
                        "movie" if int(wtype) == 1 else "tv", tmdbid)
                else:
                    link = ""
                groups.append({"id": wid,
                               "name": "%s(%s)" % (title, year) if year else title,
                               "link": link,
                               "type": wtype,
                               "seasons": season_count,
                               "words": words})
            return {"code": 0, "groups": groups, "note_string": note_string}
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 1, "msg": str(e)}
    @staticmethod
    def __import_custom_words(data):
        """
        Import the selected groups/words from an export code.

        "ids_info" lists "<gid>_<wid>" pairs to import. Missing word groups
        are created first (matched by tmdbid+type); each selected word is
        then inserted unless an equivalent word already exists, in which
        case the whole import aborts with code 1.
        """
        try:
            _wordshelper = WordsHelper()
            import_code = data.get('import_code')
            ids_info = data.get('ids_info')
            string = base64.b64decode(import_code.encode(
                "utf-8")).decode('utf-8').split("@@@@@@")
            import_dict = json.loads(string[0])
            import_group_ids = [id_info.split("_")[0] for id_info in ids_info]
            # maps export-side group ids to the local DB group ids
            group_id_dict = {}
            for import_group_id in import_group_ids:
                import_group_info = import_dict.get(import_group_id)
                if int(import_group_info.get("id")) == -1:
                    # "-1" is the built-in "general" group; no DB row needed
                    group_id_dict["-1"] = -1
                    continue
                title = import_group_info.get("title")
                year = import_group_info.get("year")
                gtype = import_group_info.get("type")
                tmdbid = import_group_info.get("tmdbid")
                season_count = import_group_info.get("season_count")
                if not _wordshelper.is_custom_word_group_existed(tmdbid=tmdbid, gtype=gtype):
                    _wordshelper.insert_custom_word_groups(title=title,
                                                           year=year,
                                                           gtype=gtype,
                                                           tmdbid=tmdbid,
                                                           season_count=season_count)
                group_info = _wordshelper.get_custom_word_groups(
                    tmdbid=tmdbid, gtype=gtype)
                if group_info:
                    group_id_dict[import_group_id] = group_info[0].ID
            for id_info in ids_info:
                id_info = id_info.split('_')
                import_group_id = id_info[0]
                import_word_id = id_info[1]
                import_word_info = import_dict.get(
                    import_group_id).get("words").get(import_word_id)
                gid = group_id_dict.get(import_group_id)
                replaced = import_word_info.get("replaced")
                replace = import_word_info.get("replace")
                front = import_word_info.get("front")
                back = import_word_info.get("back")
                offset = import_word_info.get("offset")
                whelp = import_word_info.get("help")
                wtype = int(import_word_info.get("type"))
                season = import_word_info.get("season")
                regex = import_word_info.get("regex")
                # types 1-3: block, replace, replace + episode offset
                if wtype in [1, 2, 3]:
                    if _wordshelper.is_custom_words_existed(replaced=replaced):
                        return {"code": 1, "msg": "识别词已存在\n(被替换词:%s)" % replaced}
                # type 4: episode offset (identified by front/back markers)
                elif wtype == 4:
                    if _wordshelper.is_custom_words_existed(front=front, back=back):
                        return {"code": 1, "msg": "识别词已存在\n(前后定位词:%s@%s)" % (front, back)}
                _wordshelper.insert_custom_word(replaced=replaced,
                                                replace=replace,
                                                front=front,
                                                back=back,
                                                offset=offset,
                                                wtype=wtype,
                                                gid=gid,
                                                season=season,
                                                enabled=1,
                                                regex=regex,
                                                whelp=whelp if whelp else "")
            return {"code": 0, "msg": ""}
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 1, "msg": str(e)}
@staticmethod
def get_categories(data):
if data.get("type") == "电影":
categories = Category().movie_categorys
elif data.get("type") == "电视剧":
categories = Category().tv_categorys
else:
categories = Category().anime_categorys
return {"code": 0, "category": list(categories), "id": data.get("id"), "value": data.get("value")}
@staticmethod
def __delete_rss_history(data):
rssid = data.get("rssid")
Rss().delete_rss_history(rssid=rssid)
return {"code": 0}
@staticmethod
def __re_rss_history(data):
rssid = data.get("rssid")
rtype = data.get("type")
rssinfo = Rss().get_rss_history(rtype=rtype, rid=rssid)
if rssinfo:
if rtype == "MOV":
mtype = MediaType.MOVIE
else:
mtype = MediaType.TV
if rssinfo[0].SEASON:
season = int(str(rssinfo[0].SEASON).replace("S", ""))
else:
season = None
code, msg, _ = Subscribe().add_rss_subscribe(mtype=mtype,
name=rssinfo[0].NAME,
year=rssinfo[0].YEAR,
channel=RssType.Auto,
season=season,
mediaid=rssinfo[0].TMDBID,
total_ep=rssinfo[0].TOTAL,
current_ep=rssinfo[0].START)
return {"code": code, "msg": msg}
else:
return {"code": 1, "msg": "订阅历史记录不存在"}
@staticmethod
def __share_filtergroup(data):
gid = data.get("id")
_filter = Filter()
group_info = _filter.get_filter_group(gid=gid)
if not group_info:
return {"code": 1, "msg": "规则组不存在"}
group_rules = _filter.get_filter_rule(groupid=gid)
if not group_rules:
return {"code": 1, "msg": "规则组没有对应规则"}
rules = []
for rule in group_rules:
rules.append({
"name": rule.ROLE_NAME,
"pri": rule.PRIORITY,
"include": rule.INCLUDE,
"exclude": rule.EXCLUDE,
"size": rule.SIZE_LIMIT,
"free": rule.NOTE
})
rule_json = {
"name": group_info[0].GROUP_NAME,
"rules": rules
}
json_string = base64.b64encode(json.dumps(
rule_json).encode("utf-8")).decode('utf-8')
return {"code": 0, "string": json_string}
@staticmethod
def __import_filtergroup(data):
content = data.get("content")
try:
_filter = Filter()
json_str = base64.b64decode(
str(content).encode("utf-8")).decode('utf-8')
json_obj = json.loads(json_str)
if json_obj:
if not json_obj.get("name"):
return {"code": 1, "msg": "数据格式不正确"}
_filter.add_group(name=json_obj.get("name"))
group_id = _filter.get_filter_groupid_by_name(
json_obj.get("name"))
if not group_id:
return {"code": 1, "msg": "数据内容不正确"}
if json_obj.get("rules"):
for rule in json_obj.get("rules"):
_filter.add_filter_rule(item={
"group": group_id,
"name": rule.get("name"),
"pri": rule.get("pri"),
"include": rule.get("include"),
"exclude": rule.get("exclude"),
"size": rule.get("size"),
"free": rule.get("free")
})
return {"code": 0, "msg": ""}
except Exception as err:
ExceptionUtils.exception_traceback(err)
return {"code": 1, "msg": "数据格式不正确,%s" % str(err)}
@staticmethod
def get_library_spacesize():
"""
查询媒体库存储空间
"""
# 磁盘空间
UsedSapce = 0
UsedPercent = 0
media = Config().get_config('media')
# 电影目录
movie_paths = media.get('movie_path')
if not isinstance(movie_paths, list):
movie_paths = [movie_paths]
# 电视目录
tv_paths = media.get('tv_path')
if not isinstance(tv_paths, list):
tv_paths = [tv_paths]
# 动漫目录
anime_paths = media.get('anime_path')
if not isinstance(anime_paths, list):
anime_paths = [anime_paths]
# 总空间、剩余空间
TotalSpace, FreeSpace = SystemUtils.calculate_space_usage(movie_paths + tv_paths + anime_paths)
if TotalSpace:
# 已使用空间
UsedSapce = TotalSpace - FreeSpace
# 百分比格式化
UsedPercent = "%0.1f" % ((UsedSapce / TotalSpace) * 100)
# 总剩余空间 格式化
if FreeSpace > 1024:
FreeSpace = "{:,} TB".format(round(FreeSpace / 1024, 2))
else:
FreeSpace = "{:,} GB".format(round(FreeSpace, 2))
# 总使用空间 格式化
if UsedSapce > 1024:
UsedSapce = "{:,} TB".format(round(UsedSapce / 1024, 2))
else:
UsedSapce = "{:,} GB".format(round(UsedSapce, 2))
# 总空间 格式化
if TotalSpace > 1024:
TotalSpace = "{:,} TB".format(round(TotalSpace / 1024, 2))
else:
TotalSpace = "{:,} GB".format(round(TotalSpace, 2))
return {"code": 0,
"UsedPercent": UsedPercent,
"FreeSpace": FreeSpace,
"UsedSapce": UsedSapce,
"TotalSpace": TotalSpace}
    @staticmethod
    def get_transfer_statistics():
        """
        Query transfer-history statistics for the last 90 days.

        Returns parallel lists: date labels plus per-type counts
        (movie / TV / anime), each row contributing its count to one
        list and zero to the other two.
        """
        Labels = []
        MovieNums = []
        TvNums = []
        AnimeNums = []
        # each statistic row: (media type, date label, count) — inferred from usage
        for statistic in FileTransfer().get_transfer_statistics(90):
            if not statistic[2]:
                continue
            # NOTE(review): the number lists are appended for every row but the
            # label only when new — assumes at most one row per (type, label);
            # otherwise lists and labels drift out of alignment. TODO confirm.
            if statistic[1] not in Labels:
                Labels.append(statistic[1])
            if statistic[0] == "电影":
                MovieNums.append(statistic[2])
                TvNums.append(0)
                AnimeNums.append(0)
            elif statistic[0] == "电视剧":
                TvNums.append(statistic[2])
                MovieNums.append(0)
                AnimeNums.append(0)
            else:
                AnimeNums.append(statistic[2])
                MovieNums.append(0)
                TvNums.append(0)
        return {
            "code": 0,
            "Labels": Labels,
            "MovieNums": MovieNums,
            "TvNums": TvNums,
            "AnimeNums": AnimeNums
        }
@staticmethod
def get_library_mediacount():
"""
查询媒体库统计数据
"""
MediaServerClient = MediaServer()
media_counts = MediaServerClient.get_medias_count()
UserCount = MediaServerClient.get_user_count()
if media_counts:
return {
"code": 0,
"Movie": "{:,}".format(media_counts.get('MovieCount')),
"Series": "{:,}".format(media_counts.get('SeriesCount')),
"Episodes": "{:,}".format(media_counts.get('EpisodeCount')) if media_counts.get(
'EpisodeCount') else "",
"Music": "{:,}".format(media_counts.get('SongCount')),
"User": UserCount
}
else:
return {"code": -1, "msg": "媒体库服务器连接失败"}
@staticmethod
def get_library_playhistory():
"""
查询媒体库播放记录
"""
return {"code": 0, "result": MediaServer().get_activity_log(30)}
    def get_search_result(self):
        """
        Return all cached search results grouped per media title.

        Results are nested as title -> season/episode key -> quality group
        (resolution+source) -> unique torrent key -> torrent list, with a
        per-title filter summary (site, free state, release group, etc.).
        """
        SearchResults = {}
        res = Searcher().get_search_results()
        total = len(res)
        for item in res:
            # quality (source / effect) and resolution, stored as JSON
            if item.RES_TYPE:
                try:
                    res_mix = json.loads(item.RES_TYPE)
                except Exception as err:
                    ExceptionUtils.exception_traceback(err)
                    continue
                respix = res_mix.get("respix") or ""
                video_encode = res_mix.get("video_encode") or ""
                restype = res_mix.get("restype") or ""
                reseffect = res_mix.get("reseffect") or ""
            else:
                restype = ""
                respix = ""
                reseffect = ""
                video_encode = ""
            # group key (source, resolution)
            group_key = re.sub(r"[-.\s@|]", "", f"{respix}_{restype}").lower()
            # group display info
            group_info = {
                "respix": respix,
                "restype": restype,
            }
            # unique torrent key (size, quality(source/effect), release group)
            unique_key = re.sub(r"[-.\s@|]", "",
                                f"{respix}_{restype}_{video_encode}_{reseffect}_{item.SIZE}_{item.OTHERINFO}").lower()
            # unique torrent display info
            unique_info = {
                "video_encode": video_encode,
                "size": item.SIZE,
                "reseffect": reseffect,
                "releasegroup": item.OTHERINFO
            }
            # result title
            title_string = f"{item.TITLE}"
            if item.YEAR:
                title_string = f"{title_string} ({item.YEAR})"
            # season/episode key for TV; movies collapse to "MOV"
            mtype = item.TYPE or ""
            SE_key = item.ES_STRING if item.ES_STRING and mtype != "MOV" else "MOV"
            media_type = {"MOV": "电影", "TV": "电视剧", "ANI": "动漫"}.get(mtype)
            # keep only the torrent labels of interest
            labels = [label for label in str(item.NOTE).split("|")
                      if label in ["官方", "官组", "中字", "国语", "特效", "特效字幕"]]
            # per-torrent payload
            torrent_item = {
                "id": item.ID,
                "seeders": item.SEEDERS,
                "enclosure": item.ENCLOSURE,
                "site": item.SITE,
                "torrent_name": item.TORRENT_NAME,
                "description": item.DESCRIPTION,
                "pageurl": item.PAGEURL,
                "uploadvalue": item.UPLOAD_VOLUME_FACTOR,
                "downloadvalue": item.DOWNLOAD_VOLUME_FACTOR,
                "size": item.SIZE,
                "respix": respix,
                "restype": restype,
                "reseffect": reseffect,
                "releasegroup": item.OTHERINFO,
                "video_encode": video_encode,
                "labels": labels
            }
            # promotion (free/upload factors)
            free_item = {
                "value": f"{item.UPLOAD_VOLUME_FACTOR} {item.DOWNLOAD_VOLUME_FACTOR}",
                "name": MetaBase.get_free_string(item.UPLOAD_VOLUME_FACTOR, item.DOWNLOAD_VOLUME_FACTOR)
            }
            # resolution fallback for the filter list
            if respix == "":
                respix = "未知分辨率"
            # release / subtitle group fallback
            if item.OTHERINFO is None:
                releasegroup = "未知"
            else:
                releasegroup = item.OTHERINFO
            # season token for the filter list
            filter_season = SE_key.split()[0] if SE_key and SE_key not in [
                "MOV", "TV"] else None
            # merge into the accumulated results
            if SearchResults.get(title_string):
                # existing title: insert into its torrent tree
                result_item = SearchResults[title_string]
                torrent_dict = SearchResults[title_string].get("torrent_dict")
                SE_dict = torrent_dict.get(SE_key)
                if SE_dict:
                    group = SE_dict.get(group_key)
                    if group:
                        unique = group.get("group_torrents").get(unique_key)
                        if unique:
                            unique["torrent_list"].append(torrent_item)
                            group["group_total"] += 1
                        else:
                            group["group_total"] += 1
                            group.get("group_torrents")[unique_key] = {
                                "unique_info": unique_info,
                                "torrent_list": [torrent_item]
                            }
                    else:
                        SE_dict[group_key] = {
                            "group_info": group_info,
                            "group_total": 1,
                            "group_torrents": {
                                unique_key: {
                                    "unique_info": unique_info,
                                    "torrent_list": [torrent_item]
                                }
                            }
                        }
                else:
                    torrent_dict[SE_key] = {
                        group_key: {
                            "group_info": group_info,
                            "group_total": 1,
                            "group_torrents": {
                                unique_key: {
                                    "unique_info": unique_info,
                                    "torrent_list": [torrent_item]
                                }
                            }
                        }
                    }
                # extend the per-title filter option lists
                torrent_filter = dict(result_item.get("filter"))
                if free_item not in torrent_filter.get("free"):
                    torrent_filter["free"].append(free_item)
                if releasegroup not in torrent_filter.get("releasegroup"):
                    torrent_filter["releasegroup"].append(releasegroup)
                if respix not in torrent_filter.get("respix"):
                    torrent_filter["respix"].append(respix)
                if item.SITE not in torrent_filter.get("site"):
                    torrent_filter["site"].append(item.SITE)
                if video_encode \
                        and video_encode not in torrent_filter.get("video"):
                    torrent_filter["video"].append(video_encode)
                if filter_season \
                        and filter_season not in torrent_filter.get("season"):
                    torrent_filter["season"].append(filter_season)
            else:
                fav, rssid = 0, None
                # subscription / library-existence flags for the title
                if item.TMDBID:
                    fav, rssid, item_url = self.get_media_exists_info(
                        mtype=mtype,
                        title=item.TITLE,
                        year=item.YEAR,
                        mediaid=item.TMDBID)
                SearchResults[title_string] = {
                    "key": item.ID,
                    "title": item.TITLE,
                    "year": item.YEAR,
                    "type_key": mtype,
                    "image": item.IMAGE,
                    "type": media_type,
                    "vote": item.VOTE,
                    "tmdbid": item.TMDBID,
                    "backdrop": item.IMAGE,
                    "poster": item.POSTER,
                    "overview": item.OVERVIEW,
                    "fav": fav,
                    "rssid": rssid,
                    "torrent_dict": {
                        SE_key: {
                            group_key: {
                                "group_info": group_info,
                                "group_total": 1,
                                "group_torrents": {
                                    unique_key: {
                                        "unique_info": unique_info,
                                        "torrent_list": [torrent_item]
                                    }
                                }
                            }
                        }
                    },
                    "filter": {
                        "site": [item.SITE],
                        "free": [free_item],
                        "releasegroup": [releasegroup],
                        "respix": [respix],
                        "video": [video_encode] if video_encode else [],
                        "season": [filter_season] if filter_season else []
                    }
                }

        # sort key that floats whole-season entries above single episodes
        def se_sort(k):
            k = re.sub(r" +|(?<=s\d)\D*?(?=e)|(?<=s\d\d)\D*?(?=e)",
                       " ", k[0], flags=re.I).split()
            return (k[0], k[1]) if len(k) > 1 else ("Z" + k[0], "ZZZ")

        # order season/episode groups and the filter lists for display
        for title, item in SearchResults.items():
            # season filter: newest first
            item["filter"]["season"].sort(reverse=True)
            # release-group filter: alphabetical, "未知" (unknown) last
            item["filter"]["releasegroup"] = sorted(item["filter"]["releasegroup"], key=lambda x: (x == "未知", x))
            # torrent tree: sort by season/episode key
            item["torrent_dict"] = sorted(item["torrent_dict"].items(),
                                          key=se_sort,
                                          reverse=True)
        return {"code": 0, "total": total, "result": SearchResults}
@staticmethod
def search_media_infos(data):
"""
根据关键字搜索相似词条
"""
SearchWord = data.get("keyword")
if not SearchWord:
return []
SearchSourceType = data.get("searchtype")
medias = WebUtils.search_media_infos(keyword=SearchWord,
source=SearchSourceType)
return {"code": 0, "result": [media.to_dict() for media in medias]}
@staticmethod
def get_movie_rss_list():
"""
查询所有电影订阅
"""
return {"code": 0, "result": Subscribe().get_subscribe_movies()}
@staticmethod
def get_tv_rss_list():
"""
查询所有电视剧订阅
"""
return {"code": 0, "result": Subscribe().get_subscribe_tvs()}
@staticmethod
def get_rss_history(data):
"""
查询所有订阅历史
"""
mtype = data.get("type")
return {"code": 0, "result": [rec.as_dict() for rec in Rss().get_rss_history(rtype=mtype)]}
@staticmethod
def get_downloading(data={}):
"""
查询正在下载的任务
"""
dl_id = data.get("id")
force_list = data.get("force_list")
MediaHander = Media()
DownloaderHandler = Downloader()
torrents = DownloaderHandler.get_downloading_progress(downloader_id=dl_id, force_list=bool(force_list))
for torrent in torrents:
# 先查询下载记录,没有再识别
name = torrent.get("name")
download_info = DownloaderHandler.get_download_history_by_downloader(
downloader=DownloaderHandler.default_downloader_id,
download_id=torrent.get("id")
)
if download_info:
name = download_info.TITLE
year = download_info.YEAR
poster_path = download_info.POSTER
se = download_info.SE
else:
media_info = MediaHander.get_media_info(title=name)
if not media_info:
torrent.update({
"title": name,
"image": ""
})
continue
year = media_info.year
name = media_info.title or media_info.get_name()
se = media_info.get_season_episode_string()
poster_path = media_info.get_poster_image()
# 拼装标题
if year:
title = "%s (%s) %s" % (name,
year,
se)
else:
title = "%s %s" % (name, se)
torrent.update({
"title": title,
"image": poster_path or ""
})
return {"code": 0, "result": torrents}
@staticmethod
def get_transfer_history(data):
"""
查询媒体整理历史记录
"""
PageNum = data.get("pagenum")
if not PageNum:
PageNum = 30
SearchStr = data.get("keyword")
CurrentPage = data.get("page")
if not CurrentPage:
CurrentPage = 1
else:
CurrentPage = int(CurrentPage)
totalCount, historys = FileTransfer().get_transfer_history(SearchStr, CurrentPage, PageNum)
historys_list = []
for history in historys:
history = history.as_dict()
sync_mode = history.get("MODE")
rmt_mode = ModuleConf.get_dictenum_key(
ModuleConf.RMT_MODES, sync_mode) if sync_mode else ""
history.update({
"SYNC_MODE": sync_mode,
"RMT_MODE": rmt_mode
})
historys_list.append(history)
TotalPage = floor(totalCount / PageNum) + 1
return {
"code": 0,
"total": totalCount,
"result": historys_list,
"totalPage": TotalPage,
"pageNum": PageNum,
"currentPage": CurrentPage
}
@staticmethod
def truncate_transfer_history():
"""
清空媒体整理历史记录
"""
if FileTransfer().get_transfer_history_count() < 1:
return {"code": 0, "result": True}
FileTransfer().truncate_transfer_history_list()
return {"code": 0, "result": True}
@staticmethod
def get_unknown_list():
"""
查询所有未识别记录
"""
Items = []
Records = FileTransfer().get_transfer_unknown_paths()
for rec in Records:
if not rec.PATH:
continue
path = rec.PATH.replace("\\", "/") if rec.PATH else ""
path_to = rec.DEST.replace("\\", "/") if rec.DEST else ""
sync_mode = rec.MODE or ""
rmt_mode = ModuleConf.get_dictenum_key(ModuleConf.RMT_MODES,
sync_mode) if sync_mode else ""
Items.append({
"id": rec.ID,
"path": path,
"to": path_to,
"name": path,
"sync_mode": sync_mode,
"rmt_mode": rmt_mode,
})
return {"code": 0, "items": Items}
@staticmethod
def get_unknown_list_by_page(data):
"""
查询所有未识别记录
"""
PageNum = data.get("pagenum")
if not PageNum:
PageNum = 30
SearchStr = data.get("keyword")
CurrentPage = data.get("page")
if not CurrentPage:
CurrentPage = 1
else:
CurrentPage = int(CurrentPage)
totalCount, Records = FileTransfer().get_transfer_unknown_paths_by_page(
SearchStr, CurrentPage, PageNum)
Items = []
for rec in Records:
if not rec.PATH:
continue
path = rec.PATH.replace("\\", "/") if rec.PATH else ""
path_to = rec.DEST.replace("\\", "/") if rec.DEST else ""
sync_mode = rec.MODE or ""
rmt_mode = ModuleConf.get_dictenum_key(ModuleConf.RMT_MODES,
sync_mode) if sync_mode else ""
Items.append({
"id": rec.ID,
"path": path,
"to": path_to,
"name": path,
"sync_mode": sync_mode,
"rmt_mode": rmt_mode,
})
TotalPage = floor(totalCount / PageNum) + 1
return {
"code": 0,
"total": totalCount,
"items": Items,
"totalPage": TotalPage,
"pageNum": PageNum,
"currentPage": CurrentPage
}
@staticmethod
def truncate_transfer_unknown():
"""
清空媒体手动整理历史记录
"""
if FileTransfer().get_transfer_unknown_count() < 1:
return {"code": 0, "result": True}
FileTransfer().truncate_transfer_unknown_list()
return {"code": 0, "result": True}
@staticmethod
def unidentification():
"""
重新识别所有未识别记录
"""
ItemIds = []
Records = FileTransfer().get_transfer_unknown_paths()
for rec in Records:
if not rec.PATH:
continue
ItemIds.append(rec.ID)
if len(ItemIds) > 0:
WebAction.re_identification({"flag": "unidentification", "ids": ItemIds})
@staticmethod
def get_customwords():
_wordshelper = WordsHelper()
words = []
words_info = _wordshelper.get_custom_words(gid=-1)
for word_info in words_info:
words.append({"id": word_info.ID,
"replaced": word_info.REPLACED,
"replace": word_info.REPLACE,
"front": word_info.FRONT,
"back": word_info.BACK,
"offset": word_info.OFFSET,
"type": word_info.TYPE,
"group_id": word_info.GROUP_ID,
"season": word_info.SEASON,
"enabled": word_info.ENABLED,
"regex": word_info.REGEX,
"help": word_info.HELP, })
groups = [{"id": "-1",
"name": "通用",
"link": "",
"type": "1",
"seasons": "0",
"words": words}]
groups_info = _wordshelper.get_custom_word_groups()
for group_info in groups_info:
gid = group_info.ID
name = "%s (%s)" % (group_info.TITLE, group_info.YEAR)
gtype = group_info.TYPE
if gtype == 1:
link = "https://www.themoviedb.org/movie/%s" % group_info.TMDBID
else:
link = "https://www.themoviedb.org/tv/%s" % group_info.TMDBID
words = []
words_info = _wordshelper.get_custom_words(gid=gid)
for word_info in words_info:
words.append({"id": word_info.ID,
"replaced": word_info.REPLACED,
"replace": word_info.REPLACE,
"front": word_info.FRONT,
"back": word_info.BACK,
"offset": word_info.OFFSET,
"type": word_info.TYPE,
"group_id": word_info.GROUP_ID,
"season": word_info.SEASON,
"enabled": word_info.ENABLED,
"regex": word_info.REGEX,
"help": word_info.HELP, })
groups.append({"id": gid,
"name": name,
"link": link,
"type": group_info.TYPE,
"seasons": group_info.SEASON_COUNT,
"words": words})
return {
"code": 0,
"result": groups
}
@staticmethod
def get_users():
"""
查询所有用户
"""
user_list = ProUser().get_users()
Users = []
for user in user_list:
pris = str(user.PRIS).split(",")
Users.append({"id": user.ID, "name": user.NAME, "pris": pris})
return {"code": 0, "result": Users}
    @staticmethod
    def get_filterrules():
        """
        Return all filter rule groups plus the built-in presets parsed
        from init_filter.sql.

        The preset file is assumed to hold pairs of INSERT statements
        (one group row followed by its rule rows) separated by ";\\n";
        the rows are picked apart with regular expressions.
        TODO confirm the parsing stays in sync with the file format.
        """
        RuleGroups = Filter().get_rule_infos()
        sql_file = os.path.join(Config().get_script_path(), "init_filter.sql")
        with open(sql_file, "r", encoding="utf-8") as f:
            sql_list = f.read().split(';\n')
        Init_RuleGroups = []
        i = 0
        while i < len(sql_list):
            rulegroup = {}
            # first statement of the pair: the group row "(<id>,'<name>',...NULL"
            rulegroup_info = re.findall(
                r"[0-9]+,'[^\"]+NULL", sql_list[i], re.I)[0].split(",")
            rulegroup['id'] = int(rulegroup_info[0])
            rulegroup['name'] = rulegroup_info[1][1:-1]
            rulegroup['rules'] = []
            rulegroup['sql'] = [sql_list[i]]
            if i + 1 < len(sql_list):
                # second statement: rule rows, one per "),\n (" separated tuple
                rules = re.findall(
                    r"[0-9]+,'[^\"]+NULL", sql_list[i + 1], re.I)[0].split("),\n (")
                for rule in rules:
                    rule_info = {}
                    rule = rule.split(",")
                    rule_info['name'] = rule[2][1:-1]
                    rule_info['include'] = rule[4][1:-1]
                    rule_info['exclude'] = rule[5][1:-1]
                    rulegroup['rules'].append(rule_info)
                    # NOTE(review): appended once per rule — possibly intended
                    # outside the loop; confirm before changing.
                    rulegroup["sql"].append(sql_list[i + 1])
            Init_RuleGroups.append(rulegroup)
            i = i + 2
        return {
            "code": 0,
            "ruleGroups": RuleGroups,
            "initRules": Init_RuleGroups
        }
def __update_directory(self, data):
"""
维护媒体库目录
"""
cfg = self.set_config_directory(Config().get_config(),
data.get("oper"),
data.get("key"),
data.get("value"),
data.get("replace_value"))
# 保存配置
Config().save_config(cfg)
return {"code": 0}
@staticmethod
def __test_site(data):
"""
测试站点连通性
"""
flag, msg, times = Sites().test_connection(data.get("id"))
code = 0 if flag else -1
return {"code": code, "msg": msg, "time": times}
    @staticmethod
    def __get_sub_path(data):
        """
        List the next-level entries of a directory.

        "dir" may be a real path, "/" (roots/drives), or one of the magic
        values *SYNC-FOLDERS* / *DOWNLOAD-FOLDERS* / *MEDIA-FOLDERS*.
        "filter" selects entry kinds (ALL / ONLYDIR / ONLYFILE /
        MEDIAFILE / SUBFILE / AUDIOTRACKFILE).
        """
        r = []
        try:
            ft = data.get("filter") or "ALL"
            d = data.get("dir")
            if not d or d == "/":
                # root: list drives on Windows, top-level dirs elsewhere
                if SystemUtils.get_system() == OsType.WINDOWS:
                    partitions = SystemUtils.get_windows_drives()
                    if partitions:
                        dirs = [os.path.join(partition, "/")
                                for partition in partitions]
                    else:
                        dirs = [os.path.join("C:/", f)
                                for f in os.listdir("C:/")]
                else:
                    dirs = [os.path.join("/", f) for f in os.listdir("/")]
            elif d == "*SYNC-FOLDERS*":
                # all configured sync source/destination directories
                sync_dirs = []
                for id, conf in Sync().get_sync_path_conf().items():
                    sync_dirs.append(conf["from"])
                    sync_dirs.append(conf["to"])
                dirs = list(set(sync_dirs))
            elif d == "*DOWNLOAD-FOLDERS*":
                # directories visible to the downloaders
                dirs = [path.rstrip('/') for path in Downloader().get_download_visit_dirs()]
            elif d == "*MEDIA-FOLDERS*":
                # configured media-library directories (movie/tv/anime/unknown)
                media_dirs = []
                movie_path = Config().get_config('media').get('movie_path')
                tv_path = Config().get_config('media').get('tv_path')
                anime_path = Config().get_config('media').get('anime_path')
                unknown_path = Config().get_config('media').get('unknown_path')
                if movie_path is not None: media_dirs.extend([path.rstrip('/') for path in movie_path])
                if tv_path is not None: media_dirs.extend([path.rstrip('/') for path in tv_path])
                if anime_path is not None: media_dirs.extend([path.rstrip('/') for path in anime_path])
                if unknown_path is not None: media_dirs.extend([path.rstrip('/') for path in unknown_path])
                dirs = list(set(media_dirs))
            else:
                # plain path: normalize, fall back to parent when not a dir
                d = os.path.normpath(unquote(d))
                if not os.path.isdir(d):
                    d = os.path.dirname(d)
                dirs = [os.path.join(d, f) for f in os.listdir(d)]
            dirs.sort()
            for ff in dirs:
                if os.path.isdir(ff):
                    if 'ONLYDIR' in ft or 'ALL' in ft:
                        r.append({
                            "path": ff.replace("\\", "/"),
                            "name": os.path.basename(ff),
                            "type": "dir",
                            "rel": os.path.dirname(ff).replace("\\", "/")
                        })
                else:
                    # files: include only kinds requested by the filter
                    ext = os.path.splitext(ff)[-1][1:]
                    flag = False
                    if 'ONLYFILE' in ft or 'ALL' in ft:
                        flag = True
                    elif "MEDIAFILE" in ft and f".{str(ext).lower()}" in RMT_MEDIAEXT:
                        flag = True
                    elif "SUBFILE" in ft and f".{str(ext).lower()}" in RMT_SUBEXT:
                        flag = True
                    elif "AUDIOTRACKFILE" in ft and f".{str(ext).lower()}" in RMT_AUDIO_TRACK_EXT:
                        flag = True
                    if flag:
                        r.append({
                            "path": ff.replace("\\", "/"),
                            "name": os.path.basename(ff),
                            "type": "file",
                            "rel": os.path.dirname(ff).replace("\\", "/"),
                            "ext": ext,
                            "size": StringUtils.str_filesize(os.path.getsize(ff))
                        })
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {
                "code": -1,
                "message": '加载路径失败: %s' % str(e)
            }
        return {
            "code": 0,
            "count": len(r),
            "data": r
        }
    @staticmethod
    def __get_filehardlinks(data):
        """
        Find the hard links of a file on the other side of its configured
        sync-directory pair.

        Only the first matching sync pair is used; the sync direction is
        reported as '→' (source side) or '←' (destination side).
        """
        def parse_hardlinks(hardlinks):
            # [shortened display path, file path, containing directory]
            paths = []
            for link in hardlinks:
                paths.append([SystemUtils.shorten_path(link["file"], 'left', 2), link["file"], link["filepath"]])
            return paths
        r = {}
        try:
            file = data.get("filepath")
            direction = ""
            hardlinks = []
            # all sync-directory pairs configured for hard links
            sync_dirs = Sync().get_filehardlinks_sync_dirs()
            # check whether the file lies inside a configured sync directory;
            # only the first match is used, the rest are ignored
            for dir in sync_dirs:
                if dir[0] and file.startswith(f"{dir[0]}/"):
                    direction = '→'
                    hardlinks = parse_hardlinks(SystemUtils().find_hardlinks(file=file, fdir=dir[1]))
                    break
                elif dir[1] and file.startswith(f"{dir[1]}/"):
                    direction = '←'
                    hardlinks = parse_hardlinks(SystemUtils().find_hardlinks(file=file, fdir=dir[0]))
                    break
            r = {
                "filepath": file,  # queried file path
                "direction": direction,  # sync direction
                "hardlinks": hardlinks  # entries: shortened path, file path, directory path
            }
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {
                "code": -1,
                "message": '加载路径失败: %s' % str(e)
            }
        return {
            "code": 0,
            "count": len(r),
            "data": r
        }
@staticmethod
def __get_dirhardlink(data):
"""
获取同步目录硬链接
"""
r = {}
try:
path = data.get("dirpath")
direction = ""
hardlink = []
locating = False
# 获取所有硬链接的同步目录设置
sync_dirs = Sync().get_filehardlinks_sync_dirs()
# 按设置遍历检查目录是否是同步目录或在同步目录内
for dir in sync_dirs:
if dir[0] and (dir[0] == path or path.startswith(f"{dir[0]}/")):
direction = '→'
hardlink = dir[0].replace(dir[0], dir[1])
locating = dir[2]
break
elif dir[1] and (dir[1] == path or path.startswith(f"{dir[1]}/")):
direction = '←'
hardlink = dir[1].replace(dir[1], dir[0])
locating = dir[2]
break
r = {
"dirpath": path, # 同步目录路径
"direction": direction, # 同步方向
"hardlink": hardlink, # 同步链接,内容为配置中对应的目录或子目录
"locating": locating # 自动定位
}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {
"code": -1,
"message": '加载路径失败: %s' % str(e)
}
return {
"code": 0,
"count": len(r),
"data": r
}
@staticmethod
def __rename_file(data):
"""
文件重命名
"""
path = data.get("path")
name = data.get("name")
if path and name:
try:
shutil.move(path, os.path.join(os.path.dirname(path), name))
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": -1, "msg": str(e)}
return {"code": 0}
def __delete_files(self, data):
"""
删除文件
"""
files = data.get("files")
if files:
# 删除文件
for file in files:
del_flag, del_msg = self.delete_media_file(filedir=os.path.dirname(file),
filename=os.path.basename(file))
if not del_flag:
log.error(del_msg)
else:
log.info(del_msg)
return {"code": 0}
    @staticmethod
    def __download_subtitle(data):
        """
        Download subtitles for a single file via the configured subtitle
        service (dispatched as an event; runs in the background).
        """
        path = data.get("path")
        name = data.get("name")
        media = Media().get_media_info(title=name)
        if not media or not media.tmdb_info:
            return {"code": -1, "msg": f"{name} 无法从TMDB查询到媒体信息"}
        # subtitle providers need the IMDB id; refetch TMDB info when missing
        if not media.imdb_id:
            media.set_tmdb_info(Media().get_tmdb_info(mtype=media.type,
                                                      tmdbid=media.tmdb_id))
        # fire the subtitle-download event for the configured handler
        EventManager().send_event(EventType.SubtitleDownload, {
            "media_info": media.to_dict(),
            "file": os.path.splitext(path)[0],
            "file_ext": os.path.splitext(name)[-1],
            "bluray": False
        })
        return {"code": 0, "msg": "字幕下载任务已提交,正在后台运行。"}
@staticmethod
def __media_path_scrap(data):
"""
刮削媒体文件夹或文件
"""
path = data.get("path")
if not path:
return {"code": -1, "msg": "请指定刮削路径"}
ThreadHelper().start_thread(Scraper().folder_scraper, (path, None, 'force_all'))
return {"code": 0, "msg": "刮削任务已提交,正在后台运行。"}
@staticmethod
def __get_download_setting(data):
sid = data.get("sid")
if sid:
download_setting = Downloader().get_download_setting(sid=sid)
else:
download_setting = list(
Downloader().get_download_setting().values())
return {"code": 0, "data": download_setting}
@staticmethod
def __update_download_setting(data):
sid = data.get("sid")
name = data.get("name")
category = data.get("category")
tags = data.get("tags")
is_paused = data.get("is_paused")
upload_limit = data.get("upload_limit")
download_limit = data.get("download_limit")
ratio_limit = data.get("ratio_limit")
seeding_time_limit = data.get("seeding_time_limit")
downloader = data.get("downloader")
Downloader().update_download_setting(sid=sid,
name=name,
category=category,
tags=tags,
is_paused=is_paused,
upload_limit=upload_limit or 0,
download_limit=download_limit or 0,
ratio_limit=ratio_limit or 0,
seeding_time_limit=seeding_time_limit or 0,
downloader=downloader)
return {"code": 0}
@staticmethod
def __delete_download_setting(data):
sid = data.get("sid")
Downloader().delete_download_setting(sid=sid)
return {"code": 0}
@staticmethod
def __update_message_client(data):
"""
更新消息设置
"""
_message = Message()
name = data.get("name")
cid = data.get("cid")
ctype = data.get("type")
config = data.get("config")
switchs = data.get("switchs")
interactive = data.get("interactive")
enabled = data.get("enabled")
if cid:
_message.delete_message_client(cid=cid)
if int(interactive) == 1:
_message.check_message_client(interactive=0, ctype=ctype)
_message.insert_message_client(name=name,
ctype=ctype,
config=config,
switchs=switchs,
interactive=interactive,
enabled=enabled)
return {"code": 0}
@staticmethod
def __delete_message_client(data):
"""
删除消息设置
"""
if Message().delete_message_client(cid=data.get("cid")):
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __check_message_client(data):
"""
维护消息设置
"""
flag = data.get("flag")
cid = data.get("cid")
ctype = data.get("type")
checked = data.get("checked")
_message = Message()
if flag == "interactive":
# TG/WX只能开启一个交互
if checked:
_message.check_message_client(interactive=0, ctype=ctype)
_message.check_message_client(cid=cid,
interactive=1 if checked else 0)
return {"code": 0}
elif flag == "enable":
_message.check_message_client(cid=cid,
enabled=1 if checked else 0)
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __get_message_client(data):
"""
获取消息设置
"""
cid = data.get("cid")
return {"code": 0, "detail": Message().get_message_client_info(cid=cid)}
@staticmethod
def __test_message_client(data):
"""
测试消息设置
"""
ctype = data.get("type")
config = json.loads(data.get("config"))
res = Message().get_status(ctype=ctype, config=config)
if res:
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __get_indexers():
"""
获取索引器
"""
return {"code": 0, "indexers": Indexer().get_indexer_dict()}
@staticmethod
def __get_download_dirs(data):
"""
获取下载目录
"""
sid = data.get("sid")
site = data.get("site")
if not sid and site:
sid = Sites().get_site_download_setting(site_name=site)
dirs = Downloader().get_download_dirs(setting=sid)
return {"code": 0, "paths": dirs}
    @staticmethod
    def __find_hardlinks(data):
        # Locate hardlinks of the given files below a search directory.
        # NOTE(review): early exits return [] while the normal path returns a
        # {"code": ...} dict — callers appear to rely on falsiness; confirm
        # before unifying the return type.
        files = data.get("files")
        file_dir = data.get("dir")
        if not files:
            return []
        if not file_dir and os.name != "nt":
            # Derive the search dir: first path component under the root
            file_dir = os.path.commonpath(files).replace("\\", "/")
            if file_dir != "/":
                file_dir = "/" + str(file_dir).split("/")[1]
            else:
                # Common path is the filesystem root itself — refuse to scan it
                return []
        hardlinks = {}
        if files:
            try:
                for file in files:
                    hardlinks[os.path.basename(file)] = SystemUtils(
                    ).find_hardlinks(file=file, fdir=file_dir)
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                return {"code": 1}
        return {"code": 0, "data": hardlinks}
@staticmethod
def __update_sites_cookie_ua(data):
"""
更新所有站点的Cookie和UA
"""
siteid = data.get("siteid")
username = data.get("username")
password = data.get("password")
twostepcode = data.get("two_step_code")
ocrflag = data.get("ocrflag")
# 保存设置
SystemConfig().set(key=SystemConfigKey.CookieUserInfo,
value={
"username": username,
"password": password,
"two_step_code": twostepcode
})
retcode, messages = SiteCookie().update_sites_cookie_ua(siteid=siteid,
username=username,
password=password,
twostepcode=twostepcode,
ocrflag=ocrflag)
return {"code": retcode, "messages": messages}
@staticmethod
def __update_site_cookie_ua(data):
"""
更新单个站点的Cookie和UA
"""
siteid = data.get("site_id")
cookie = data.get("site_cookie")
ua = data.get("site_ua")
Sites().update_site_cookie(siteid=siteid, cookie=cookie, ua=ua)
return {"code": 0, "messages": "请求发送成功"}
@staticmethod
def __set_site_captcha_code(data):
"""
设置站点验证码
"""
code = data.get("code")
value = data.get("value")
SiteCookie().set_code(code=code, value=value)
return {"code": 0}
@staticmethod
def __update_api_key(data):
"""
更新apikey
"""
signurl = data.get('site_signurl')
cookie = data.get('site_cookie')
ua = data.get('ua') or Config().get_ua()
proxy = data.get('proxy')
flag, msg = Sites().update_api_key(signurl=signurl,
cookie=cookie,
ua=ua,
proxy=proxy)
if flag:
return {"code": "0", "apikey": msg}
else:
return {"code": "400", "msg": msg}
@staticmethod
def __update_torrent_remove_task(data):
"""
更新自动删种任务
"""
flag, msg = TorrentRemover().update_torrent_remove_task(data=data)
if not flag:
return {"code": 1, "msg": msg}
else:
return {"code": 0}
@staticmethod
def __get_torrent_remove_task(data=None):
"""
获取自动删种任务
"""
if data:
tid = data.get("tid")
else:
tid = None
return {"code": 0, "detail": TorrentRemover().get_torrent_remove_tasks(taskid=tid)}
@staticmethod
def __delete_torrent_remove_task(data):
"""
删除自动删种任务
"""
tid = data.get("tid")
flag = TorrentRemover().delete_torrent_remove_task(taskid=tid)
if flag:
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __get_remove_torrents(data):
"""
获取满足自动删种任务的种子
"""
tid = data.get("tid")
flag, torrents = TorrentRemover().get_remove_torrents(taskid=tid)
if not flag or not torrents:
return {"code": 1, "msg": "未获取到符合处理条件种子"}
return {"code": 0, "data": torrents}
@staticmethod
def __auto_remove_torrents(data):
"""
执行自动删种任务
"""
tid = data.get("tid")
TorrentRemover().auto_remove_torrents(taskids=tid)
return {"code": 0}
@staticmethod
def __get_site_favicon(data):
"""
获取站点图标
"""
sitename = data.get("name")
return {"code": 0, "icon": Sites().get_site_favicon(site_name=sitename)}
@staticmethod
def __list_brushtask_torrents(data):
"""
获取刷流任务的种子明细
"""
results = BrushTask().get_brushtask_torrents(brush_id=data.get("id"),
active=False)
if not results:
return {"code": 1, "msg": "未下载种子或未获取到种子明细"}
return {"code": 0, "data": [item.as_dict() for item in results]}
    @staticmethod
    def __set_system_config(data):
        """
        Persist one system setting (database-backed key/value).
        NOTE(review): ``not value`` also rejects legitimate falsy values
        (0, "", False) — confirm whether that is intended.
        """
        key = data.get("key")
        value = data.get("value")
        if not key or not value:
            return {"code": 1}
        try:
            SystemConfig().set(key=key, value=value)
            return {"code": 0}
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            return {"code": 1}
@staticmethod
def get_site_user_statistics(data):
"""
获取站点用户统计信息
"""
sites = data.get("sites")
encoding = data.get("encoding") or "RAW"
sort_by = data.get("sort_by")
sort_on = data.get("sort_on")
site_hash = data.get("site_hash")
statistics = SiteUserInfo().get_site_user_statistics(sites=sites, encoding=encoding)
if sort_by and sort_on in ["asc", "desc"]:
if sort_on == "asc":
statistics.sort(key=lambda x: x[sort_by])
else:
statistics.sort(key=lambda x: x[sort_by], reverse=True)
if site_hash == "Y":
for item in statistics:
item["site_hash"] = StringUtils.md5_hash(item.get("site"))
return {"code": 0, "data": statistics}
@staticmethod
def send_plugin_message(data):
"""
发送插件消息
"""
title = data.get("title")
text = data.get("text") or ""
image = data.get("image") or ""
Message().send_plugin_message(title=title, text=text, image=image)
return {"code": 0}
@staticmethod
def send_custom_message(data):
"""
发送自定义消息
"""
title = data.get("title")
text = data.get("text") or ""
image = data.get("image") or ""
message_clients = data.get("message_clients")
if not message_clients:
return {"code": 1, "msg": "未选择消息服务"}
Message().send_custom_message(clients=message_clients, title=title, text=text, image=image)
return {"code": 0}
@staticmethod
def get_rmt_modes():
RmtModes = ModuleConf.RMT_MODES_LITE if SystemUtils.is_lite_version(
) else ModuleConf.RMT_MODES
return [{
"value": value,
"name": name.value
} for value, name in RmtModes.items()]
    def media_detail(self, data):
        """
        Return full detail of one media item (TMDB info, existence and
        subscription state, seasons, cast, links) for the detail page.
        """
        # TMDBID or DB:douban-id
        tmdbid = data.get("tmdbid")
        mtype = MediaType.MOVIE if data.get(
            "type") in MovieTypes else MediaType.TV
        if not tmdbid:
            return {"code": 1, "msg": "未指定媒体ID"}
        media_info = WebUtils.get_mediainfo_from_id(
            mtype=mtype, mediaid=tmdbid)
        # Bail out when TMDB has no info for this id
        if not media_info or not media_info.tmdb_info:
            return {
                "code": 1,
                "msg": "无法查询到TMDB信息"
            }
        # Existence / subscription state (1=subscribed, 2=downloaded, 0=none)
        fav, rssid, item_url = self.get_media_exists_info(mtype=mtype,
                                                          title=media_info.title,
                                                          year=media_info.year,
                                                          mediaid=media_info.tmdb_id)
        MediaHandler = Media()
        MediaServerHandler = MediaServer()
        # Seasons of the show (empty for movies)
        seasons = MediaHandler.get_tmdb_tv_seasons(media_info.tmdb_info)
        # Flag each season that already exists in the media server
        if seasons:
            for season in seasons:
                season.update({
                    "state": True if MediaServerHandler.check_item_exists(
                        mtype=mtype,
                        title=media_info.title,
                        year=media_info.year,
                        tmdbid=media_info.tmdb_id,
                        season=season.get("season_number")) else False
                })
        return {
            "code": 0,
            "data": {
                "tmdbid": media_info.tmdb_id,
                "douban_id": media_info.douban_id,
                "background": MediaHandler.get_tmdb_backdrops(tmdbinfo=media_info.tmdb_info),
                "image": media_info.get_poster_image(),
                "vote": media_info.vote_average,
                "year": media_info.year,
                "title": media_info.title,
                "genres": MediaHandler.get_tmdb_genres_names(tmdbinfo=media_info.tmdb_info),
                "overview": media_info.overview,
                "runtime": StringUtils.str_timehours(media_info.runtime),
                "fact": MediaHandler.get_tmdb_factinfo(media_info),
                "crews": MediaHandler.get_tmdb_crews(tmdbinfo=media_info.tmdb_info, nums=6),
                "actors": MediaHandler.get_tmdb_cats(mtype=mtype, tmdbid=media_info.tmdb_id),
                "link": media_info.get_detail_url(),
                "douban_link": media_info.get_douban_detail_url(),
                "fav": fav,
                "item_url": item_url,
                "rssid": rssid,
                "seasons": seasons
            }
        }
@staticmethod
def __media_similar(data):
"""
查询TMDB相似媒体
"""
tmdbid = data.get("tmdbid")
page = data.get("page") or 1
mtype = MediaType.MOVIE if data.get(
"type") in MovieTypes else MediaType.TV
if not tmdbid:
return {"code": 1, "msg": "未指定TMDBID"}
if mtype == MediaType.MOVIE:
result = Media().get_movie_similar(tmdbid=tmdbid, page=page)
else:
result = Media().get_tv_similar(tmdbid=tmdbid, page=page)
return {"code": 0, "data": result}
@staticmethod
def __media_recommendations(data):
"""
查询TMDB同类推荐媒体
"""
tmdbid = data.get("tmdbid")
page = data.get("page") or 1
mtype = MediaType.MOVIE if data.get(
"type") in MovieTypes else MediaType.TV
if not tmdbid:
return {"code": 1, "msg": "未指定TMDBID"}
if mtype == MediaType.MOVIE:
result = Media().get_movie_recommendations(tmdbid=tmdbid, page=page)
else:
result = Media().get_tv_recommendations(tmdbid=tmdbid, page=page)
return {"code": 0, "data": result}
@staticmethod
def __media_person(data):
"""
根据TMDBID或关键字查询TMDB演员
"""
tmdbid = data.get("tmdbid")
mtype = MediaType.MOVIE if data.get("type") in MovieTypes else MediaType.TV
keyword = data.get("keyword")
if not tmdbid and not keyword:
return {"code": 1, "msg": "未指定TMDBID或关键字"}
if tmdbid:
result = Media().get_tmdb_cats(tmdbid=tmdbid, mtype=mtype)
else:
result = Media().search_tmdb_person(name=keyword)
return {"code": 0, "data": result}
@staticmethod
def __person_medias(data):
"""
查询演员参演作品
"""
personid = data.get("personid")
page = data.get("page") or 1
if data.get("type"):
mtype = MediaType.MOVIE if data.get("type") in MovieTypes else MediaType.TV
else:
mtype = None
if not personid:
return {"code": 1, "msg": "未指定演员ID"}
return {"code": 0, "data": Media().get_person_medias(personid=personid,
mtype=mtype,
page=page)}
@staticmethod
def __save_user_script(data):
"""
保存用户自定义脚本
"""
script = data.get("javascript") or ""
css = data.get("css") or ""
SystemConfig().set(key=SystemConfigKey.CustomScript,
value={
"css": css,
"javascript": script
})
return {"code": 0, "msg": "保存成功"}
@staticmethod
def __run_directory_sync(data):
"""
执行单个目录的目录同步
"""
ThreadHelper().start_thread(Sync().transfer_sync, (data.get("sid"),))
return {"code": 0, "msg": "执行成功"}
@staticmethod
def __update_plugin_config(data):
"""
保存插件配置
"""
plugin_id = data.get("plugin")
config = data.get("config")
if not plugin_id:
return {"code": 1, "msg": "数据错误"}
PluginManager().save_plugin_config(pid=plugin_id, conf=config)
PluginManager().reload_plugin(plugin_id)
return {"code": 0, "msg": "保存成功"}
    @staticmethod
    def get_media_exists_info(mtype, title, year, mediaid):
        """
        获取媒体存在标记:是否存在、是否订阅
        :param: mtype 媒体类型
        :param: title 媒体标题
        :param: year 媒体年份
        :param: mediaid TMDBID/DB:豆瓣ID/BG:Bangumi的ID
        :return: 1-已订阅/2-已下载/0-不存在未订阅, RSSID, 如果已下载,还会有对应的媒体库的播放地址链接
        """
        # Numeric ids are TMDB ids; prefixed ids (DB:/BG:) are left as None here
        if str(mediaid).isdigit():
            tmdbid = mediaid
        else:
            tmdbid = None
        if mtype in MovieTypes:
            rssid = Subscribe().get_subscribe_id(mtype=MediaType.MOVIE,
                                                 title=title,
                                                 year=year,
                                                 tmdbid=tmdbid)
        else:
            if not tmdbid:
                # No TMDB id: parse title to split the show name from the season
                meta_info = MetaInfo(title=title)
                title = meta_info.get_name()
                season = meta_info.get_season_string()
                if season:
                    # Season subscriptions are not keyed by year
                    year = None
            else:
                season = None
            rssid = Subscribe().get_subscribe_id(mtype=MediaType.TV,
                                                 title=title,
                                                 year=year,
                                                 season=season,
                                                 tmdbid=tmdbid)
        item_url = None
        if rssid:
            # Already subscribed
            fav = "1"
        else:
            # Not subscribed — check whether the media server already has it
            item_id = MediaServer().check_item_exists(mtype=mtype, title=title, year=year, tmdbid=tmdbid)
            if item_id:
                # Already downloaded
                fav = "2"
                item_url = MediaServer().get_play_url(item_id=item_id)
            else:
                # Neither subscribed nor downloaded
                fav = "0"
        return fav, rssid, item_url
@staticmethod
def __get_season_episodes(data=None):
"""
查询TMDB剧集情况
"""
tmdbid = data.get("tmdbid")
title = data.get("title")
year = data.get("year")
season = 1 if data.get("season") is None else data.get("season")
if not tmdbid:
return {"code": 1, "msg": "TMDBID为空"}
episodes = Media().get_tmdb_season_episodes(tmdbid=tmdbid,
season=season)
MediaServerHandler = MediaServer()
for episode in episodes:
episode.update({
"state": True if MediaServerHandler.check_item_exists(
mtype=MediaType.TV,
title=title,
year=year,
tmdbid=tmdbid,
season=season,
episode=episode.get("episode_number")) else False
})
return {
"code": 0,
"episodes": episodes
}
@staticmethod
def get_user_menus():
"""
查询用户菜单
"""
# 需要过滤的菜单
ignore = []
# 获取可用菜单
menus = current_user.get_usermenus(ignore=ignore)
return {
"code": 0,
"menus": menus,
"level": current_user.level
}
@staticmethod
def get_top_menus():
"""
查询顶底菜单列表
"""
return {
"code": 0,
"menus": current_user.get_topmenus()
}
@staticmethod
def auth_user_level(data=None):
"""
用户认证
"""
if data:
site = data.get("site")
params = data.get("params")
else:
site, params = None, {}
state, msg = ProUser().check_user(site, params)
if state:
return {"code": 0, "msg": "认证成功"}
return {"code": 1, "msg": f"{msg or '认证失败,请检查合作站点账号是否正常!'}"}
@staticmethod
def __update_downloader(data):
"""
更新下载器
"""
did = data.get("did")
name = data.get("name")
dtype = data.get("type")
enabled = data.get("enabled")
transfer = data.get("transfer")
only_nastool = data.get("only_nastool")
match_path = data.get("match_path")
rmt_mode = data.get("rmt_mode")
config = data.get("config")
if not isinstance(config, str):
config = json.dumps(config)
download_dir = data.get("download_dir")
if not isinstance(download_dir, str):
download_dir = json.dumps(download_dir)
Downloader().update_downloader(did=did,
name=name,
dtype=dtype,
enabled=enabled,
transfer=transfer,
only_nastool=only_nastool,
match_path=match_path,
rmt_mode=rmt_mode,
config=config,
download_dir=download_dir)
return {"code": 0}
@staticmethod
def __del_downloader(data):
"""
删除下载器
"""
did = data.get("did")
Downloader().delete_downloader(did=did)
return {"code": 0}
@staticmethod
def __check_downloader(data):
"""
检查下载器
"""
did = data.get("did")
if not did:
return {"code": 1}
checked = data.get("checked")
flag = data.get("flag")
enabled, transfer, only_nastool, match_path = None, None, None, None
if flag == "enabled":
enabled = 1 if checked else 0
elif flag == "transfer":
transfer = 1 if checked else 0
elif flag == "only_nastool":
only_nastool = 1 if checked else 0
elif flag == "match_path":
match_path = 1 if checked else 0
Downloader().check_downloader(did=did,
enabled=enabled,
transfer=transfer,
only_nastool=only_nastool,
match_path=match_path)
return {"code": 0}
@staticmethod
def __get_downloaders(data):
"""
获取下载器
"""
def add_is_default(dl_conf, defualt_id):
dl_conf["is_default"] = str(dl_conf["id"]) == defualt_id
return dl_conf
did = data.get("did")
downloader = Downloader()
resp = downloader.get_downloader_conf(did=did)
default_dl_id = downloader.default_downloader_id
if did:
"""
单个下载器 conf
"""
return {"code": 0, "detail": add_is_default(copy.deepcopy(resp), default_dl_id) if resp else None}
else:
"""
所有下载器 conf
"""
confs = copy.deepcopy(resp)
for key in confs:
add_is_default(confs[key], default_dl_id)
return {"code": 0, "detail": confs}
@staticmethod
def __test_downloader(data):
"""
测试下载器
"""
dtype = data.get("type")
config = json.loads(data.get("config"))
res = Downloader().get_status(dtype=dtype, config=config)
if res:
return {"code": 0}
else:
return {"code": 1}
@staticmethod
def __get_indexer_statistics():
"""
获取索引器统计数据
"""
dataset = [["indexer", "avg"]]
result = Indexer().get_indexer_statistics() or []
dataset.extend([[ret[0], round(ret[4], 1)] for ret in result])
return {
"code": 0,
"data": [{
"name": ret[0],
"total": ret[1],
"fail": ret[2],
"success": ret[3],
"avg": round(ret[4], 1),
} for ret in result],
"dataset": dataset
}
@staticmethod
def user_statistics():
"""
强制刷新站点数据,并发送站点统计的消息
"""
# 强制刷新站点数据,并发送站点统计的消息
SiteUserInfo().refresh_site_data_now()
@staticmethod
def get_default_rss_setting(data):
"""
获取默认订阅设置
"""
match data.get("mtype"):
case "TV":
default_rss_setting = Subscribe().default_rss_setting_tv
case "MOV":
default_rss_setting = Subscribe().default_rss_setting_mov
case _:
default_rss_setting = {}
if default_rss_setting:
return {"code": 0, "data": default_rss_setting}
return {"code": 1}
@staticmethod
def get_movie_rss_items():
"""
获取所有电影订阅项目
"""
RssMovieItems = [
{
"id": movie.get("tmdbid"),
"rssid": movie.get("id")
} for movie in Subscribe().get_subscribe_movies().values() if movie.get("tmdbid")
]
return {"code": 0, "result": RssMovieItems}
@staticmethod
def get_tv_rss_items():
"""
获取所有电视剧订阅项目
"""
# 电视剧订阅
RssTvItems = [
{
"id": tv.get("tmdbid"),
"rssid": tv.get("id"),
"season": int(str(tv.get('season')).replace("S", "")),
"name": tv.get("name"),
} for tv in Subscribe().get_subscribe_tvs().values() if tv.get('season') and tv.get("tmdbid")
]
# 自定义订阅
RssTvItems += RssChecker().get_userrss_mediainfos()
# 电视剧订阅去重
Uniques = set()
UniqueTvItems = []
for item in RssTvItems:
unique = f"{item.get('id')}_{item.get('season')}"
if unique not in Uniques:
Uniques.add(unique)
UniqueTvItems.append(item)
return {"code": 0, "result": UniqueTvItems}
def get_ical_events(self):
"""
获取ical日历事件
"""
Events = []
# 电影订阅
RssMovieItems = self.get_movie_rss_items().get("result")
for movie in RssMovieItems:
info = self.__movie_calendar_data(movie)
if info.get("id"):
Events.append(info)
# 电视剧订阅
RssTvItems = self.get_tv_rss_items().get("result")
for tv in RssTvItems:
infos = self.__tv_calendar_data(tv).get("events")
if infos and isinstance(infos, list):
for info in infos:
if info.get("id"):
Events.append(info)
return {"code": 0, "result": Events}
@staticmethod
def install_plugin(data, reload=True):
"""
安装插件
"""
module_id = data.get("id")
if not module_id:
return {"code": -1, "msg": "参数错误"}
# 用户已安装插件列表
user_plugins = SystemConfig().get(SystemConfigKey.UserInstalledPlugins) or []
if module_id not in user_plugins:
user_plugins.append(module_id)
PluginHelper.install(module_id)
# 保存配置
SystemConfig().set(SystemConfigKey.UserInstalledPlugins, user_plugins)
# 重新加载插件
if reload:
PluginManager().init_config()
return {"code": 0, "msg": "插件安装成功"}
@staticmethod
def uninstall_plugin(data):
"""
卸载插件
"""
module_id = data.get("id")
if not module_id:
return {"code": -1, "msg": "参数错误"}
# 用户已安装插件列表
user_plugins = SystemConfig().get(SystemConfigKey.UserInstalledPlugins) or []
if module_id in user_plugins:
user_plugins.remove(module_id)
# 保存配置
SystemConfig().set(SystemConfigKey.UserInstalledPlugins, user_plugins)
# 重新加载插件
PluginManager().init_config()
return {"code": 0, "msg": "插件卸载功"}
@staticmethod
def get_plugin_apps():
"""
获取插件列表
"""
plugins = PluginManager().get_plugin_apps(current_user.level)
statistic = PluginHelper.statistic()
return {"code": 0, "result": plugins, "statistic": statistic}
@staticmethod
def get_plugin_page(data):
"""
查询插件的额外数据
"""
plugin_id = data.get("id")
if not plugin_id:
return {"code": 1, "msg": "参数错误"}
title, content, func = PluginManager().get_plugin_page(pid=plugin_id)
return {"code": 0, "title": title, "content": content, "func": func}
@staticmethod
def get_plugin_state(data):
"""
获取插件状态
"""
plugin_id = data.get("id")
if not plugin_id:
return {"code": 1, "msg": "参数错误"}
state = PluginManager().get_plugin_state(plugin_id)
return {"code": 0, "state": state}
@staticmethod
def get_plugins_conf():
Plugins = PluginManager().get_plugins_conf(current_user.level)
return {"code": 0, "result": Plugins}
@staticmethod
def update_category_config(data):
"""
保存二级分类配置
"""
text = data.get("config") or ''
# 保存配置
category_path = Config().category_path
if category_path:
with open(category_path, "w", encoding="utf-8") as f:
f.write(text)
return {"code": 0, "msg": "保存成功"}
@staticmethod
def get_category_config(data):
"""
获取二级分类配置
"""
category_name = data.get("category_name")
if not category_name:
return {"code": 1, "msg": "请输入二级分类策略名称"}
if category_name == "config":
return {"code": 1, "msg": "非法二级分类策略名称"}
category_path = os.path.join(Config().get_config_path(), f"{category_name}.yaml")
if not os.path.exists(category_path):
return {"code": 1, "msg": "请保存生成配置文件"}
# 读取category配置文件数据
with open(category_path, "r", encoding="utf-8") as f:
category_text = f.read()
return {"code": 0, "text": category_text}
@staticmethod
def backup(full_backup=False, bk_path=None):
"""
@param full_backup 是否完整备份
@param bk_path 自定义备份路径
"""
try:
# 创建备份文件夹
config_path = Path(Config().get_config_path())
backup_file = f"bk_{time.strftime('%Y%m%d%H%M%S')}"
if bk_path:
backup_path = Path(bk_path) / backup_file
else:
backup_path = config_path / "backup_file" / backup_file
backup_path.mkdir(parents=True)
# 把现有的相关文件进行copy备份
shutil.copy(f'{config_path}/config.yaml', backup_path)
shutil.copy(f'{config_path}/default-category.yaml', backup_path)
shutil.copy(f'{config_path}/user.db', backup_path)
# 完整备份不删除表
if not full_backup:
conn = sqlite3.connect(f'{backup_path}/user.db')
cursor = conn.cursor()
# 执行操作删除不需要备份的表
table_list = [
'SEARCH_RESULT_INFO',
'RSS_TORRENTS',
'DOUBAN_MEDIAS',
'TRANSFER_HISTORY',
'TRANSFER_UNKNOWN',
'TRANSFER_BLACKLIST',
'SYNC_HISTORY',
'DOWNLOAD_HISTORY',
'alembic_version'
]
for table in table_list:
cursor.execute(f"""DROP TABLE IF EXISTS {table};""")
conn.commit()
cursor.close()
conn.close()
zip_file = str(backup_path) + '.zip'
if os.path.exists(zip_file):
zip_file = str(backup_path) + '.zip'
shutil.make_archive(str(backup_path), 'zip', str(backup_path))
shutil.rmtree(str(backup_path))
return zip_file
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
@staticmethod
def get_system_processes():
"""
获取系统进程
"""
return {"code": 0, "data": SystemUtils.get_all_processes()}
@staticmethod
def run_plugin_method(data):
"""
运行插件方法
"""
plugin_id = data.get("plugin_id")
method = data.get("method")
if not plugin_id or not method:
return {"code": 1, "msg": "参数错误"}
data.pop("plugin_id")
data.pop("method")
result = PluginManager().run_plugin_method(pid=plugin_id, method=method, **data)
return {"code": 0, "result": result}
def get_commands(self):
"""
获取命令列表
"""
return [{
"id": cid,
"name": cmd.get("desc")
} for cid, cmd in self._commands.items()] + [{
"id": item.get("cmd"),
"name": item.get("desc")
} for item in PluginManager().get_plugin_commands()]
import base64
import datetime
import hashlib
import mimetypes
import os.path
import re
import time
import traceback
import urllib
import xml.dom.minidom
from functools import wraps
from math import floor
from pathlib import Path
from threading import Lock
from urllib import parse
from urllib.parse import quote, unquote
from dotenv import load_dotenv
from flask import Flask, request, json, render_template, make_response, session, send_from_directory, send_file, \
redirect, Response
from flask_compress import Compress
from flask_login import LoginManager, login_user, login_required, current_user
from flask_sock import Sock
from icalendar import Calendar, Event, Alarm
from simple_websocket import ConnectionClosed
from werkzeug.middleware.proxy_fix import ProxyFix
import log
from app.brushtask import BrushTask
from app.conf import ModuleConf, SystemConfig
from app.downloader import Downloader
from app.filter import Filter
from app.helper import SecurityHelper, MetaHelper, ChromeHelper, ThreadHelper
from app.indexer import Indexer
from app.media.meta import MetaInfo
from app.mediaserver import MediaServer
from app.message import Message
from app.plugins import EventManager
from app.rsschecker import RssChecker
from app.sites import Sites, SiteUserInfo
from app.subscribe import Subscribe
from app.sync import Sync
from app.torrentremover import TorrentRemover
from app.utils import DomUtils, SystemUtils, ExceptionUtils, StringUtils
from app.utils.types import *
from config import PT_TRANSFER_INTERVAL, Config, TMDB_API_DOMAINS
from web.action import WebAction
from web.apiv1 import apiv1_bp
from web.backend.WXBizMsgCrypt3 import WXBizMsgCrypt
from web.backend.pro_user import ProUser
from web.backend.wallpaper import get_login_wallpaper
from web.backend.web_utils import WebUtils
from web.security import require_auth
# Load optional .flaskenv sitting next to the project root
flask_dir = Path(__file__).resolve().parent.parent
flask_env_path = flask_dir / ".flaskenv"
if flask_env_path.is_file():
    print(f"正在加载flask环境变量: {str(flask_env_path)}")
    load_dotenv(dotenv_path=flask_env_path)
else:
    print("flask.env 文件不存在")
# Lock guarding config-file access
ConfigLock = Lock()
# Flask App
App = Flask(__name__)
App.wsgi_app = ProxyFix(App.wsgi_app)
App.config['JSON_AS_ASCII'] = False
App.config['JSON_SORT_KEYS'] = False
App.config['SOCK_SERVER_OPTIONS'] = {'ping_interval': 25}
App.config['SESSION_REFRESH_EACH_REQUEST'] = False
# NOTE(review): random secret key means sessions do not survive restarts
App.secret_key = os.urandom(24)
App.permanent_session_lifetime = datetime.timedelta(days=30)
# Flask Socket
Sock = Sock(App)
# Enable response compression
Compress(App)
# Login manager
LoginManager = LoginManager()
LoginManager.login_view = "login"
LoginManager.init_app(App)
# SSE log-streaming state
LoggingSource = ""
LoggingLock = Lock()
# Blueprint registration
App.register_blueprint(apiv1_bp, url_prefix="/api/v1")
# fix Windows registry stuff
mimetypes.add_type('application/javascript', '.js')
mimetypes.add_type('text/css', '.css')
@App.after_request
def add_header(r):
    """
    统一添加Http头,标用缓存,避免Flask多线程+Chrome内核会发生的静态资源加载出错的问题
    r.headers["Cache-Control"] = "no-cache, no-store, max-age=0"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    """
    # NOTE(review): the cache-busting header lines above sit inside the
    # docstring, so this hook currently changes nothing — confirm whether
    # they were disabled on purpose.
    return r
# 定义获取登录用户的方法
@LoginManager.user_loader
def load_user(user_id):
return ProUser().get(user_id)
# 404 handler
@App.errorhandler(404)
def page_not_found(error):
    """Render the 404 page."""
    return render_template("404.html", error=error), 404
# 500 handler
@App.errorhandler(500)
def page_server_error(error):
    """Render the 500 page."""
    return render_template("500.html", error=error), 500
def action_login_check(func):
    """
    Decorator: reject unauthenticated callers of an action endpoint
    with a {"code": -1} payload instead of a redirect.
    """
    @wraps(func)
    def login_check(*args, **kwargs):
        if current_user.is_authenticated:
            return func(*args, **kwargs)
        return {"code": -1, "msg": "用户未登录"}

    return login_check
# Main entry: GET serves/validates the session, POST performs the login
@App.route('/', methods=['GET', 'POST'])
def login():
    def redirect_to_navigation():
        """
        Redirect to the navigation shell after a successful login.
        """
        # Remember the active user
        Config().current_user = current_user.username
        # Re-init the media server for this user
        MediaServer().init_config()
        # Jump to the requested sub-page, if any
        if GoPage and GoPage != 'web':
            return redirect('/web#' + GoPage)
        else:
            return redirect('/web')

    def redirect_to_login(errmsg=''):
        """
        Render the login page, optionally with an error message.
        """
        image_code, img_title, img_link = get_login_wallpaper()
        return render_template('login.html',
                               GoPage=GoPage,
                               image_code=image_code,
                               img_title=img_title,
                               img_link=img_link,
                               err_msg=errmsg)

    # GET: already-authenticated users go straight in
    if request.method == 'GET':
        GoPage = request.args.get("next") or ""
        if GoPage.startswith('/'):
            GoPage = GoPage[1:]
        if current_user.is_authenticated:
            userid = current_user.id
            username = current_user.username
            if userid is None or username is None:
                return redirect_to_login()
            else:
                # Valid session
                return redirect_to_navigation()
        else:
            return redirect_to_login()
    # POST: validate the submitted credentials
    else:
        GoPage = request.form.get('next') or ""
        if GoPage.startswith('/'):
            GoPage = GoPage[1:]
        username = request.form.get('username')
        password = request.form.get('password')
        remember = request.form.get('remember')
        if not username:
            return redirect_to_login('请输入用户名')
        user_info = ProUser().get_user(username)
        if not user_info:
            return redirect_to_login('用户名或密码错误')
        # Verify the password
        if user_info.verify_password(password):
            # Create the user session ("remember" makes it persistent)
            login_user(user_info)
            session.permanent = True if remember else False
            # Logged in
            return redirect_to_navigation()
        else:
            return redirect_to_login('用户名或密码错误')
@App.route('/web', methods=['POST', 'GET'])
@login_required
def web():
    """Render the main navigation shell with all global UI configuration."""
    go_page = request.args.get("next") or ""
    media_conf = Config().get_config('media')
    # Default transfer mode falls back to hard links
    sync_mod = media_conf.get('default_rmt_mode') or "link"
    search_params = ModuleConf.TORRENT_SEARCH_PARAMS
    use_douban = Config().get_config("laboratory").get("use_douban_titles")
    action = WebAction()
    return render_template('navigation.html',
                           GoPage=go_page,
                           CurrentUser=current_user,
                           SystemFlag=SystemUtils.get_system().value,
                           TMDBFlag=1 if Config().get_config('app').get('rmt_tmdbkey') else 0,
                           AppVersion=WebUtils.get_current_version(),
                           RestypeDict=search_params.get("restype"),
                           PixDict=search_params.get("pix"),
                           SyncMod=sync_mod,
                           SiteFavicons=Sites().get_site_favicon(),
                           RmtModeDict=action.get_rmt_modes(),
                           Indexers=Indexer().get_indexers(),
                           SearchSource="douban" if use_douban else "tmdb",
                           CustomScriptCfg=SystemConfig().get(SystemConfigKey.CustomScript),
                           DefaultPath=media_conf.get('media_default_path'),
                           Menus=action.get_user_menus().get("menus") or [],
                           Commands=action.get_commands())
# Dashboard
@App.route('/index', methods=['POST', 'GET'])
@login_required
def index():
    """Render the dashboard: library counts, activity, space and modules."""
    action = WebAction()
    media_server = MediaServer()
    # Media server type
    ms_type = Config().get_config('media').get('media_server')
    # Library counts; code 0 means the server answered
    media_counts = action.get_library_mediacount()
    server_ok = media_counts.get("code") == 0
    # Play history
    activity = action.get_library_playhistory().get("result")
    # Disk space
    spaces = action.get_library_spacesize()
    # Libraries and which of them are synced/displayed
    librarys = media_server.get_libraries()
    library_sync_conf = SystemConfig().get(SystemConfigKey.SyncLibrary) or []
    library_manage_conf = SystemConfig().get(SystemConfigKey.LibraryDisplayModule) or []
    if not library_manage_conf:
        all_modules = [MyMediaLibraryType.MINE,
                       MyMediaLibraryType.WATCHING,
                       MyMediaLibraryType.NEWESTADD]
        library_manage_conf = [{"id": pos, "name": module.value, "selected": True}
                               for pos, module in enumerate(all_modules)]
    return render_template("index.html",
                           ServerSucess=server_ok,
                           MediaCount={'MovieCount': media_counts.get("Movie"),
                                       'SeriesCount': media_counts.get("Series"),
                                       'SongCount': media_counts.get("Music"),
                                       "EpisodeCount": media_counts.get("Episodes")},
                           Activitys=activity,
                           UserCount=media_counts.get("User"),
                           FreeSpace=spaces.get("FreeSpace"),
                           TotalSpace=spaces.get("TotalSpace"),
                           UsedSapce=spaces.get("UsedSapce"),
                           UsedPercent=spaces.get("UsedPercent"),
                           MediaServerType=ms_type,
                           Librarys=librarys,
                           LibrarySyncConf=library_sync_conf,
                           LibraryManageConf=library_manage_conf,
                           Resumes=media_server.get_resume(),
                           Latests=media_server.get_latest())
# Torrent search results
@App.route('/search', methods=['POST', 'GET'])
@login_required
def search():
    """Render the torrent search-result page."""
    # Permissions of the logged-in user
    if current_user.is_authenticated:
        pris = ProUser().get_user(current_user.username).get("pris")
    else:
        pris = ""
    res = WebAction().get_search_result()
    return render_template("search.html",
                           UserPris=str(pris).split(","),
                           Count=res.get("total"),
                           Results=res.get("result"),
                           SiteDict=Indexer().get_indexer_hash_dict(),
                           UPCHAR=chr(8593))
# Movie subscriptions
@App.route('/movie_rss', methods=['POST', 'GET'])
@login_required
def movie_rss():
    """Render the movie subscription page."""
    items = WebAction().get_movie_rss_list().get("result")
    rule_groups = {str(group["id"]): group["name"]
                   for group in Filter().get_rule_groups()}
    return render_template("rss/movie_rss.html",
                           Count=len(items),
                           RuleGroups=rule_groups,
                           DownloadSettings=Downloader().get_download_setting(),
                           Items=items)
# TV subscriptions
@App.route('/tv_rss', methods=['POST', 'GET'])
@login_required
def tv_rss():
    """Render the TV subscription page."""
    items = WebAction().get_tv_rss_list().get("result")
    rule_groups = {str(group["id"]): group["name"]
                   for group in Filter().get_rule_groups()}
    return render_template("rss/tv_rss.html",
                           Count=len(items),
                           RuleGroups=rule_groups,
                           DownloadSettings=Downloader().get_download_setting(),
                           Items=items)
# Subscription history
@App.route('/rss_history', methods=['POST', 'GET'])
@login_required
def rss_history():
    """Render the subscription history page for the given media type."""
    mtype = request.args.get("t")
    history = WebAction().get_rss_history({"type": mtype}).get("result")
    return render_template("rss/rss_history.html",
                           Count=len(history),
                           Items=history,
                           Type=mtype)
# Subscription calendar
@App.route('/rss_calendar', methods=['POST', 'GET'])
@login_required
def rss_calendar():
    """Render the subscription calendar page."""
    today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    action = WebAction()
    return render_template("rss/rss_calendar.html",
                           Today=today,
                           RssMovieItems=action.get_movie_rss_items().get("result"),
                           RssTvItems=action.get_tv_rss_items().get("result"))
# Site maintenance
@App.route('/site', methods=['POST', 'GET'])
@login_required
def sites():
    """Render the site maintenance page."""
    rule_groups = {str(group["id"]): group["name"]
                   for group in Filter().get_rule_groups()}
    download_settings = {did: attr["name"]
                         for did, attr in Downloader().get_download_setting().items()}
    sysconfig = SystemConfig()
    return render_template("site/site.html",
                           Sites=Sites().get_sites(),
                           RuleGroups=rule_groups,
                           DownloadSettings=download_settings,
                           ChromeOk=ChromeHelper().get_status(),
                           CookieCloudCfg=sysconfig.get(SystemConfigKey.CookieCloud),
                           CookieUserInfoCfg=sysconfig.get(SystemConfigKey.CookieUserInfo))
# Site list
@App.route('/sitelist', methods=['POST', 'GET'])
@login_required
def sitelist():
    """Render the list of supported (non-public, non-plugin) indexer sites."""
    indexer_sites = Indexer().get_indexer_dict(check=False, public=False, plugins=False)
    return render_template("site/sitelist.html",
                           Sites=indexer_sites,
                           Count=len(indexer_sites))
# 唤起App中转页面
@App.route('/open', methods=['POST', 'GET'])
def open_app():
return render_template("openapp.html")
# 站点资源页面
@App.route('/resources', methods=['POST', 'GET'])
@login_required
def resources():
site_domain = request.args.get("site")
site_name = request.args.get("title")
page = request.args.get("page") or 0
keyword = request.args.get("keyword")
Results = WebAction().list_site_resources({
"site": site_domain,
"page": page,
"keyword": keyword
}).get("data") or []
return render_template("site/resources.html",
Results=Results,
SiteDomain=site_domain,
Title=site_name,
KeyWord=keyword,
TotalCount=len(Results),
PageRange=range(0, 10),
CurrentPage=int(page),
TotalPage=10)
# 推荐页面
@App.route('/recommend', methods=['POST', 'GET'])
@login_required
def recommend():
Type = request.args.get("type") or ""
SubType = request.args.get("subtype") or ""
Title = request.args.get("title") or ""
SubTitle = request.args.get("subtitle") or ""
CurrentPage = request.args.get("page") or 1
Week = request.args.get("week") or ""
TmdbId = request.args.get("tmdbid") or ""
PersonId = request.args.get("personid") or ""
Keyword = request.args.get("keyword") or ""
Source = request.args.get("source") or ""
FilterKey = request.args.get("filter") or ""
Params = json.loads(request.args.get("params")) if request.args.get("params") else {}
return render_template("discovery/recommend.html",
Type=Type,
SubType=SubType,
Title=Title,
CurrentPage=CurrentPage,
Week=Week,
TmdbId=TmdbId,
PersonId=PersonId,
SubTitle=SubTitle,
Keyword=Keyword,
Source=Source,
Filter=FilterKey,
FilterConf=ModuleConf.DISCOVER_FILTER_CONF.get(FilterKey) if FilterKey else {},
Params=Params)
# 推荐页面
@App.route('/ranking', methods=['POST', 'GET'])
@login_required
def ranking():
return render_template("discovery/ranking.html",
DiscoveryType="RANKING")
# 豆瓣电影
@App.route('/douban_movie', methods=['POST', 'GET'])
@login_required
def douban_movie():
return render_template("discovery/recommend.html",
Type="DOUBANTAG",
SubType="MOV",
Title="豆瓣电影",
Filter="douban_movie",
FilterConf=ModuleConf.DISCOVER_FILTER_CONF.get('douban_movie'))
# 豆瓣电视剧
@App.route('/douban_tv', methods=['POST', 'GET'])
@login_required
def douban_tv():
return render_template("discovery/recommend.html",
Type="DOUBANTAG",
SubType="TV",
Title="豆瓣电视剧",
Filter="douban_tv",
FilterConf=ModuleConf.DISCOVER_FILTER_CONF.get('douban_tv'))
@App.route('/tmdb_movie', methods=['POST', 'GET'])
@login_required
def tmdb_movie():
return render_template("discovery/recommend.html",
Type="DISCOVER",
SubType="MOV",
Title="TMDB电影",
Filter="tmdb_movie",
FilterConf=ModuleConf.DISCOVER_FILTER_CONF.get('tmdb_movie'))
@App.route('/tmdb_tv', methods=['POST', 'GET'])
@login_required
def tmdb_tv():
return render_template("discovery/recommend.html",
Type="DISCOVER",
SubType="TV",
Title="TMDB电视剧",
Filter="tmdb_tv",
FilterConf=ModuleConf.DISCOVER_FILTER_CONF.get('tmdb_tv'))
# Bangumi每日放送
@App.route('/bangumi', methods=['POST', 'GET'])
@login_required
def discovery_bangumi():
return render_template("discovery/ranking.html",
DiscoveryType="BANGUMI")
# 媒体详情页面
@App.route('/media_detail', methods=['POST', 'GET'])
@login_required
def media_detail():
TmdbId = request.args.get("id")
Type = request.args.get("type")
return render_template("discovery/mediainfo.html",
TmdbId=TmdbId,
Type=Type)
# 演职人员页面
@App.route('/discovery_person', methods=['POST', 'GET'])
@login_required
def discovery_person():
TmdbId = request.args.get("tmdbid")
Title = request.args.get("title")
SubTitle = request.args.get("subtitle")
Type = request.args.get("type")
Keyword = request.args.get("keyword")
return render_template("discovery/person.html",
TmdbId=TmdbId,
Title=Title,
SubTitle=SubTitle,
Type=Type,
Keyword=Keyword)
# 正在下载页面
@App.route('/downloading', methods=['POST', 'GET'])
@login_required
def downloading():
DispTorrents = WebAction().get_downloading().get("result")
return render_template("download/downloading.html",
DownloadCount=len(DispTorrents),
Torrents=DispTorrents)
# 近期下载页面
@App.route('/downloaded', methods=['POST', 'GET'])
@login_required
def downloaded():
CurrentPage = request.args.get("page") or 1
return render_template("discovery/recommend.html",
Type='DOWNLOADED',
Title='近期下载',
CurrentPage=CurrentPage)
@App.route('/torrent_remove', methods=['POST', 'GET'])
@login_required
def torrent_remove():
Downloaders = Downloader().get_downloader_conf_simple()
TorrentRemoveTasks = TorrentRemover().get_torrent_remove_tasks()
return render_template("download/torrent_remove.html",
Downloaders=Downloaders,
DownloaderConfig=ModuleConf.TORRENTREMOVER_DICT,
Count=len(TorrentRemoveTasks),
TorrentRemoveTasks=TorrentRemoveTasks)
    # Statistics page
    @App.route('/statistics', methods=['POST', 'GET'])
    @login_required
    def statistics():
        """Render per-site upload/download/seeding statistics.

        Query args:
            refresh_site:  repeatable; refresh only the named sites
            refresh_force: any truthy value forces a refresh of all sites
        """
        # individual sites the caller asked to refresh
        refresh_site = request.args.getlist("refresh_site")
        # force-refresh everything
        refresh_force = True if request.args.get("refresh_force") else False
        # grand totals across all sites
        TotalUpload = 0
        TotalDownload = 0
        TotalSeedingSize = 0
        TotalSeeding = 0
        # per-site series for the charts
        SiteNames = []
        SiteUploads = []
        SiteDownloads = []
        SiteRatios = []
        SiteErrs = {}
        # per-site traffic data
        SiteData = SiteUserInfo().get_site_data(specify_sites=refresh_site, force=refresh_force)
        if isinstance(SiteData, dict):
            for name, data in SiteData.items():
                if not data:
                    continue
                up = data.get("upload", 0)
                dl = data.get("download", 0)
                ratio = data.get("ratio", 0)
                seeding = data.get("seeding", 0)
                seeding_size = data.get("seeding_size", 0)
                err_msg = data.get("err_msg", "")
                # error message is recorded even for sites skipped below
                SiteErrs.update({name: err_msg})
                # skip sites with no traffic data at all
                if not up and not dl and not ratio:
                    continue
                # skip sites whose counters are not plain integers
                if not str(up).isdigit() or not str(dl).isdigit():
                    continue
                # count each site only once
                if name not in SiteNames:
                    SiteNames.append(name)
                    TotalUpload += int(up)
                    TotalDownload += int(dl)
                    TotalSeeding += int(seeding)
                    TotalSeedingSize += int(seeding_size)
                    SiteUploads.append(int(up))
                    SiteDownloads.append(int(dl))
                    SiteRatios.append(round(float(ratio), 1))
        # recent per-site upload/download summary (currently disabled)
        # CurrentUpload, CurrentDownload, _, _, _ = SiteUserInfo().get_pt_site_statistics_history(
        #     days=2)
        # per-site user statistics
        SiteUserStatistics = WebAction().get_site_user_statistics({"encoding": "DICT"}).get("data")
        return render_template("site/statistics.html",
                               TotalDownload=TotalDownload,
                               TotalUpload=TotalUpload,
                               TotalSeedingSize=TotalSeedingSize,
                               TotalSeeding=TotalSeeding,
                               SiteDownloads=SiteDownloads,
                               SiteUploads=SiteUploads,
                               SiteRatios=SiteRatios,
                               SiteNames=SiteNames,
                               SiteErr=SiteErrs,
                               SiteUserStatistics=SiteUserStatistics)
# 刷流任务页面
@App.route('/brushtask', methods=['POST', 'GET'])
@login_required
def brushtask():
# 站点列表
CfgSites = Sites().get_sites(brush=True)
# 下载器列表
Downloaders = Downloader().get_downloader_conf_simple()
# 任务列表
Tasks = BrushTask().get_brushtask_info().values()
return render_template("site/brushtask.html",
Count=len(Tasks),
Sites=CfgSites,
Tasks=Tasks,
Downloaders=Downloaders)
    # Services page
    @App.route('/service', methods=['POST', 'GET'])
    @login_required
    def service():
        """Render the scheduled-services overview page.

        Enriches the current user's visible services with a display interval
        and an ON/OFF state derived from the live configuration, and removes
        services that are not applicable.
        """
        # all filter rule groups
        RuleGroups = Filter().get_rule_groups()
        # all sync directories
        SyncPaths = Sync().get_sync_path_conf()
        # services visible to the current user
        Services = current_user.get_services()
        pt = Config().get_config('pt')
        # RSS subscription download
        if "rssdownload" in Services:
            pt_check_interval = pt.get('pt_check_interval')
            if str(pt_check_interval).isdigit():
                # interval is stored in seconds; shown in minutes
                tim_rssdownload = str(round(int(pt_check_interval) / 60)) + " 分钟"
                rss_state = 'ON'
            else:
                tim_rssdownload = ""
                rss_state = 'OFF'
            Services['rssdownload'].update({
                'time': tim_rssdownload,
                'state': rss_state,
            })
        # subscription search
        if "subscribe_search_all" in Services:
            search_rss_interval = pt.get('search_rss_interval')
            if str(search_rss_interval).isdigit():
                # display floor of 6 hours
                if int(search_rss_interval) < 6:
                    search_rss_interval = 6
                tim_rsssearch = str(int(search_rss_interval)) + " 小时"
                rss_search_state = 'ON'
            else:
                tim_rsssearch = ""
                rss_search_state = 'OFF'
            Services['subscribe_search_all'].update({
                'time': tim_rsssearch,
                'state': rss_search_state,
            })
        # downloaded-file transfer
        if "pttransfer" in Services:
            pt_monitor = Downloader().monitor_downloader_ids
            if pt_monitor:
                tim_pttransfer = str(round(PT_TRANSFER_INTERVAL / 60)) + " 分钟"
                sta_pttransfer = 'ON'
            else:
                tim_pttransfer = ""
                sta_pttransfer = 'OFF'
            Services['pttransfer'].update({
                'time': tim_pttransfer,
                'state': sta_pttransfer,
            })
        # directory sync: hide the card entirely when no path is monitored
        if "sync" in Services:
            if Sync().monitor_sync_path_ids:
                Services['sync'].update({
                    'state': 'ON'
                })
            else:
                Services.pop('sync')
        # system processes: only meaningful inside docker with visible processes
        if "processes" in Services:
            if not SystemUtils.is_docker() or not SystemUtils.get_all_processes():
                Services.pop('processes')
        return render_template("service.html",
                               Count=len(Services),
                               RuleGroups=RuleGroups,
                               SyncPaths=SyncPaths,
                               SchedulerTasks=Services)
# 历史记录页面
@App.route('/history', methods=['POST', 'GET'])
@login_required
def history():
pagenum = request.args.get("pagenum")
keyword = request.args.get("s") or ""
current_page = request.args.get("page")
Result = WebAction().get_transfer_history({"keyword": keyword, "page": current_page, "pagenum": pagenum})
PageRange = WebUtils.get_page_range(current_page=Result.get("currentPage"),
total_page=Result.get("totalPage"))
return render_template("rename/history.html",
TotalCount=Result.get("total"),
Count=len(Result.get("result")),
Historys=Result.get("result"),
Search=keyword,
CurrentPage=Result.get("currentPage"),
TotalPage=Result.get("totalPage"),
PageRange=PageRange,
PageNum=Result.get("currentPage"))
# TMDB缓存页面
@App.route('/tmdbcache', methods=['POST', 'GET'])
@login_required
def tmdbcache():
page_num = request.args.get("pagenum")
if not page_num:
page_num = 30
search_str = request.args.get("s")
if not search_str:
search_str = ""
current_page = request.args.get("page")
if not current_page:
current_page = 1
else:
current_page = int(current_page)
total_count, tmdb_caches = MetaHelper().dump_meta_data(search_str, current_page, page_num)
total_page = floor(total_count / page_num) + 1
page_range = WebUtils.get_page_range(current_page=current_page,
total_page=total_page)
return render_template("rename/tmdbcache.html",
TotalCount=total_count,
Count=len(tmdb_caches),
TmdbCaches=tmdb_caches,
Search=search_str,
CurrentPage=current_page,
TotalPage=total_page,
PageRange=page_range,
PageNum=page_num)
# 手工识别页面
@App.route('/unidentification', methods=['POST', 'GET'])
@login_required
def unidentification():
pagenum = request.args.get("pagenum")
keyword = request.args.get("s") or ""
current_page = request.args.get("page")
Result = WebAction().get_unknown_list_by_page({"keyword": keyword, "page": current_page, "pagenum": pagenum})
PageRange = WebUtils.get_page_range(current_page=Result.get("currentPage"),
total_page=Result.get("totalPage"))
return render_template("rename/unidentification.html",
TotalCount=Result.get("total"),
Count=len(Result.get("items")),
Items=Result.get("items"),
Search=keyword,
CurrentPage=Result.get("currentPage"),
TotalPage=Result.get("totalPage"),
PageRange=PageRange,
PageNum=Result.get("currentPage"))
# 文件管理页面
@App.route('/mediafile', methods=['POST', 'GET'])
@login_required
def mediafile():
media_default_path = Config().get_config('media').get('media_default_path')
if media_default_path:
DirD = media_default_path
else:
download_dirs = Downloader().get_download_visit_dirs()
if download_dirs:
try:
DirD = os.path.commonpath(download_dirs).replace("\\", "/")
except Exception as err:
print(str(err))
DirD = "/"
else:
DirD = "/"
DirR = request.args.get("dir")
return render_template("rename/mediafile.html",
Dir=DirR or DirD)
# 基础设置页面
@App.route('/basic', methods=['POST', 'GET'])
@login_required
def basic():
proxy = Config().get_config('app').get("proxies", {}).get("http")
if proxy:
proxy = proxy.replace("http://", "")
RmtModeDict = WebAction().get_rmt_modes()
CustomScriptCfg = SystemConfig().get(SystemConfigKey.CustomScript)
ScraperConf = SystemConfig().get(SystemConfigKey.UserScraperConf) or {}
return render_template("setting/basic.html",
Config=Config().get_config(),
Proxy=proxy,
RmtModeDict=RmtModeDict,
CustomScriptCfg=CustomScriptCfg,
CurrentUser=current_user,
ScraperNfo=ScraperConf.get("scraper_nfo") or {},
ScraperPic=ScraperConf.get("scraper_pic") or {},
TmdbDomains=TMDB_API_DOMAINS)
# 自定义识别词设置页面
@App.route('/customwords', methods=['POST', 'GET'])
@login_required
def customwords():
groups = WebAction().get_customwords().get("result")
return render_template("setting/customwords.html",
Groups=groups,
GroupsCount=len(groups))
# 目录同步页面
@App.route('/directorysync', methods=['POST', 'GET'])
@login_required
def directorysync():
RmtModeDict = WebAction().get_rmt_modes()
SyncPaths = Sync().get_sync_path_conf()
return render_template("setting/directorysync.html",
SyncPaths=SyncPaths,
SyncCount=len(SyncPaths),
RmtModeDict=RmtModeDict)
# 下载器页面
@App.route('/downloader', methods=['POST', 'GET'])
@login_required
def downloader():
DefaultDownloader = Downloader().default_downloader_id
Downloaders = Downloader().get_downloader_conf()
DownloadersCount = len(Downloaders)
Categories = {
x: WebAction().get_categories({
"type": x
}).get("category") for x in ["电影", "电视剧", "动漫"]
}
RmtModeDict = WebAction().get_rmt_modes()
return render_template("setting/downloader.html",
Downloaders=Downloaders,
DefaultDownloader=DefaultDownloader,
DownloadersCount=DownloadersCount,
Categories=Categories,
RmtModeDict=RmtModeDict,
DownloaderConf=ModuleConf.DOWNLOADER_CONF)
# 下载设置页面
@App.route('/download_setting', methods=['POST', 'GET'])
@login_required
def download_setting():
DefaultDownloadSetting = Downloader().default_download_setting_id
Downloaders = Downloader().get_downloader_conf_simple()
DownloadSetting = Downloader().get_download_setting()
return render_template("setting/download_setting.html",
DownloadSetting=DownloadSetting,
DefaultDownloadSetting=DefaultDownloadSetting,
Downloaders=Downloaders,
Count=len(DownloadSetting))
# 索引器页面
@App.route('/indexer', methods=['POST', 'GET'])
@login_required
def indexer():
# 只有选中的索引器才搜索
indexers = Indexer().get_indexers(check=False)
private_count = len([item.id for item in indexers if not item.public])
public_count = len([item.id for item in indexers if item.public])
indexer_sites = SystemConfig().get(SystemConfigKey.UserIndexerSites)
return render_template("setting/indexer.html",
Config=Config().get_config(),
PrivateCount=private_count,
PublicCount=public_count,
Indexers=indexers,
IndexerConf=ModuleConf.INDEXER_CONF,
IndexerSites=indexer_sites)
# 媒体库页面
@App.route('/library', methods=['POST', 'GET'])
@login_required
def library():
return render_template("setting/library.html",
Config=Config().get_config())
# 媒体服务器页面
@App.route('/mediaserver', methods=['POST', 'GET'])
@login_required
def mediaserver():
return render_template("setting/mediaserver.html",
Config=Config().get_config(),
MediaServerConf=ModuleConf.MEDIASERVER_CONF)
# 通知消息页面
@App.route('/notification', methods=['POST', 'GET'])
@login_required
def notification():
MessageClients = Message().get_message_client_info()
Channels = ModuleConf.MESSAGE_CONF.get("client")
Switchs = ModuleConf.MESSAGE_CONF.get("switch")
return render_template("setting/notification.html",
Channels=Channels,
Switchs=Switchs,
ClientCount=len(MessageClients),
MessageClients=MessageClients)
# 用户管理页面
@App.route('/users', methods=['POST', 'GET'])
@login_required
def users():
Users = WebAction().get_users().get("result")
TopMenus = WebAction().get_top_menus().get("menus")
return render_template("setting/users.html",
Users=Users,
UserCount=len(Users),
TopMenus=TopMenus)
# 过滤规则设置页面
@App.route('/filterrule', methods=['POST', 'GET'])
@login_required
def filterrule():
result = WebAction().get_filterrules()
return render_template("setting/filterrule.html",
Count=len(result.get("ruleGroups")),
RuleGroups=result.get("ruleGroups"),
Init_RuleGroups=result.get("initRules"))
# 自定义订阅页面
@App.route('/user_rss', methods=['POST', 'GET'])
@login_required
def user_rss():
Tasks = RssChecker().get_rsstask_info()
RssParsers = RssChecker().get_userrss_parser()
RuleGroups = {str(group["id"]): group["name"] for group in Filter().get_rule_groups()}
DownloadSettings = {did: attr["name"] for did, attr in Downloader().get_download_setting().items()}
RestypeDict = ModuleConf.TORRENT_SEARCH_PARAMS.get("restype")
PixDict = ModuleConf.TORRENT_SEARCH_PARAMS.get("pix")
return render_template("rss/user_rss.html",
Tasks=Tasks,
Count=len(Tasks),
RssParsers=RssParsers,
RuleGroups=RuleGroups,
RestypeDict=RestypeDict,
PixDict=PixDict,
DownloadSettings=DownloadSettings)
# RSS解析器页面
@App.route('/rss_parser', methods=['POST', 'GET'])
@login_required
def rss_parser():
RssParsers = RssChecker().get_userrss_parser()
return render_template("rss/rss_parser.html",
RssParsers=RssParsers,
Count=len(RssParsers))
# 插件页面
@App.route('/plugin', methods=['POST', 'GET'])
@login_required
def plugin():
Plugins = WebAction().get_plugins_conf().get("result")
return render_template("setting/plugin.html",
Plugins=Plugins,
Count=len(Plugins))
# 事件响应
@App.route('/do', methods=['POST'])
@action_login_check
def do():
try:
content = request.get_json()
cmd = content.get("cmd")
data = content.get("data") or {}
return WebAction().action(cmd, data)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {"code": -1, "msg": str(e)}
    # Directory-tree endpoint
    @App.route('/dirlist', methods=['POST'])
    @login_required
    def dirlist():
        """Return a jqueryFileTree HTML fragment listing the requested directory.

        Form args:
            dir:         directory to list; the special values *SYNC-FOLDERS*,
                         *DOWNLOAD-FOLDERS* and *MEDIA-FOLDERS* list the
                         configured roots instead of a filesystem path
            onlyFolders: when truthy, plain files are omitted
        """
        def match_sync_dir(folder, x, y, locating, direction):
            """
            Build the CSS class and link markup for a folder matching one side
            of a hard-link sync pair (x = matched side, y = opposite side).
            """
            result = False
            sync_class = ""
            link_path = ""
            link_direction = ""
            # mark as sync source/destination, plus the auto-locate flag
            sync_class = f"sync-{'src' if direction == '→' else 'dest'}{' auto-locate' if locating else ''}"
            if folder == x:
                target = SystemUtils.shorten_path(y) if y else "未设置"
                link_path = f'<span class="link-folder" data-bs-toggle="tooltip" title="{y}" data-jump="{y}">{target}</span>'
                link_direction = f'<span class="link-direction" data-direction="{direction}">{direction}</span>'
                result = True
            return result, sync_class, link_path, link_direction
        def get_hardlink_info(folder):
            """
            Return (sync_class, link_path, link_direction) for *folder*;
            all empty strings when it is not part of any sync pair.
            """
            sync_class = ""
            link_path = ""
            link_direction = ""
            # all configured hard-link sync directory pairs
            sync_dirs = Sync().get_filehardlinks_sync_dirs()
            # check whether the folder is (inside) either side of a pair
            for dir in sync_dirs:
                if dir[0] and (dir[0] == folder or folder.startswith(f"{dir[0]}/")):
                    result, sync_class, link_path, link_direction = match_sync_dir(folder, dir[0], dir[1], dir[2], '→')
                    if result: break
                elif dir[1] and (dir[1] == folder or folder.startswith(f"{dir[1]}/")):
                    result, sync_class, link_path, link_direction = match_sync_dir(folder, dir[1], dir[0], dir[2], '←')
                    if result: break
            return sync_class, link_path, link_direction
        def add_paths_to_media_dirs(paths, media_dirs):
            """
            Append the non-empty entries of *paths* (trailing slash stripped)
            to *media_dirs* in place.
            :param paths: candidate path list
            :param media_dirs: target media directory list
            """
            if not paths:
                return
            valid_paths = [pathElement.rstrip('/') for pathElement in paths if StringUtils.is_string_and_not_empty(pathElement)]
            media_dirs.extend(valid_paths)
        def get_media_dirs():
            """
            Collect the configured media-library directories (deduplicated).
            """
            media_dirs = []
            movie_path = Config().get_config('media').get('movie_path')
            tv_path = Config().get_config('media').get('tv_path')
            anime_path = Config().get_config('media').get('anime_path')
            unknown_path = Config().get_config('media').get('unknown_path')
            add_paths_to_media_dirs(movie_path, media_dirs)
            add_paths_to_media_dirs(tv_path, media_dirs)
            add_paths_to_media_dirs(anime_path, media_dirs)
            add_paths_to_media_dirs(unknown_path, media_dirs)
            return list(set(media_dirs))
        def get_download_dirs():
            # downloader visit directories, trailing slash stripped
            return [path.rstrip('/') for path in Downloader().get_download_visit_dirs()]
        r = ['<ul class="jqueryFileTree" style="display: none;">']
        try:
            # re-initialised inside the try so a failure above leaves a valid list
            r = ['<ul class="jqueryFileTree" style="display: none;">']
            in_dir = unquote(request.form.get('dir'))
            only_folders = request.form.get("onlyFolders")
            if not in_dir or in_dir == "/":
                # root listing: drives on Windows, "/" entries elsewhere
                if SystemUtils.get_system() == OsType.WINDOWS:
                    partitions = SystemUtils.get_windows_drives()
                    if partitions:
                        dirs = partitions
                    else:
                        dirs = [os.path.join("C:/", f) for f in os.listdir("C:/")]
                else:
                    dirs = [os.path.join("/", f) for f in os.listdir("/")]
            elif in_dir == "*SYNC-FOLDERS*":
                # both ends of every configured sync pair
                sync_dirs = []
                for id, conf in Sync().get_sync_path_conf().items():
                    sync_dirs.append(conf["from"])
                    sync_dirs.append(conf["to"])
                dirs = list(set(sync_dirs))
            elif in_dir == "*DOWNLOAD-FOLDERS*":
                dirs = get_download_dirs()
            elif in_dir == "*MEDIA-FOLDERS*":
                dirs = get_media_dirs()
            else:
                d = os.path.normpath(urllib.parse.unquote(in_dir))
                if not os.path.isdir(d):
                    d = os.path.dirname(d)
                dirs = [os.path.join(d, f) for f in os.listdir(d)]
            dirs.sort()
            for ff in dirs:
                f = os.path.basename(ff)
                if not f:
                    # basename of a drive root ("C:/") is empty — show the path
                    f = ff
                if os.path.isdir(ff):
                    # decorate hard-link sync directories
                    sync_class, link_path, link_direction = get_hardlink_info(ff)
                    # classify as media/download folder for front-end styling
                    folder_class = "media-folder" if ff in get_media_dirs() else "download-folder" if ff in get_download_dirs() else ""
                    path = ff.replace("\\", "/") + "/"
                    r.append('<li class="directory %s %s collapsed"><a rel="%s">%s%s%s</a></li>' % (
                        folder_class, sync_class, path, f.replace("\\", "/"), link_direction, link_path))
                else:
                    if not only_folders:
                        e = os.path.splitext(f)[1][1:]
                        r.append('<li class="file ext_%s"><a rel="%s">%s</a></li>' % (
                            e, ff.replace("\\", "/"), f.replace("\\", "/")))
            r.append('</ul>')
        except Exception as e:
            ExceptionUtils.exception_traceback(e)
            r.append('加载路径失败: %s' % str(e))
        r.append('</ul>')
        return make_response(''.join(r), 200)
# 禁止搜索引擎
@App.route('/robots.txt', methods=['GET', 'POST'])
def robots():
return send_from_directory("", "robots.txt")
# 响应企业微信消息
@App.route('/wechat', methods=['GET', 'POST'])
def wechat():
# 当前在用的交互渠道
interactive_client = Message().get_interactive_client(SearchType.WX)
if not interactive_client:
return make_response("NAStool没有启用微信交互", 200)
conf = interactive_client.get("config")
sToken = conf.get('token')
sEncodingAESKey = conf.get('encodingAESKey')
sCorpID = conf.get('corpid')
if not sToken or not sEncodingAESKey or not sCorpID:
return
wxcpt = WXBizMsgCrypt(sToken, sEncodingAESKey, sCorpID)
sVerifyMsgSig = request.args.get("msg_signature")
sVerifyTimeStamp = request.args.get("timestamp")
sVerifyNonce = request.args.get("nonce")
if request.method == 'GET':
if not sVerifyMsgSig and not sVerifyTimeStamp and not sVerifyNonce:
return "NAStool微信交互服务正常!<br>微信回调配置步聚:<br>1、在微信企业应用接收消息设置页面生成Token和EncodingAESKey并填入设置->消息通知->微信对应项,打开微信交互开关。<br>2、保存并重启本工具,保存并重启本工具,保存并重启本工具。<br>3、在微信企业应用接收消息设置页面输入此地址:http(s)://IP:PORT/wechat(IP、PORT替换为本工具的外网访问地址及端口,需要有公网IP并做好端口转发,最好有域名)。"
sVerifyEchoStr = request.args.get("echostr")
log.info("收到微信验证请求: echostr= %s" % sVerifyEchoStr)
ret, sEchoStr = wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce, sVerifyEchoStr)
if ret != 0:
log.error("微信请求验证失败 VerifyURL ret: %s" % str(ret))
# 验证URL成功,将sEchoStr返回给企业号
return sEchoStr
else:
try:
sReqData = request.data
log.debug("收到微信请求:%s" % str(sReqData))
ret, sMsg = wxcpt.DecryptMsg(sReqData, sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce)
if ret != 0:
log.error("解密微信消息失败 DecryptMsg ret = %s" % str(ret))
return make_response("ok", 200)
# 解析XML报文
"""
1、消息格式:
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[this is a test]]></Content>
<MsgId>1234567890123456</MsgId>
<AgentID>1</AgentID>
</xml>
2、事件格式:
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[UserID]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
<AgentID>1</AgentID>
</xml>
"""
dom_tree = xml.dom.minidom.parseString(sMsg.decode('UTF-8'))
root_node = dom_tree.documentElement
# 消息类型
msg_type = DomUtils.tag_value(root_node, "MsgType")
# Event event事件只有click才有效,enter_agent无效
event = DomUtils.tag_value(root_node, "Event")
# 用户ID
user_id = DomUtils.tag_value(root_node, "FromUserName")
# 没的消息类型和用户ID的消息不要
if not msg_type or not user_id:
log.info("收到微信心跳报文...")
return make_response("ok", 200)
# 解析消息内容
content = ""
if msg_type == "event" and event == "click":
# 校验用户有权限执行交互命令
if conf.get("adminUser") and not any(
user_id == admin_user for admin_user in str(conf.get("adminUser")).split(";")):
Message().send_channel_msg(channel=SearchType.WX, title="用户无权限执行菜单命令", user_id=user_id)
return make_response(content, 200)
# 事件消息
event_key = DomUtils.tag_value(root_node, "EventKey")
if event_key:
log.info("点击菜单:%s" % event_key)
keys = event_key.split('#')
if len(keys) > 2:
content = ModuleConf.WECHAT_MENU.get(keys[2])
elif msg_type == "text":
# 文本消息
content = DomUtils.tag_value(root_node, "Content", default="")
if content:
log.info(f"收到微信消息:userid={user_id}, text={content}")
# 处理消息内容
WebAction().handle_message_job(msg=content,
in_from=SearchType.WX,
user_id=user_id,
user_name=user_id)
return make_response(content, 200)
except Exception as err:
ExceptionUtils.exception_traceback(err)
log.error("微信消息处理发生错误:%s - %s" % (str(err), traceback.format_exc()))
return make_response("ok", 200)
# Plex Webhook
@App.route('/plex', methods=['POST'])
@require_auth(force=False)
def plex_webhook():
if not SecurityHelper().check_mediaserver_ip(request.remote_addr):
log.warn(f"非法IP地址的媒体服务器消息通知:{request.remote_addr}")
return '不允许的IP地址请求'
request_json = json.loads(request.form.get('payload', {}))
log.debug("收到Plex Webhook报文:%s" % str(request_json))
# 事件类型
event_match = request_json.get("event") in ["media.play", "media.stop", "library.new"]
# 媒体类型
type_match = request_json.get("Metadata", {}).get("type") in ["movie", "episode", "show"]
# 是否直播
is_live = request_json.get("Metadata", {}).get("live") == "1"
# 如果事件类型匹配,媒体类型匹配,不是直播
if event_match and type_match and not is_live:
# 发送消息
ThreadHelper().start_thread(MediaServer().webhook_message_handler,
(request_json, MediaServerType.PLEX))
# 触发事件
EventManager().send_event(EventType.PlexWebhook, request_json)
return 'Ok'
# Jellyfin Webhook
@App.route('/jellyfin', methods=['POST'])
@require_auth(force=False)
def jellyfin_webhook():
if not SecurityHelper().check_mediaserver_ip(request.remote_addr):
log.warn(f"非法IP地址的媒体服务器消息通知:{request.remote_addr}")
return '不允许的IP地址请求'
request_json = request.get_json()
log.debug("收到Jellyfin Webhook报文:%s" % str(request_json))
# 发送消息
ThreadHelper().start_thread(MediaServer().webhook_message_handler,
(request_json, MediaServerType.JELLYFIN))
# 触发事件
EventManager().send_event(EventType.JellyfinWebhook, request_json)
return 'Ok'
# Emby Webhook
@App.route('/emby', methods=['GET', 'POST'])
@require_auth(force=False)
def emby_webhook():
if not SecurityHelper().check_mediaserver_ip(request.remote_addr):
log.warn(f"非法IP地址的媒体服务器消息通知:{request.remote_addr}")
return '不允许的IP地址请求'
if request.method == 'POST':
log.debug("Emby Webhook data: %s" % str(request.form.get('data', {})))
request_json = json.loads(request.form.get('data', {}))
else:
log.debug("Emby Webhook data: %s" % str(dict(request.args)))
request_json = dict(request.args)
log.debug("收到Emby Webhook报文:%s" % str(request_json))
# 发送消息
ThreadHelper().start_thread(MediaServer().webhook_message_handler,
(request_json, MediaServerType.EMBY))
# 触发事件
EventManager().send_event(EventType.EmbyWebhook, request_json)
return 'Ok'
    # Telegram message callback
    @App.route('/telegram', methods=['POST'])
    @require_auth(force=False)
    def telegram():
        """Handle Telegram bot webhook updates.

        Validates the source IP, enforces admin/white-list permissions and
        forwards the message text to WebAction.  Update payload shape:

        {
            'update_id': ,
            'message': {
                'message_id': ,
                'from': {
                    'id': ,
                    'is_bot': False,
                    'first_name': '',
                    'username': '',
                    'language_code': 'zh-hans'
                },
                'chat': {
                    'id': ,
                    'first_name': '',
                    'username': '',
                    'type': 'private'
                },
                'date': ,
                'text': ''
            }
        }
        """
        # currently active interactive channel
        interactive_client = Message().get_interactive_client(SearchType.TG)
        if not interactive_client:
            return 'NAStool未启用Telegram交互'
        msg_json = request.get_json()
        if not SecurityHelper().check_telegram_ip(request.remote_addr):
            log.error("收到来自 %s 的非法Telegram消息:%s" % (request.remote_addr, msg_json))
            return '不允许的IP地址请求'
        if msg_json:
            message = msg_json.get("message", {})
            text = message.get("text")
            user_id = message.get("from", {}).get("id")
            # sender's user name
            user_name = message.get("from", {}).get("username")
            if text:
                log.info(f"收到Telegram消息:userid={user_id}, username={user_name}, text={text}")
                # commands ("/...") are admin-only; other text needs the white list
                if text.startswith("/"):
                    if str(user_id) not in interactive_client.get("client").get_admin():
                        Message().send_channel_msg(channel=SearchType.TG,
                                                   title="只有管理员才有权限执行此命令",
                                                   user_id=user_id)
                        return '只有管理员才有权限执行此命令'
                else:
                    if not str(user_id) in interactive_client.get("client").get_users():
                        Message().send_channel_msg(channel=SearchType.TG,
                                                   title="你不在用户白名单中,无法使用此机器人",
                                                   user_id=user_id)
                        return '你不在用户白名单中,无法使用此机器人'
                # hand the message over for processing
                WebAction().handle_message_job(msg=text,
                                               in_from=SearchType.TG,
                                               user_id=user_id,
                                               user_name=user_name)
        return 'Ok'
    # Synology Chat message callback
    @App.route('/synology', methods=['POST'])
    @require_auth(force=False)
    def synology():
        """Handle Synology Chat outgoing-webhook messages.

        Form fields:
            token: bot token
            user_id
            username
            post_id
            timestamp
            text
        """
        # currently active interactive channel
        interactive_client = Message().get_interactive_client(SearchType.SYNOLOGY)
        if not interactive_client:
            return 'NAStool未启用Synology Chat交互'
        msg_data = request.form
        if not SecurityHelper().check_synology_ip(request.remote_addr):
            log.error("收到来自 %s 的非法Synology Chat消息:%s" % (request.remote_addr, msg_data))
            return '不允许的IP地址请求'
        if msg_data:
            token = msg_data.get("token")
            if not interactive_client.get("client").check_token(token):
                log.error("收到来自 %s 的非法Synology Chat消息:token校验不通过!" % request.remote_addr)
                return 'token校验不通过'
            text = msg_data.get("text")
            # NOTE(review): assumes user_id is always present; int(None) would
            # raise here — confirm Synology Chat always sends it
            user_id = int(msg_data.get("user_id"))
            # sender's user name
            user_name = msg_data.get("username")
            if text:
                log.info(f"收到Synology Chat消息:userid={user_id}, username={user_name}, text={text}")
                WebAction().handle_message_job(msg=text,
                                               in_from=SearchType.SYNOLOGY,
                                               user_id=user_id,
                                               user_name=user_name)
        return 'Ok'
    # Slack message callback
    @App.route('/slack', methods=['POST'])
    @require_auth(force=False)
    def slack():
        """Handle Slack event/interaction payloads forwarded by the local relay.

        Supported payload types: ``message``, ``block_actions``,
        ``event_callback`` and ``shortcut``.  Reference payload shapes:

        # message
        {
            'client_msg_id': '',
            'type': 'message',
            'text': 'hello',
            'user': '',
            'ts': '1670143568.444289',
            'blocks': [{
                'type': 'rich_text',
                'block_id': 'i2j+',
                'elements': [{
                    'type': 'rich_text_section',
                    'elements': [{
                        'type': 'text',
                        'text': 'hello'
                    }]
                }]
            }],
            'team': '',
            'client': '',
            'event_ts': '1670143568.444289',
            'channel_type': 'im'
        }
        # shortcut
        {
          "type": "shortcut",
          "token": "XXXXXXXXXXXXX",
          "action_ts": "1581106241.371594",
          "team": {
            "id": "TXXXXXXXX",
            "domain": "shortcuts-test"
          },
          "user": {
            "id": "UXXXXXXXXX",
            "username": "aman",
            "team_id": "TXXXXXXXX"
          },
          "callback_id": "shortcut_create_task",
          "trigger_id": "944799105734.773906753841.38b5894552bdd4a780554ee59d1f3638"
        }
        # button click
        {
          "type": "block_actions",
          "team": {
            "id": "T9TK3CUKW",
            "domain": "example"
          },
          "user": {
            "id": "UA8RXUSPL",
            "username": "jtorrance",
            "team_id": "T9TK3CUKW"
          },
          "api_app_id": "AABA1ABCD",
          "token": "9s8d9as89d8as9d8as989",
          "container": {
            "type": "message_attachment",
            "message_ts": "1548261231.000200",
            "attachment_id": 1,
            "channel_id": "CBR2V3XEX",
            "is_ephemeral": false,
            "is_app_unfurl": false
          },
          "trigger_id": "12321423423.333649436676.d8c1bb837935619ccad0f624c448ffb3",
          "client": {
            "id": "CBR2V3XEX",
            "name": "review-updates"
          },
          "message": {
            "bot_id": "BAH5CA16Z",
            "type": "message",
            "text": "This content can't be displayed.",
            "user": "UAJ2RU415",
            "ts": "1548261231.000200",
            ...
          },
          "response_url": "https://hooks.slack.com/actions/AABA1ABCD/1232321423432/D09sSasdasdAS9091209",
          "actions": [
            {
              "action_id": "WaXA",
              "block_id": "=qXel",
              "text": {
                "type": "plain_text",
                "text": "View",
                "emoji": true
              },
              "value": "click_me_123",
              "type": "button",
              "action_ts": "1548426417.840180"
            }
          ]
        }
        """
        # only the local relay is allowed to call this endpoint
        if not SecurityHelper().check_slack_ip(request.remote_addr):
            log.warn(f"非法IP地址的Slack消息通知:{request.remote_addr}")
            return '不允许的IP地址请求'
        # currently active interactive channel
        interactive_client = Message().get_interactive_client(SearchType.SLACK)
        if not interactive_client:
            return 'NAStool未启用Slack交互'
        msg_json = request.get_json()
        if msg_json:
            # extract user/text depending on the payload type
            if msg_json.get("type") == "message":
                userid = msg_json.get("user")
                text = msg_json.get("text")
                username = msg_json.get("user")
            elif msg_json.get("type") == "block_actions":
                userid = msg_json.get("user", {}).get("id")
                text = msg_json.get("actions")[0].get("value")
                username = msg_json.get("user", {}).get("name")
            elif msg_json.get("type") == "event_callback":
                userid = msg_json.get('event', {}).get('user')
                # strip the bot @-mention from the message text
                text = re.sub(r"<@[0-9A-Z]+>", "", msg_json.get("event", {}).get("text"), flags=re.IGNORECASE).strip()
                username = ""
            elif msg_json.get("type") == "shortcut":
                userid = msg_json.get("user", {}).get("id")
                text = msg_json.get("callback_id")
                username = msg_json.get("user", {}).get("username")
            else:
                return "Error"
            log.info(f"收到Slack消息:userid={userid}, username={username}, text={text}")
            WebAction().handle_message_job(msg=text,
                                           in_from=SearchType.SLACK,
                                           user_id=userid,
                                           user_name=username)
        return "Ok"
# Jellyseerr Overseerr订阅接口
@App.route('/subscribe', methods=['POST'])
@require_auth
def subscribe():
    """
    Jellyseerr / Overseerr webhook endpoint: create an RSS subscription when a
    media request is approved or auto-approved.

    Expected JSON payload (Overseerr webhook template; only the fields read
    here are listed):
    {
        "notification_type": "MEDIA_APPROVED" | "MEDIA_AUTO_APPROVED" | ...,
        "subject": "<media title>",
        "media": {"media_type": "movie" | "tv", "tmdbId": "<id>"},
        "request": {"requestedBy_username": "<name>"},
        "extra": [{"name": "Requested Seasons", "value": "1, 2"}]   # TV only
    }

    :return: 200 "ok" when handled (or ignored), 400 on a non-JSON body,
             500 on missing parameters or a failed subscription.
    """
    req_json = request.get_json()
    if not req_json:
        return make_response("非法请求!", 400)
    # Only approval events trigger a subscription; everything else is acked.
    notification_type = req_json.get("notification_type")
    if notification_type not in ["MEDIA_APPROVED", "MEDIA_AUTO_APPROVED"]:
        return make_response("ok", 200)
    subject = req_json.get("subject")
    media_type = MediaType.MOVIE if req_json.get("media", {}).get("media_type") == "movie" else MediaType.TV
    tmdbId = req_json.get("media", {}).get("tmdbId")
    # NOTE: media_type can never be falsy (the ternary always yields an enum);
    # the check is kept for interface fidelity with the original handler.
    if not media_type or not tmdbId or not subject:
        return make_response("请求参数不正确!", 500)
    # 添加订阅
    code = 0
    msg = "ok"
    meta_info = MetaInfo(title=subject, mtype=media_type)
    user_name = req_json.get("request", {}).get("requestedBy_username")
    if media_type == MediaType.MOVIE:
        code, msg, _ = Subscribe().add_rss_subscribe(mtype=media_type,
                                                     name=meta_info.get_name(),
                                                     year=meta_info.year,
                                                     channel=RssType.Auto,
                                                     mediaid=tmdbId,
                                                     in_from=SearchType.API,
                                                     user_name=user_name)
    else:
        seasons = []
        for extra in req_json.get("extra", []):
            if extra.get("name") == "Requested Seasons":
                # "value" is a comma-separated list like "1, 2, 3".
                # Fix: guard against a missing/None value, which previously
                # raised AttributeError on .split().
                seasons = [int(str(sea).strip())
                           for sea in str(extra.get("value") or "").split(", ")
                           if str(sea).isdigit()]
                break
        # Each iteration overwrites code/msg, so the response reflects the
        # outcome of the LAST season only (unchanged from original behavior).
        for season in seasons:
            code, msg, _ = Subscribe().add_rss_subscribe(mtype=media_type,
                                                         name=meta_info.get_name(),
                                                         year=meta_info.year,
                                                         channel=RssType.Auto,
                                                         mediaid=tmdbId,
                                                         season=season,
                                                         in_from=SearchType.API,
                                                         user_name=user_name)
    if code == 0:
        return make_response("ok", 200)
    else:
        return make_response(msg, 500)
# 备份配置文件
@App.route('/backup', methods=['POST'])
@login_required
def backup():
    """
    Create a zip backup of the user's configuration and stream it back.

    :return: the backup archive as a file download, or HTTP 400 on failure.
    """
    archive_path = WebAction().backup()
    if archive_path:
        return send_file(archive_path)
    return make_response("创建备份失败", 400)
# 上传文件到服务器
@App.route('/upload', methods=['POST'])
@login_required
def upload():
    """
    Receive an uploaded file and store it in the application's temp directory.

    :return: {"code": 0, "filepath": <path>} on success,
             {"code": 1, "msg": <error>, "filepath": ""} on failure.
    """
    try:
        uploaded = request.files['file']
        temp_dir = Config().get_temp_path()
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir, exist_ok=True)
        target = Path(temp_dir) / uploaded.filename
        uploaded.save(str(target))
        return {"code": 0, "filepath": str(target)}
    except Exception as err:
        ExceptionUtils.exception_traceback(err)
        return {"code": 1, "msg": str(err), "filepath": ""}
@App.route('/ical')
@require_auth(force=False)
def ical():
    """
    Build an iCalendar (.ics) feed from the subscription/RSS events.

    Query args:
        remind: when truthy, attach a 30-minute DISPLAY alarm to each event.
    :return: the calendar as an attachment named nastool.ics
    """
    remind = request.args.get("remind")
    cal = Calendar()
    for item in WebAction().get_ical_events().get("result"):
        # Entries without a start date cannot be scheduled; skip them.
        if not item.get("start"):
            continue
        day = datetime.datetime.strptime(item.get("start"), '%Y-%m-%d')
        event = Event()
        event.add('summary', f'{item.get("type")}:{item.get("title")}')
        # One-hour event shifted by +8h (UTC+8 local time assumption).
        event.add('dtstart', day + datetime.timedelta(hours=8))
        event.add('dtend', day + datetime.timedelta(hours=9))
        if remind:
            # Optional reminder 30 minutes into the event window.
            alarm = Alarm()
            alarm.add('trigger', datetime.timedelta(minutes=30))
            alarm.add('action', 'DISPLAY')
            event.add_component(alarm)
        cal.add_component(event)
    # Serve the serialized calendar as a downloadable attachment.
    response = Response(cal.to_ical(), mimetype='text/calendar')
    response.headers['Content-Disposition'] = 'attachment; filename=nastool.ics'
    return response
@App.route('/img')
@login_required
def Img():
    """
    Image relay endpoint: fetch a remote image through the request cache and
    serve it with a one-week Cache-Control lifetime plus ETag revalidation.

    Query args:
        url: the source image URL (required).
    :return: the image bytes (200), 304 when the client's ETag matches,
             400 on a missing url or a fetch failure.
    """
    url = request.args.get('url')
    if not url:
        return make_response("参数错误", 400)
    # The ETag is derived solely from the source URL, so it is stable per image.
    etag = hashlib.sha256(url.encode('utf-8')).hexdigest()
    # Conditional request: client already holds this representation -> 304.
    if_none_match = request.headers.get('If-None-Match')
    if if_none_match and if_none_match == etag:
        return make_response('', 304)
    # 获取图片数据
    try:
        img = WebUtils.request_cache(url)
        response = Response(
            img,
            mimetype='image/jpeg'
        )
        response.headers.set('Cache-Control', 'max-age=604800')
        response.headers.set('Etag', etag)
        return response
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # are no longer swallowed by the error response.
        return make_response("图片加载失败", 400)
@App.route('/stream-logging')
@login_required
def stream_logging():
    """
    Server-Sent Events endpoint that streams live log records.

    Query args:
        source: optional log-source filter; only records whose "source"
                field matches are emitted.
    """
    def __logging(_source=""):
        """
        Generator yielding one SSE ``data:`` frame per second containing the
        newly appended log records as a JSON list.
        """
        global LoggingSource
        while True:
            # NOTE(review): the sleep/yield happen while LoggingLock is held,
            # so concurrent streams serialize on this lock — confirm intended.
            with LoggingLock:
                if _source != LoggingSource:
                    # Requested source changed: remember it and rewind the
                    # cursor so the whole buffered queue is re-sent.
                    LoggingSource = _source
                    log.LOG_INDEX = len(log.LOG_QUEUE)
                if log.LOG_INDEX > 0:
                    # Take the LOG_INDEX newest records, then reset the cursor.
                    logs = list(log.LOG_QUEUE)[-log.LOG_INDEX:]
                    log.LOG_INDEX = 0
                    if _source:
                        # Apply the per-stream source filter.
                        logs = [lg for lg in logs if lg.get("source") == _source]
                else:
                    # Nothing new: still emit an empty frame as a keep-alive.
                    logs = []
                time.sleep(1)
                yield 'data: %s\n\n' % json.dumps(logs)
    return Response(
        __logging(request.args.get("source") or ""),
        mimetype='text/event-stream'
    )
@App.route('/stream-progress')
@login_required
def stream_progress():
    """
    Server-Sent Events endpoint that pushes progress snapshots every 200 ms.

    Query args:
        type: the progress category passed through to WebAction.refresh_process.
    """
    def __progress(_type):
        """Yield one SSE ``data:`` frame per poll of the given progress type."""
        action = WebAction()
        while True:
            time.sleep(0.2)
            snapshot = action.refresh_process({"type": _type})
            yield 'data: %s\n\n' % json.dumps(snapshot)
    return Response(
        __progress(request.args.get("type")),
        mimetype='text/event-stream'
    )
@Sock.route('/message')
@login_required
def message_handler(ws):
    """
    Message-center WebSocket handler.

    Protocol (JSON frames):
      - {"text": ...}      -> dispatch the text as a message job, reply {}
      - {"lst_time": ...}  -> pull system messages newer than lst_time and
                              reply {"lst_time": ..., "message": [...]}
    """
    while True:
        try:
            # Block up to 10s for the next client frame (None on timeout).
            data = ws.receive(timeout=10)
        except ConnectionClosed:
            print("WebSocket连接已关闭!")
            break
        if not data:
            # Timeout / empty frame: keep the connection alive and poll again.
            continue
        try:
            msgbody = json.loads(data)
        except Exception as err:
            # Malformed JSON from the client: log and ignore the frame.
            print(str(err))
            continue
        if msgbody.get("text"):
            # Outbound message typed by the user: hand it to the job handler.
            WebAction().handle_message_job(msg=msgbody.get("text"),
                                           in_from=SearchType.WEB,
                                           user_id=current_user.username,
                                           user_name=current_user.username)
            ws.send((json.dumps({})))
        else:
            # Pull request: fetch system messages newer than lst_time.
            system_msg = WebAction().get_system_message(lst_time=msgbody.get("lst_time"))
            messages = system_msg.get("message")
            lst_time = system_msg.get("lst_time")
            ret_messages = []
            for message in list(reversed(messages)):
                # Preserve <br> line breaks via a "####" placeholder, strip
                # every other HTML tag, then restore the breaks as <br>.
                content = re.sub(r"#+", "<br>",
                                 re.sub(r"<[^>]+>", "",
                                        re.sub(r"<br/?>", "####", message.get("content"), flags=re.IGNORECASE)))
                ret_messages.append({
                    "level": "bg-red" if message.get("level") == "ERROR" else "",
                    "title": message.get("title"),
                    "content": content,
                    "time": message.get("time")
                })
            ws.send((json.dumps({
                "lst_time": lst_time,
                "message": ret_messages
            })))
# base64模板过滤器
@App.template_filter('b64encode')
def b64encode(s):
    """Jinja filter: Base64-encode a text value and return it as str."""
    encoded = base64.b64encode(s.encode())
    return encoded.decode()
# split模板过滤器
@App.template_filter('split')
def split(string, char, pos):
    """Jinja filter: split *string* on *char* and return the piece at *pos*."""
    parts = string.split(char)
    return parts[pos]
# 刷流规则过滤器
@App.template_filter('brush_rule_string')
def brush_rule_string(rules):
    """Jinja filter: render a brush (seeding) rule set as a display string."""
    description = WebAction.parse_brush_rule_string(rules)
    return description
# 大小格式化过滤器
@App.template_filter('str_filesize')
def str_filesize(size):
    """Jinja filter: format a byte count as a human-readable size (1 decimal)."""
    formatted = StringUtils.str_filesize(size, pre=1)
    return formatted
# MD5 HASH过滤器
@App.template_filter('hash')
def md5_hash(text):
    """Jinja filter: return the MD5 hex digest of *text*."""
    digest = StringUtils.md5_hash(text)
    return digest
| 70,971 | Python | .py | 1,713 | 28.457093 | 252 | 0.557514 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,288,928 | user.cpython-310-darwin.so | demigody_nas-tools/web/backend/user.cpython-310-darwin.so | Êşº¾ @ ш @ · Ïúíş … ( __TEXT € € __text __TEXT À1 X™ À1 € __stubs __TEXT Ë 4 Ë € __stub_helper __TEXT LÍ ¾ LÍ € __const __TEXT Ñ Ÿ Ñ __cstring __TEXT %p n %p __unwind_info __TEXT ”~ ` ”~ x __DATA € À € À __nl_symbol_ptr __DATA € € ^ __got __DATA € ğ € _ __la_symbol_ptr __DATA ø€ ğ ø€ } __const __DATA ğƒ �j ğƒ __data __DATA €î ˜ €î __common __DATA __bss __DATA ( H __LINKEDIT @ À @ ˆ‘ " €0 @ F P pH x èQ H 0S * @I
Hˆ P ¬ ¬ ® | ĞE
Û f�&ÄÒø=¨ºhÓÚ·p$
* 8 d' /usr/lib/libSystem.B.dylib & |