# Repository: demigody/nas-tools (Python, AGPL-3.0)
# ===== demigody_nas-tools/app/utils/tokens.py =====
import re
from config import SPLIT_CHARS
class Tokens:
_text = ""
_index = 0
_tokens = []
def __init__(self, text):
self._text = text
self._tokens = []
self.load_text(text)
def load_text(self, text):
splited_text = re.split(r'%s' % SPLIT_CHARS, text)
for sub_text in splited_text:
if sub_text:
self._tokens.append(sub_text)
def cur(self):
if self._index >= len(self._tokens):
return None
else:
token = self._tokens[self._index]
return token
def get_next(self):
token = self.cur()
if token:
self._index = self._index + 1
return token
def peek(self):
index = self._index + 1
if index >= len(self._tokens):
return None
else:
return self._tokens[index]
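# --- usage sketch (not from the repository) ----------------------------------
# Tokens reads its delimiter set from config.SPLIT_CHARS, so it cannot run in
# isolation. This standalone snippet assumes a dot/space delimiter regex purely
# for illustration and walks the same cursor/peek pattern the class implements.
import re

_demo_tokens = [t for t in re.split(r"[.\s]", "The.Matrix.1999.BluRay.1080p") if t]
_i = 0
while _i < len(_demo_tokens):
    cur = _demo_tokens[_i]                                               # Tokens.cur()
    nxt = _demo_tokens[_i + 1] if _i + 1 < len(_demo_tokens) else None   # Tokens.peek()
    print(cur, "-> peek:", nxt)
    _i += 1                                                              # Tokens.get_next()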
# ===== demigody_nas-tools/app/utils/string_utils.py =====
import bisect
import datetime
import hashlib
import random
import re
import os
from urllib import parse
import cn2an
import dateparser
import dateutil.parser
import zhconv
from app.utils.exception_utils import ExceptionUtils
from app.utils.types import MediaType
from config import Config
class StringUtils:
@staticmethod
def num_filesize(text):
"""
将文件大小文本转化为字节
"""
if not text:
return 0
if not isinstance(text, str):
text = str(text)
if text.isdigit():
return int(text)
text = text.replace(",", "").replace(" ", "").upper()
size = re.sub(r"[KMGTPI]*B?", "", text, flags=re.IGNORECASE)
try:
size = float(size)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return 0
if text.find("PB") != -1 or text.find("PIB") != -1:
size *= 1024 ** 5
elif text.find("TB") != -1 or text.find("TIB") != -1:
size *= 1024 ** 4
elif text.find("GB") != -1 or text.find("GIB") != -1:
size *= 1024 ** 3
elif text.find("MB") != -1 or text.find("MIB") != -1:
size *= 1024 ** 2
elif text.find("KB") != -1 or text.find("KIB") != -1:
size *= 1024
return round(size)
@staticmethod
def str_timelong(time_sec):
"""
将数字转换为时间描述
"""
        if not isinstance(time_sec, (int, float)):
try:
time_sec = float(time_sec)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return ""
d = [(0, '秒'), (60 - 1, '分'), (3600 - 1, '小时'), (86400 - 1, '天')]
s = [x[0] for x in d]
index = bisect.bisect_left(s, time_sec) - 1
if index == -1:
return str(time_sec)
else:
b, u = d[index]
return str(round(time_sec / (b + 1))) + u
@staticmethod
def is_chinese(word):
"""
判断是否含有中文
"""
if isinstance(word, list):
word = " ".join(word)
chn = re.compile(r'[\u4e00-\u9fff]')
if chn.search(word):
return True
else:
return False
@staticmethod
def is_japanese(word):
jap = re.compile(r'[\u3040-\u309F\u30A0-\u30FF]')
if jap.search(word):
return True
else:
return False
@staticmethod
def is_korean(word):
kor = re.compile(r'[\uAC00-\uD7FF]')
if kor.search(word):
return True
else:
return False
@staticmethod
def is_all_chinese(word):
"""
判断是否全是中文
"""
for ch in word:
if ch == ' ':
continue
if '\u4e00' <= ch <= '\u9fff':
continue
else:
return False
return True
@staticmethod
def is_eng_media_name_format(word):
pattern = r'^[a-zA-Z]+[a-zA-Z0-9\s._:@!@]*$'
return bool(re.match(pattern, word))
@staticmethod
def is_int_or_float(word):
"""
判断是否是整型或浮点型的格式
"""
if not word:
return None
pattern = r'^[-+]?\d+(\.\d+)?$'
return re.match(pattern, word) is not None
@staticmethod
def is_string_and_not_empty(word):
"""
判断是否是字符串并且字符串是否为空
"""
if isinstance(word, str) and word.strip():
return True
else:
return False
@staticmethod
def xstr(s):
"""
字符串None输出为空
"""
return s if s else ''
@staticmethod
def str_sql(in_str):
"""
转化SQL字符
"""
return "" if not in_str else str(in_str)
@staticmethod
def str_int(text):
"""
web字符串转int
:param text:
:return:
"""
int_val = 0
if not text:
return int_val
try:
int_val = int(text.strip().replace(',', ''))
except Exception as e:
ExceptionUtils.exception_traceback(e)
return int_val
@staticmethod
def str_float(text):
"""
web字符串转float
:param text:
:return:
"""
float_val = 0.0
if not text:
return 0.0
try:
text = text.strip().replace(',', '')
if text:
float_val = float(text)
else:
float_val = 0.0
except Exception as e:
ExceptionUtils.exception_traceback(e)
return float_val
@staticmethod
def handler_special_chars(text, replace_word="", allow_space=False):
"""
忽略特殊字符
"""
# 需要忽略的特殊字符
CONVERT_EMPTY_CHARS = r"[、.。,,·::;;!!'’\"“”()()\[\]【】「」\-——\+\|\\_/&#~~]"
if not text:
return text
if not isinstance(text, list):
text = re.sub(r"[\u200B-\u200D\uFEFF]",
"",
re.sub(r"%s" % CONVERT_EMPTY_CHARS, replace_word, text),
flags=re.IGNORECASE)
if not allow_space:
return re.sub(r"\s+", "", text)
else:
return re.sub(r"\s+", " ", text).strip()
else:
            return [StringUtils.handler_special_chars(x, replace_word, allow_space) for x in text]
@staticmethod
def str_filesize(size, pre=2):
"""
将字节计算为文件大小描述(带单位的格式化后返回)
"""
if size is None:
return ""
size = re.sub(r"\s|B|iB", "", str(size), re.I)
if size.replace(".", "").isdigit():
try:
size = float(size)
d = [(1024 - 1, 'K'), (1024 ** 2 - 1, 'M'), (1024 ** 3 - 1, 'G'), (1024 ** 4 - 1, 'T')]
s = [x[0] for x in d]
index = bisect.bisect_left(s, size) - 1
if index == -1:
return str(size) + "B"
else:
b, u = d[index]
return str(round(size / (b + 1), pre)) + u
except Exception as e:
ExceptionUtils.exception_traceback(e)
return ""
if re.findall(r"[KMGTP]", size, re.I):
return size
else:
return size + "B"
@staticmethod
def url_equal(url1, url2):
"""
比较两个地址是否为同一个网站
"""
if not url1 or not url2:
return False
if url1.startswith("http"):
url1 = parse.urlparse(url1).netloc
if url2.startswith("http"):
url2 = parse.urlparse(url2).netloc
if url1.replace("www.", "") == url2.replace("www.", ""):
return True
return False
@staticmethod
def get_url_netloc(url):
"""
获取URL的协议和域名部分
"""
if not url:
return "", ""
if not url.startswith("http"):
return "http", url
addr = parse.urlparse(url)
return addr.scheme, addr.netloc
@staticmethod
def get_url_domain(url):
"""
获取URL的域名部分,不含WWW和HTTP
"""
if not url:
return ""
_, netloc = StringUtils.get_url_netloc(url)
if netloc:
return netloc.lower().replace("www.", "")
return ""
@staticmethod
def get_url_sld(url):
"""
获取URL的二级域名部分,不含端口,若为IP则返回IP
"""
if not url:
return ""
_, netloc = StringUtils.get_url_netloc(url)
if not netloc:
return ""
netloc = netloc.split(":")[0].split(".")
if len(netloc) >= 2:
return netloc[-2]
return netloc[0]
@staticmethod
def get_base_url(url):
"""
获取URL根地址
"""
if not url:
return ""
scheme, netloc = StringUtils.get_url_netloc(url)
return f"{scheme}://{netloc}"
@staticmethod
def clear_file_name(name):
"""
去除文件中的特殊字符
"""
if not name:
return None
replacement_dict = {
r"[*?\\/\"<>~|,,?]": "",
r"[\s]+": " ",
}
cleaned_name = name
for pattern, replacement in replacement_dict.items():
cleaned_name = re.sub(pattern, replacement, cleaned_name, flags=re.IGNORECASE).strip()
media = Config().get_config('media')
filename_prefer_barre = media.get("filename_prefer_barre", False) or False
if filename_prefer_barre:
cleaned_name = cleaned_name.replace(":", " - ").replace(":", " - ")
else:
cleaned_name = cleaned_name.replace(":", ":")
return cleaned_name
@staticmethod
def get_keyword_from_string(content):
"""
        从搜索关键字中拆分出年份、季、集、类型
        """
        if not content:
            return None, None, None, None, None, None
# 去掉查询中的电影或电视剧关键字
if re.search(r'^电视剧|\s+电视剧|^动漫|\s+动漫', content):
mtype = MediaType.TV
else:
mtype = None
content = re.sub(r'^电影|^电视剧|^动漫|\s+电影|\s+电视剧|\s+动漫', '', content).strip()
# 稍微切一下剧集吧
season_num = None
episode_num = None
year = None
season_re = re.search(r"第\s*([0-9一二三四五六七八九十]+)\s*季", content, re.IGNORECASE)
if season_re:
mtype = MediaType.TV
season_num = int(cn2an.cn2an(season_re.group(1), mode='smart'))
episode_re = re.search(r"第\s*([0-9一二三四五六七八九十百零]+)\s*集", content, re.IGNORECASE)
if episode_re:
mtype = MediaType.TV
episode_num = int(cn2an.cn2an(episode_re.group(1), mode='smart'))
if episode_num and not season_num:
season_num = 1
year_re = re.search(r"[\s(]+(\d{4})[\s)]*", content)
if year_re:
year = year_re.group(1)
key_word = re.sub(
r'第\s*[0-9一二三四五六七八九十]+\s*季|第\s*[0-9一二三四五六七八九十百零]+\s*集|[\s(]+(\d{4})[\s)]*', '',
content,
flags=re.IGNORECASE).strip()
if key_word:
key_word = re.sub(r'\s+', ' ', key_word)
if not key_word:
key_word = year
return mtype, key_word, season_num, episode_num, year, content
@staticmethod
def generate_random_str(randomlength=16):
"""
生成一个指定长度的随机字符串
"""
random_str = ''
        base_str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
length = len(base_str) - 1
for i in range(randomlength):
random_str += base_str[random.randint(0, length)]
return random_str
@staticmethod
def get_time_stamp(date):
tempsTime = None
try:
tempsTime = dateutil.parser.parse(date)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return tempsTime
@staticmethod
def unify_datetime_str(datetime_str):
"""
日期时间格式化 统一转成 2020-10-14 07:48:04 这种格式
# 场景1: 带有时区的日期字符串 eg: Sat, 15 Oct 2022 14:02:54 +0800
# 场景2: 中间带T的日期字符串 eg: 2020-10-14T07:48:04
# 场景3: 中间带T的日期字符串 eg: 2020-10-14T07:48:04.208
# 场景4: 日期字符串以GMT结尾 eg: Fri, 14 Oct 2022 07:48:04 GMT
# 场景5: 日期字符串以UTC结尾 eg: Fri, 14 Oct 2022 07:48:04 UTC
# 场景6: 日期字符串以Z结尾 eg: Fri, 14 Oct 2022 07:48:04Z
# 场景7: 日期字符串为相对时间 eg: 1 month, 2 days ago
:param datetime_str:
:return:
"""
# 传入的参数如果是None 或者空字符串 直接返回
if not datetime_str:
return datetime_str
try:
return dateparser.parse(datetime_str).strftime('%Y-%m-%d %H:%M:%S')
except Exception as e:
ExceptionUtils.exception_traceback(e)
return datetime_str
@staticmethod
def timestamp_to_date(timestamp, date_format='%Y-%m-%d %H:%M:%S'):
"""
时间戳转日期
:param timestamp:
:param date_format:
:return:
"""
if isinstance(timestamp, str) and not timestamp.isdigit():
return timestamp
try:
return datetime.datetime.fromtimestamp(int(timestamp)).strftime(date_format)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return timestamp
@staticmethod
def to_bool(text, default_val: bool = False) -> bool:
"""
字符串转bool
:param text: 要转换的值
:param default_val: 默认值
:return:
"""
if isinstance(text, str) and not text:
return default_val
if isinstance(text, bool):
return text
if isinstance(text, int) or isinstance(text, float):
return True if text > 0 else False
if isinstance(text, str) and text.lower() in ['y', 'true', '1']:
return True
return False
@staticmethod
def str_from_cookiejar(cj):
"""
将cookiejar转换为字符串
:param cj:
:return:
"""
return '; '.join(['='.join(item) for item in cj.items()])
@staticmethod
def get_idlist_from_string(content, dicts):
"""
从字符串中提取id列表
:param content: 字符串
:param dicts: 字典
:return:
"""
if not content:
return []
id_list = []
content_list = content.split()
for dic in dicts:
if dic.get('name') in content_list and dic.get('id') not in id_list:
id_list.append(dic.get('id'))
content = content.replace(dic.get('name'), '')
return id_list, re.sub(r'\s+', ' ', content).strip()
@staticmethod
def str_title(s):
"""
        将英文的首字母大写
:param s: en_name string
:return: string title
"""
return s.title() if s else s
@staticmethod
def md5_hash(data):
"""
MD5 HASH
"""
if not data:
return ""
return hashlib.md5(str(data).encode()).hexdigest()
@staticmethod
def md5_hash_file(file_path):
"""
MD5 HASH 指定文件
"""
if not os.path.exists(file_path):
return ""
md5_hash = hashlib.md5()
with open(file_path, "rb") as file:
while chunk := file.read(8192):
md5_hash.update(chunk)
return md5_hash.hexdigest()
@staticmethod
def verify_integrity(file_path, original_md5):
"""
校验文件是否匹配指定的md5
"""
md5 = StringUtils.md5_hash_file(file_path)
if not StringUtils.is_string_and_not_empty(md5) or \
not StringUtils.is_string_and_not_empty(original_md5):
return True
return md5 == original_md5
@staticmethod
def str_timehours(minutes):
"""
将分钟转换成小时和分钟
:param minutes:
:return:
"""
if not minutes:
return ""
hours = minutes // 60
minutes = minutes % 60
if hours:
return "%s小时%s分" % (hours, minutes)
else:
return "%s分钟" % minutes
@staticmethod
def str_amount(amount, curr="$"):
"""
格式化显示金额
"""
if not amount:
return "0"
return curr + format(amount, ",")
@staticmethod
def count_words(s):
"""
计算字符串中包含的单词数量,只适用于简单的单行文本
:param s: 要计算的字符串
:return: 字符串中包含的单词数量
"""
# 匹配英文单词
if re.match(r'^[A-Za-z0-9\s]+$', s):
# 如果是英文字符串,则按空格分隔单词,并计算单词数量
num_words = len(s.split())
else:
# 如果不是英文字符串,则计算字符数量
num_words = len(s)
return num_words
@staticmethod
def split_text(text, max_length):
"""
把文本拆分为固定字节长度的数组,优先按换行拆分,避免单词内拆分
"""
        if not text:
            yield ''
            return
# 分行
lines = re.split('\n', text)
buf = ''
for line in lines:
if len(line.encode('utf-8')) > max_length:
# 超长行继续拆分
blank = ""
if re.match(r'^[A-Za-z0-9.\s]+', line):
# 英文行按空格拆分
parts = line.split()
blank = " "
else:
# 中文行按字符拆分
parts = line
part = ''
for p in parts:
if len((part + p).encode('utf-8')) > max_length:
# 超长则Yield
yield (buf + part).strip()
buf = ''
part = f"{blank}{p}"
else:
part = f"{part}{blank}{p}"
if part:
# 将最后的部分追加到buf
buf += part
else:
if len((buf + "\n" + line).encode('utf-8')) > max_length:
# buf超长则Yield
yield buf.strip()
buf = line
else:
# 短行直接追加到buf
if buf:
buf = f"{buf}\n{line}"
else:
buf = line
if buf:
# 处理文本末尾剩余部分
yield buf.strip()
@staticmethod
def is_one_month_ago(date_str):
"""
判断日期是否早于一个月前
"""
if not date_str:
return False
# 将日期字符串解析为日期对象
date_obj = datetime.datetime.strptime(date_str, '%Y-%m-%d')
# 计算当前日期和一个月前的日期
today = datetime.datetime.today()
one_month_ago = today - datetime.timedelta(days=30)
# 比较日期对象,判断是否早于一个月前
if date_obj < one_month_ago:
return True
else:
return False
@staticmethod
def is_chinese_word(string: str, mode: int = 1):
"""
判断是否包含中文
:param string 需要判断的字符
:param mode 模式 1匹配简体和繁体 2只匹配简体 3只匹配繁体
:return True or False
"""
for ch in string:
if mode == 1:
if "\u4e00" <= ch <= "\u9FFF":
return True
elif mode == 2:
if "\u4e00" <= ch <= "\u9FFF":
if zhconv.convert(ch, "zh-cn") == ch:
return True
elif mode == 3:
if "\u4e00" <= ch <= "\u9FFF":
if zhconv.convert(ch, "zh-cn") != ch:
return True
if re.search(pattern="^[0-9]+$", string=string):
return True
return False
@staticmethod
def get_url_host(url: str) -> str:
"""
获取URL的一级域名
"""
if not url:
return ""
_, netloc = StringUtils.get_url_netloc(url)
if not netloc:
return ""
        parts = netloc.split(".")
        # 防止单段域名(如 localhost)下标越界
        return parts[-2] if len(parts) >= 2 else parts[0]
@staticmethod
def format_list(array: list) -> str:
"""
数组转字符串
"""
if not array:
return ""
return '[{}]'.format(','.join('[{}]'.format(','.join(map(str, sublist))) for sublist in array))
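# --- usage sketch (not from the repository) ----------------------------------
# str_filesize and str_timelong share one technique: bisect over ascending unit
# thresholds, then divide by the chosen unit's base. The same lookup, standalone:
import bisect

def human_size(size, pre=2):
    d = [(1024 - 1, 'K'), (1024 ** 2 - 1, 'M'), (1024 ** 3 - 1, 'G'), (1024 ** 4 - 1, 'T')]
    index = bisect.bisect_left([x[0] for x in d], size) - 1
    if index == -1:
        return f"{size}B"
    base, unit = d[index]
    return f"{round(size / (base + 1), pre)}{unit}"

assert human_size(512) == "512B"
assert human_size(1536) == "1.5K"
assert human_size(3 * 1024 ** 3) == "3.0G"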
# ===== demigody_nas-tools/app/utils/system_utils.py =====
import datetime
import os
import platform
import shutil
import subprocess
import psutil
from app.utils.exception_utils import ExceptionUtils
from app.utils.path_utils import PathUtils
from app.utils.types import OsType
from config import Config, WEBDRIVER_PATH
from math import ceil
class SystemUtils:
@staticmethod
def __get_hidden_shell():
if os.name == "nt":
st = subprocess.STARTUPINFO()
st.dwFlags = subprocess.STARTF_USESHOWWINDOW
st.wShowWindow = subprocess.SW_HIDE
return st
else:
return None
@staticmethod
def get_system():
"""
获取操作系统类型
"""
if SystemUtils.is_windows():
return OsType.WINDOWS
elif SystemUtils.is_synology():
return OsType.SYNOLOGY
elif SystemUtils.is_docker():
return OsType.DOCKER
elif SystemUtils.is_macos():
return OsType.MACOS
else:
return OsType.LINUX
@staticmethod
def get_local_time(utc_time_str):
"""
通过UTC的时间字符串获取时间
"""
try:
utc_date = datetime.datetime.strptime(utc_time_str.replace('0000', ''), '%Y-%m-%dT%H:%M:%S.%fZ')
local_date = utc_date + datetime.timedelta(hours=8)
local_date_str = datetime.datetime.strftime(local_date, '%Y-%m-%d %H:%M:%S')
except Exception as e:
ExceptionUtils.exception_traceback(e)
return utc_time_str
return local_date_str
@staticmethod
def check_process(pname):
"""
        检查进程是否存在
"""
if not pname:
return False
for process in psutil.process_iter():
if process.name() == pname:
return True
return False
@staticmethod
def execute(cmd):
"""
执行命令,获得返回结果
"""
try:
with os.popen(cmd) as p:
return p.readline().strip()
except Exception as err:
print(str(err))
return ""
@staticmethod
def is_docker():
return os.path.exists('/.dockerenv')
@staticmethod
def is_synology():
if SystemUtils.is_windows():
return False
return True if "synology" in SystemUtils.execute('uname -a') else False
@staticmethod
def is_windows():
return True if os.name == "nt" else False
@staticmethod
def is_macos():
return True if platform.system() == 'Darwin' else False
@staticmethod
def is_lite_version():
return True if SystemUtils.is_docker() \
and os.environ.get("NASTOOL_VERSION") == "lite" else False
@staticmethod
def get_webdriver_path():
if SystemUtils.is_lite_version():
return None
else:
return WEBDRIVER_PATH.get(SystemUtils.get_system().value)
@staticmethod
def chmod755(filePath):
if not os.path.exists(filePath):
return
if not SystemUtils.is_docker() \
and not SystemUtils.is_macos() \
and not SystemUtils.is_synology():
return
os.chmod(filePath, 0o755)
@staticmethod
def get_download_webdriver_path():
download_webdriver_path = os.path.join(Config().get_config_path(), "webdriver")
try:
os.makedirs(download_webdriver_path, exist_ok=True)
except OSError as e:
pass
return download_webdriver_path
@staticmethod
def copy(src, dest):
"""
复制
"""
try:
shutil.copy2(os.path.normpath(src), os.path.normpath(dest))
return 0, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def move(src, dest):
"""
移动
"""
try:
tmp_file = os.path.normpath(os.path.join(os.path.dirname(src),
os.path.basename(dest)))
shutil.move(os.path.normpath(src), tmp_file)
shutil.move(tmp_file, os.path.normpath(dest))
return 0, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def link(src, dest):
"""
硬链接
"""
try:
if platform.release().find("-z4-") >= 0:
# 兼容极空间Z4
tmp = os.path.normpath(os.path.join(PathUtils.get_parent_paths(dest, 2),
os.path.basename(dest)))
os.link(os.path.normpath(src), tmp)
shutil.move(tmp, os.path.normpath(dest))
else:
os.link(os.path.normpath(src), os.path.normpath(dest))
return 0, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def softlink(src, dest):
"""
软链接
"""
try:
os.symlink(os.path.normpath(src), os.path.normpath(dest))
return 0, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def rclone_move(src, dest):
"""
Rclone移动
"""
try:
src = os.path.normpath(src)
dest = dest.replace("\\", "/")
retcode = subprocess.run(['rclone', 'moveto',
src,
f'NASTOOL:{dest}'],
startupinfo=SystemUtils.__get_hidden_shell()).returncode
return retcode, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def rclone_copy(src, dest):
"""
Rclone复制
"""
try:
src = os.path.normpath(src)
dest = dest.replace("\\", "/")
retcode = subprocess.run(['rclone', 'copyto',
src,
f'NASTOOL:{dest}'],
startupinfo=SystemUtils.__get_hidden_shell()).returncode
return retcode, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def minio_move(src, dest):
"""
Minio移动
"""
try:
src = os.path.normpath(src)
dest = dest.replace("\\", "/")
if dest.startswith("/"):
dest = dest[1:]
retcode = subprocess.run(['mc', 'mv',
'--recursive',
src,
f'NASTOOL/{dest}'],
startupinfo=SystemUtils.__get_hidden_shell()).returncode
return retcode, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def minio_copy(src, dest):
"""
Minio复制
"""
try:
src = os.path.normpath(src)
dest = dest.replace("\\", "/")
if dest.startswith("/"):
dest = dest[1:]
retcode = subprocess.run(['mc', 'cp',
'--recursive',
src,
f'NASTOOL/{dest}'],
startupinfo=SystemUtils.__get_hidden_shell()).returncode
return retcode, ""
except Exception as err:
ExceptionUtils.exception_traceback(err)
return -1, str(err)
@staticmethod
def get_windows_drives():
"""
获取Windows所有盘符
"""
vols = []
for i in range(65, 91):
vol = chr(i) + ':'
if os.path.isdir(vol):
vols.append(vol)
return vols
def find_hardlinks(self, file, fdir=None):
"""
查找文件的所有硬链接
"""
ret_files = []
if os.name == "nt":
ret = subprocess.run(
['fsutil', 'hardlink', 'list', file],
startupinfo=self.__get_hidden_shell(),
stdout=subprocess.PIPE
)
if ret.returncode != 0:
return []
if ret.stdout:
drive = os.path.splitdrive(file)[0]
link_files = ret.stdout.decode('GBK').replace('\\', '/').split('\r\n')
for link_file in link_files:
if link_file \
and "$RECYCLE.BIN" not in link_file \
and os.path.normpath(file) != os.path.normpath(f'{drive}{link_file}'):
link_file = f'{drive.upper()}{link_file}'
file_name = os.path.basename(link_file)
file_path = os.path.dirname(link_file)
ret_files.append({
"file": link_file,
"filename": file_name,
"filepath": file_path
})
else:
inode = os.stat(file).st_ino
if not fdir:
fdir = os.path.dirname(file)
stdout = subprocess.run(
['find', fdir, '-inum', str(inode)],
stdout=subprocess.PIPE
).stdout
if stdout:
link_files = stdout.decode('utf-8').split('\n')
for link_file in link_files:
if link_file \
and os.path.normpath(file) != os.path.normpath(link_file):
file_name = os.path.basename(link_file)
file_path = os.path.dirname(link_file)
ret_files.append({
"file": link_file,
"filename": file_name,
"filepath": file_path
})
return ret_files
@staticmethod
def get_free_space(path):
"""
获取指定路径的剩余空间(单位:GB)
"""
if not os.path.exists(path):
return 0.0
return psutil.disk_usage(path).free / 1024 / 1024 / 1024
@staticmethod
def get_total_space(path):
"""
获取指定路径的总空间(单位:GB)
"""
if not os.path.exists(path):
return 0.0
return psutil.disk_usage(path).total / 1024 / 1024 / 1024
@staticmethod
def calculate_space_usage(dir_list):
"""
计算多个目录的总可用空间/剩余空间(单位:GB),并去除重复磁盘
"""
if not dir_list:
return 0.0
if not isinstance(dir_list, list):
dir_list = [dir_list]
# 存储不重复的磁盘
disk_set = set()
# 存储总剩余空间
total_free_space = 0.0
# 存储总空间
total_space = 0.0
for dir_path in dir_list:
if not dir_path:
continue
if not os.path.exists(dir_path):
continue
# 获取目录所在磁盘
if os.name == "nt":
disk = os.path.splitdrive(dir_path)[0]
else:
disk = os.stat(dir_path).st_dev
# 如果磁盘未出现过,则计算其剩余空间并加入总剩余空间中
if disk not in disk_set:
disk_set.add(disk)
total_space += SystemUtils.get_total_space(dir_path)
total_free_space += SystemUtils.get_free_space(dir_path)
return total_space, total_free_space
@staticmethod
def get_all_processes():
def seconds_to_str(seconds):
hours, remainder = divmod(seconds, 3600)
minutes = remainder // 60
ret_str = f'{hours}小时{minutes}分钟' if hours > 0 else f'{minutes}分钟'
return ret_str
processes = []
for proc in psutil.process_iter(['pid', 'name', 'create_time', 'memory_info', 'status']):
try:
if proc.status() != psutil.STATUS_ZOMBIE:
                    runtime = datetime.datetime.now() - datetime.datetime.fromtimestamp(
                        int(proc.create_time()))
                    runtime_str = seconds_to_str(runtime.seconds)
                    mem_info = proc.memory_info()
if mem_info is not None:
mem_mb = round(mem_info.rss / (1024 * 1024), 1)
processes.append({
"id": proc.pid, "name": proc.name(), "time": runtime_str, "memory": mem_mb
})
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return processes
# 缩略路径
@staticmethod
def shorten_path(path, ignore = 'center', max_levels = 2, max_name = 40):
"""缩略路径
Args:
path (str): 原始路径
ignore (str, optional): 忽略的位置,忽略左侧为'left',忽略右侧为'right'. 默认忽略中间为'center'.
max_levels (int, optional): 最大的路径层级,不包含根目录和文件的目录数量. 默认为2.
max_name (int, optional): 最大的文件名长度,超长则忽略中间名称. 默认为40.
Returns:
str: 缩略后的路径
"""
parts = path.split(os.path.sep) # 使用操作系统的路径分隔符来拆分路径
root = parts[0]
parts = parts[1:]
file = ""
if os.path.isfile(path):
file = parts[-1] if len(parts[-1])<= max_name else parts[-1][:(max_name-3)//2]+'...'+parts[-1][-(max_name-3)//2:]
parts = parts[:-1] # 如果路径是文件,去掉最后一个部分(文件名)
if len(parts) <= max_levels:
return path # 如果路径层次小于等于max_levels,保留原始路径
else:
shortened_parts = []
if ignore == 'left':
shortened_parts.append('...')
for i in range(-max_levels, 0):
shortened_parts.append(parts[i])
elif ignore == 'right':
shortened_parts.append(root)
for i in range(max_levels):
shortened_parts.append(parts[i])
shortened_parts.append('...')
else:
shortened_parts.append(root)
for i in range(max_levels // 2):
shortened_parts.append(parts[i])
shortened_parts.append('...')
for i in range(-ceil(max_levels/2), 0):
shortened_parts.append(parts[i])
if file:
shortened_parts.append(file) # 文件则添加名称
        return os.path.sep.join(shortened_parts)
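# --- usage sketch (not from the repository) ----------------------------------
# get_free_space / get_total_space are thin wrappers over psutil.disk_usage plus
# a bytes-to-GB division; calculate_space_usage additionally dedupes paths by the
# disk they live on. The underlying psutil call, standalone:
import os
import psutil

_home = os.path.expanduser("~")
_usage = psutil.disk_usage(_home)
print(f"total: {_usage.total / 1024 ** 3:.1f} GB, free: {_usage.free / 1024 ** 3:.1f} GB")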
# ===== demigody_nas-tools/app/utils/image_utils.py =====
from PIL import Image
from collections import Counter
class ImageUtils:
@staticmethod
def calculate_theme_color(image_path):
# 打开图片并转换为RGB模式
img = Image.open(image_path).convert('RGB')
# 缩小图片尺寸以加快计算速度
img = img.resize((100, 100), resample=Image.BILINEAR)
# 获取所有像素颜色值
pixels = img.getdata()
# 统计每种颜色在像素中出现的频率
pixel_count = Counter(pixels)
# 找到出现频率最高的颜色,作为主题色
dominant_color = pixel_count.most_common(1)[0][0]
# 将主题色转换为16进制表示
theme_color = '#{:02x}{:02x}{:02x}'.format(*dominant_color)
# 返回主题色
return theme_color
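# --- usage sketch (not from the repository) ----------------------------------
# calculate_theme_color is a frequency count over downscaled pixels. A synthetic
# two-color image shows the Counter-based pick without needing a file on disk:
from collections import Counter
from PIL import Image

_img = Image.new('RGB', (100, 100), (200, 30, 30))   # mostly red
_img.paste((30, 30, 200), (0, 0, 20, 100))           # narrow blue stripe
_dominant = Counter(_img.getdata()).most_common(1)[0][0]
print('#{:02x}{:02x}{:02x}'.format(*_dominant))      # -> #c81e1e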
# ===== demigody_nas-tools/app/utils/ip_utils.py =====
import ipaddress
import socket
from urllib.parse import urlparse
class IpUtils:
@staticmethod
def is_ipv4(ip):
"""
判断是不是ipv4
"""
try:
socket.inet_pton(socket.AF_INET, ip)
except AttributeError: # no inet_pton here,sorry
try:
socket.inet_aton(ip)
except socket.error:
return False
return ip.count('.') == 3
except socket.error: # not a valid ip
return False
return True
@staticmethod
def is_ipv6(ip):
"""
判断是不是ipv6
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # not a valid ip
return False
return True
@staticmethod
def is_internal(hostname):
"""
判断一个host是内网还是外网
"""
hostname = urlparse(hostname).hostname
if IpUtils.is_ip(hostname):
return IpUtils.is_private_ip(hostname)
else:
return IpUtils.is_internal_domain(hostname)
@staticmethod
def is_ip(addr):
"""
判断是不是ip
"""
try:
socket.inet_aton(addr)
return True
except socket.error:
return False
@staticmethod
def is_internal_domain(domain):
"""
判断域名是否为内部域名
"""
# 获取域名对应的 IP 地址
try:
ip = socket.gethostbyname(domain)
except socket.error:
return False
# 判断 IP 地址是否属于内网 IP 地址范围
return IpUtils.is_private_ip(ip)
@staticmethod
def is_private_ip(ip_str):
"""
判断是不是内网ip
"""
try:
return ipaddress.ip_address(ip_str.strip()).is_private
except Exception as e:
print(e)
return False
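# --- usage sketch (not from the repository) ----------------------------------
# The internal/external decision above reduces to ipaddress.ip_address().is_private,
# with a DNS lookup in front when a hostname rather than an IP is given:
import ipaddress
import socket

for _host in ("192.168.1.10", "8.8.8.8"):
    print(_host, "private:", ipaddress.ip_address(_host).is_private)
print("localhost resolves to", socket.gethostbyname("localhost"))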
# ===== demigody_nas-tools/app/utils/scheduler_utils.py =====
import datetime
import random
from apscheduler.triggers.cron import CronTrigger
from apscheduler.util import undefined
import math
import log
class SchedulerUtils:
@staticmethod
def start_job(scheduler, func, func_desc, cron, next_run_time=undefined):
"""
解析任务的定时规则,启动定时服务
:param func: 可调用的一个函数,在指定时间运行
:param func_desc: 函数的描述,在日志中提现
        :param cron: 时间表达式,四种配置方法:
            1、配置cron表达式,只支持5位的cron表达式;
            2、配置时间范围,如08:00-09:00,表示在该时间范围内随机执行一次;
            3、配置固定时间,如08:00;
            4、配置间隔,单位小时,比如23.5;
        :param next_run_time: 下次运行时间
        """
if cron:
cron = cron.strip()
if cron.count(" ") == 4:
try:
scheduler.add_job(func=func,
trigger=CronTrigger.from_crontab(cron),
next_run_time=next_run_time)
except Exception as e:
log.info("%s时间cron表达式配置格式错误:%s %s" % (func_desc, cron, str(e)))
elif '-' in cron:
try:
time_range = cron.split("-")
start_time_range_str = time_range[0]
end_time_range_str = time_range[1]
start_time_range_array = start_time_range_str.split(":")
end_time_range_array = end_time_range_str.split(":")
start_hour = int(start_time_range_array[0])
start_minute = int(start_time_range_array[1])
end_hour = int(end_time_range_array[0])
end_minute = int(end_time_range_array[1])
def start_random_job():
task_time_count = random.randint(start_hour * 60 + start_minute, end_hour * 60 + end_minute)
SchedulerUtils.start_range_job(scheduler=scheduler,
func=func,
func_desc=func_desc,
hour=math.floor(task_time_count / 60),
minute=task_time_count % 60,
next_run_time=next_run_time)
scheduler.add_job(start_random_job,
"cron",
hour=start_hour,
minute=start_minute,
next_run_time=next_run_time)
log.info("%s服务时间范围随机模式启动,起始时间于%s:%s" % (
func_desc, str(start_hour).rjust(2, '0'), str(start_minute).rjust(2, '0')))
except Exception as e:
log.info("%s时间 时间范围随机模式 配置格式错误:%s %s" % (func_desc, cron, str(e)))
elif cron.find(':') != -1:
try:
hour = int(cron.split(":")[0])
minute = int(cron.split(":")[1])
except Exception as e:
log.info("%s时间 配置格式错误:%s" % (func_desc, str(e)))
hour = minute = 0
scheduler.add_job(func,
"cron",
hour=hour,
minute=minute,
next_run_time=next_run_time)
log.info("%s服务启动" % func_desc)
else:
try:
hours = float(cron)
except Exception as e:
log.info("%s时间 配置格式错误:%s" % (func_desc, str(e)))
hours = 0
if hours:
scheduler.add_job(func,
"interval",
hours=hours,
next_run_time=next_run_time)
log.info("%s服务启动" % func_desc)
@staticmethod
def start_range_job(scheduler, func, func_desc, hour, minute, next_run_time=None):
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
# 随机数从1秒开始,不在整点签到
second = random.randint(1, 59)
log.info("%s到时间 即将在%s-%s-%s,%s:%s:%s签到" % (
func_desc, str(year), str(month), str(day), str(hour), str(minute), str(second)))
if hour < 0 or hour > 24:
hour = -1
if minute < 0 or minute > 60:
minute = -1
if hour < 0 or minute < 0:
log.warn("%s时间 配置格式错误:不启动任务" % func_desc)
return
scheduler.add_job(func,
"date",
run_date=datetime.datetime(year, month, day, hour, minute, second),
next_run_time=next_run_time)
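# --- usage sketch (not from the repository) ----------------------------------
# A minimal sketch of how start_job is typically driven. The scheduler instance
# is assumed to be APScheduler's BackgroundScheduler (the project only passes a
# scheduler in); the lambda and the "08:00-09:00" random-window cron are made up
# for illustration.
from apscheduler.schedulers.background import BackgroundScheduler

_scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
SchedulerUtils.start_job(scheduler=_scheduler,
                         func=lambda: print("signin"),
                         func_desc="站点签到",
                         cron="08:00-09:00")
_scheduler.start()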
# ===== demigody_nas-tools/app/utils/__init__.py =====
from .dom_utils import DomUtils
from .episode_format import EpisodeFormat
from .http_utils import RequestUtils
from .json_utils import JsonUtils
from .number_utils import NumberUtils
from .path_utils import PathUtils
from .string_utils import StringUtils
from .system_utils import SystemUtils
from .tokens import Tokens
from .torrent import Torrent
from .cache_manager import cacheman, TokenCache, ConfigLoadCache, CategoryLoadCache, OpenAISessionCache
from .exception_utils import ExceptionUtils
from .rsstitle_utils import RssTitleUtils
from .nfo_reader import NfoReader
from .ip_utils import IpUtils
from .image_utils import ImageUtils
from .scheduler_utils import SchedulerUtils
# ===== demigody_nas-tools/app/utils/path_utils.py =====
import os
class PathUtils:
@staticmethod
def get_dir_files(in_path, exts="", filesize=0, episode_format=None):
"""
获得目录下的媒体文件列表List ,按后缀、大小、格式过滤
"""
if not in_path:
return []
if not os.path.exists(in_path):
return []
ret_list = []
if os.path.isdir(in_path):
for root, dirs, files in os.walk(in_path):
for file in files:
cur_path = os.path.join(root, file)
# 检查路径是否合法
if PathUtils.is_invalid_path(cur_path):
continue
# 检查格式匹配
if episode_format and not episode_format.match(file):
continue
# 检查后缀
if exts and os.path.splitext(file)[-1].lower() not in exts:
continue
# 检查文件大小
if filesize and os.path.getsize(cur_path) < filesize:
continue
# 命中
if cur_path not in ret_list:
ret_list.append(cur_path)
else:
# 检查路径是否合法
if PathUtils.is_invalid_path(in_path):
return []
# 检查后缀
if exts and os.path.splitext(in_path)[-1].lower() not in exts:
return []
# 检查格式
if episode_format and not episode_format.match(os.path.basename(in_path)):
return []
# 检查文件大小
if filesize and os.path.getsize(in_path) < filesize:
return []
ret_list.append(in_path)
return ret_list
@staticmethod
def get_dir_level1_files(in_path, exts=""):
"""
查询目录下的文件(只查询一级)
"""
ret_list = []
if not os.path.exists(in_path):
return []
for file in os.listdir(in_path):
path = os.path.join(in_path, file)
if os.path.isfile(path):
if not exts or os.path.splitext(file)[-1].lower() in exts:
ret_list.append(path)
return ret_list
@staticmethod
def get_dir_level1_medias(in_path, exts=""):
"""
根据后缀,返回目录下所有的文件及文件夹列表(只查询一级)
"""
ret_list = []
if not os.path.exists(in_path):
return []
if os.path.isdir(in_path):
for file in os.listdir(in_path):
path = os.path.join(in_path, file)
if os.path.isfile(path):
if not exts or os.path.splitext(file)[-1].lower() in exts:
ret_list.append(path)
else:
ret_list.append(path)
else:
ret_list.append(in_path)
return ret_list
@staticmethod
def is_invalid_path(path):
"""
判断是否不能处理的路径
"""
if not path:
return True
if path.find('/@Recycle/') != -1 or path.find('/#recycle/') != -1 or path.find('/.') != -1 or path.find(
'/@eaDir') != -1:
return True
return False
@staticmethod
def is_path_in_path(path1, path2):
"""
判断两个路径是否包含关系 path1 in path2
"""
if not path1 or not path2:
return False
path1 = os.path.normpath(path1).replace("\\", "/")
path2 = os.path.normpath(path2).replace("\\", "/")
if path1 == path2:
return True
path = os.path.dirname(path2)
while True:
if path == path1:
return True
path = os.path.dirname(path)
if path == os.path.dirname(path):
break
return False
@staticmethod
def get_bluray_dir(path):
"""
判断是否蓝光原盘目录,是则返回原盘的根目录,否则返回空
"""
if not path or not os.path.exists(path):
return None
if os.path.isdir(path):
if os.path.exists(os.path.join(path, "BDMV", "index.bdmv")):
return path
elif os.path.normpath(path).endswith("BDMV") \
and os.path.exists(os.path.join(path, "index.bdmv")):
return os.path.dirname(path)
elif os.path.normpath(path).endswith("STREAM") \
and os.path.exists(os.path.join(os.path.dirname(path), "index.bdmv")):
return PathUtils.get_parent_paths(path, 2)
else:
                # 电视剧原盘下会存在多个目录形如:Spider Man 2021/Disc1, Spider Man 2021/Disc2
for level1 in PathUtils.get_dir_level1_medias(path):
if os.path.exists(os.path.join(level1, "BDMV", "index.bdmv")):
return path
return None
else:
if str(os.path.splitext(path)[-1]).lower() in [".m2ts", ".ts"] \
and os.path.normpath(os.path.dirname(path)).endswith("STREAM") \
and os.path.exists(os.path.join(PathUtils.get_parent_paths(path, 2), "index.bdmv")):
return PathUtils.get_parent_paths(path, 3)
else:
return None
@staticmethod
def get_parent_paths(path, level: int = 1):
"""
获取父目录路径,level为向上查找的层数
"""
for lv in range(0, level):
path = os.path.dirname(path)
return path
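# --- usage sketch (not from the repository) ----------------------------------
# get_parent_paths is just os.path.dirname applied level times; the BDMV checks
# above use it to climb from a STREAM file back to the index.bdmv directory
# (2 levels up) and the disc root (3 levels up). A quick standalone check:
import os

_p = os.path.normpath("/media/Inception (2010)/BDMV/STREAM/00000.m2ts")
print(os.path.dirname(os.path.dirname(_p)))   # get_parent_paths(_p, 2) -> .../BDMV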
# ===== demigody_nas-tools/app/utils/torrent.py =====
import base64
import datetime
import json
import os.path
import time
import re
import tempfile
import hashlib
from typing import Optional, Dict
from urllib.parse import unquote, urlencode, urlparse
from bencode import bencode, bdecode
import log
from app.utils import StringUtils
from app.utils.http_utils import RequestUtils
from app.utils.types import MediaType
from config import Config
class Torrent:
_torrent_temp_path = None
def __init__(self):
self._torrent_temp_path = Config().get_temp_path()
if not os.path.exists(self._torrent_temp_path):
os.makedirs(self._torrent_temp_path, exist_ok=True)
def get_torrent_info(self, url, cookie=None, apikey=None, ua=None, referer=None, proxy=False):
"""
把种子下载到本地,返回种子内容
:param url: 种子链接
:param cookie: 站点Cookie
:param apikey: 站点apikey
:param ua: 站点UserAgent
:param referer: 关联地址,有的网站需要这个否则无法下载
:param proxy: 是否使用内置代理
:return: 种子保存路径、种子内容、种子文件列表主目录、种子文件列表、错误信息
"""
if not url:
return None, None, "", [], "URL为空"
if url.startswith("magnet:"):
return None, url, "", [], f"{url} 为磁力链接"
try:
# 下载保存种子文件
file_path, content, errmsg = self.save_torrent_file(url=url,
cookie=cookie,
apikey=apikey,
ua=ua,
referer=referer,
proxy=proxy)
if not file_path:
return None, content, "", [], errmsg
# 解析种子文件
files_folder, files, retmsg = self.get_torrent_files(file_path)
# 种子文件路径、种子内容、种子文件列表主目录、种子文件列表、错误信息
return file_path, content, files_folder, files, retmsg
except Exception as err:
return None, None, "", [], "下载种子文件出现异常:%s" % str(err)
def save_torrent_file(self, url, cookie=None, apikey=None, ua=None, referer=None, proxy=False):
"""
把种子下载到本地
:return: 种子保存路径,错误信息
"""
# base64编码处理
if url.startswith("["):
# 需要解码获取下载地址
url = self.get_download_url(url=url)
req = RequestUtils(headers=ua, cookies=cookie, apikey=apikey, proxies=Config().get_proxies() if proxy else None,
referer=referer).get_res(url=url, allow_redirects=False)
while req and req.status_code in [301, 302]:
url = req.headers['Location']
if url and url.startswith("magnet:"):
return None, url, f"获取到磁力链接:{url}"
req = RequestUtils(headers=ua, cookies=cookie, proxies=Config().get_proxies() if proxy else None,
referer=referer).get_res(url=url, allow_redirects=False)
if req and req.status_code == 200:
if not req.content:
return None, None, "未下载到种子数据"
# 解析内容格式
if req.text and str(req.text).startswith("magnet:"):
# 磁力链接
return None, req.text, "磁力链接"
elif req.text and "下载种子文件" in req.text:
# 首次下载提示页面
skip_flag = False
try:
form = re.findall(r'<form.*?action="(.*?)".*?>(.*?)</form>', req.text, re.S)
if form:
action = form[0][0]
if not action or action == "?":
action = url
elif not action.startswith('http'):
action = StringUtils.get_base_url(url) + action
inputs = re.findall(r'<input.*?name="(.*?)".*?value="(.*?)".*?>', form[0][1], re.S)
if action and inputs:
data = {}
for item in inputs:
data[item[0]] = item[1]
# 改写req
req = RequestUtils(headers=ua, cookies=cookie,
proxies=Config().get_proxies() if proxy else None,
referer=referer).post_res(url=action, data=data)
if req and req.status_code == 200:
# 检查是不是种子文件,如果不是抛出异常
bdecode(req.content)
# 跳过成功
log.info(f"【Downloader】触发了站点首次种子下载,已自动跳过:{url}")
skip_flag = True
elif req is not None:
log.warn(f"【Downloader】触发了站点首次种子下载,且无法自动跳过,"
f"返回码:{req.status_code},错误原因:{req.reason}")
else:
log.warn(f"【Downloader】触发了站点首次种子下载,且无法自动跳过:{url}")
except Exception as err:
log.warn(f"【Downloader】触发了站点首次种子下载,尝试自动跳过时出现错误:{str(err)},链接:{url}")
if not skip_flag:
return None, None, "种子数据有误,请确认链接是否正确,如为PT站点则需手工在站点下载一次种子"
else:
# 检查是不是种子文件,如果不是仍然抛出异常
try:
bdecode(req.content)
except Exception as err:
print(str(err))
return None, None, "种子数据有误,请确认链接是否正确"
# 读取种子文件名
file_name = self.__get_url_torrent_filename(req, url)
# 种子文件路径
file_path = os.path.join(self._torrent_temp_path, file_name)
# 种子内容
file_content = req.content
# 写入磁盘
with open(file_path, 'wb') as f:
f.write(file_content)
elif req is None:
return None, None, "无法打开链接:%s" % url
elif req.status_code == 429:
return None, None, "触发站点流控,请稍后重试"
else:
return None, None, "下载种子出错,状态码:%s" % req.status_code
return file_path, file_content, ""
@staticmethod
def get_download_url(url: str) -> Optional[str]:
"""
获取下载链接, url格式:[base64]url
"""
# 获取[]中的内容
m = re.search(r"\[(.*)](.*)", url)
if m:
# 参数
base64_str = m.group(1)
# URL
url = m.group(2)
if not base64_str:
return url
# 解码参数
req_str = base64.b64decode(base64_str.encode('utf-8')).decode('utf-8')
req_params: Dict[str, dict] = json.loads(req_str)
# 是否使用cookie
#if not req_params.get('cookie'):
# cookie = None
# 请求头
if req_params.get('header'):
headers = req_params.get('header')
else:
headers = None
if req_params.get('method') == 'get':
# GET请求
result = RequestUtils(headers=headers).get_res(url, params=req_params.get('params'))
else:
# POST请求
result = RequestUtils(headers=headers).post_res(url, params=req_params.get('params'))
if not result:
return None
if not req_params.get('result'):
return result.text
else:
data = result.json()
for key in str(req_params.get('result')).split("."):
data = data.get(key)
if not data:
return None
log.info(f"获取到下载地址:{data}")
return data
return None
@staticmethod
def get_torrent_files(path):
"""
解析Torrent文件,获取文件清单
:return: 种子文件列表主目录、种子文件列表、错误信息
"""
if not path or not os.path.exists(path):
return "", [], f"种子文件不存在:{path}"
file_names = []
file_folder = ""
try:
torrent = bdecode(open(path, 'rb').read())
if torrent.get("info"):
files = torrent.get("info", {}).get("files") or []
if files:
for item in files:
if item.get("path"):
file_names.append(item["path"][0])
file_folder = torrent.get("info", {}).get("name")
else:
file_names.append(torrent.get("info", {}).get("name"))
except Exception as err:
return file_folder, file_names, "解析种子文件异常:%s" % str(err)
return file_folder, file_names, ""
def read_torrent_content(self, path):
"""
读取本地种子文件的内容
:return: 种子内容、种子文件列表主目录、种子文件列表、错误信息
"""
if not path or not os.path.exists(path):
return None, "", [], "种子文件不存在:%s" % path
content, retmsg, file_folder, files = None, "", "", []
try:
# 读取种子文件内容
with open(path, 'rb') as f:
content = f.read()
# 解析种子文件
file_folder, files, retmsg = self.get_torrent_files(path)
except Exception as e:
retmsg = "读取种子文件出错:%s" % str(e)
return content, file_folder, files, retmsg
@staticmethod
def __get_url_torrent_filename(req, url):
"""
从下载请求中获取种子文件名
"""
if not req:
return ""
disposition = req.headers.get('content-disposition') or ""
file_name = re.findall(r"filename=\"?(.+)\"?", disposition)
if file_name:
file_name = unquote(str(file_name[0].encode('ISO-8859-1').decode()).split(";")[0].strip())
if file_name.endswith('"'):
file_name = file_name[:-1]
elif url and url.endswith(".torrent"):
file_name = unquote(url.split("/")[-1])
else:
file_name = str(datetime.datetime.now())
return file_name.replace('/', '')
@staticmethod
def get_intersection_episodes(target, source, title):
"""
对两个季集字典进行判重,有相同项目的取集的交集
"""
if not source or not title:
return target
if not source.get(title):
return target
if not target.get(title):
target[title] = source.get(title)
return target
index = -1
for target_info in target.get(title):
index += 1
source_info = None
for info in source.get(title):
if info.get("season") == target_info.get("season"):
source_info = info
break
if not source_info:
continue
if not source_info.get("episodes"):
continue
if not target_info.get("episodes"):
target_episodes = source_info.get("episodes")
target[title][index]["episodes"] = target_episodes
continue
target_episodes = list(set(target_info.get("episodes")).intersection(set(source_info.get("episodes"))))
target[title][index]["episodes"] = target_episodes
return target
@staticmethod
def get_download_list(media_list, download_order):
"""
对媒体信息进行排序、去重
"""
if not media_list:
return []
# 排序函数,标题、站点、资源类型、做种数量
def get_sort_str(x):
season_len = str(len(x.get_season_list())).rjust(2, '0')
episode_len = str(len(x.get_episode_list())).rjust(4, '0')
# 排序:标题、资源类型、站点、做种、季集
if download_order == "seeder":
return "%s%s%s%s%s" % (str(x.title).ljust(100, ' '),
str(x.res_order).rjust(3, '0'),
str(x.seeders).rjust(10, '0'),
str(x.site_order).rjust(3, '0'),
"%s%s" % (season_len, episode_len))
else:
return "%s%s%s%s%s" % (str(x.title).ljust(100, ' '),
str(x.res_order).rjust(3, '0'),
str(x.site_order).rjust(3, '0'),
str(x.seeders).rjust(10, '0'),
"%s%s" % (season_len, episode_len))
# 匹配的资源中排序分组选最好的一个下载
# 按站点顺序、资源匹配顺序、做种人数下载数逆序排序
media_list = sorted(media_list, key=lambda x: get_sort_str(x), reverse=True)
# 控重
can_download_list_item = []
can_download_list = []
# 排序后重新加入数组,按真实名称控重,即只取每个名称的第一个
for t_item in media_list:
# 控重的主链是名称、年份、季、集
if t_item.type != MediaType.MOVIE:
media_name = "%s%s" % (t_item.get_title_string(),
t_item.get_season_episode_string())
else:
media_name = t_item.get_title_string()
if media_name not in can_download_list:
can_download_list.append(media_name)
can_download_list_item.append(t_item)
return can_download_list_item
@staticmethod
def magent2torrent(url, path, timeout=20):
"""
磁力链接转种子文件
:param url: 磁力链接
:param path: 保存目录
:param timeout: 获取元数据超时时间
:return: 转换后种子路径
"""
log.info(f"【Downloader】转换磁力链接:{url}")
        # libtorrent 为可选依赖,此处按需导入(未安装时本函数不可用)
        import libtorrent
        session = libtorrent.session()
        magnet_info = libtorrent.parse_magnet_uri(url)
magnet_info.save_path = path
handle = session.add_torrent(magnet_info)
log.debug("【Downloader】获取元数据中")
tout = 0
while not handle.status().name:
time.sleep(1)
tout += 1
if tout > timeout:
log.debug("【Downloader】元数据获取超时")
return None, "种子元数据获取超时"
session.pause()
log.debug("【Downloader】获取元数据完成")
tf = handle.torrent_file()
        ti = libtorrent.torrent_info(tf)
        torrent_file = libtorrent.create_torrent(ti)
torrent_file.set_comment(ti.comment())
torrent_file.set_creator(ti.creator())
file_path = os.path.join(path, "%s.torrent" % handle.status().name)
with open(file_path, 'wb') as f_handle:
            f_handle.write(libtorrent.bencode(torrent_file.generate()))
session.remove_torrent(handle, 1)
log.info(f"【Downloader】转换后的种子路径:{file_path}")
return file_path, ""
@staticmethod
def _write_binary_to_temp_file(binary_data):
"""
种子内容转种子文件
:param binary_data: 种子内容
:return: 转换后种子路径
"""
try:
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.write(binary_data)
temp_file.close()
return temp_file.name
except Exception as e:
log.error(f"【Downloader】种子内容无法写入临时文件")
return None
@staticmethod
def _parse_torrent_dict(torrent_data):
"""
获取种子文件的信息
:param torrent_data: 种子内容的二进制数据
:return: 种子文件的信息
"""
try:
torrent_dict = bdecode(torrent_data)
return torrent_dict
except Exception as e:
log.error(f"【Downloader】无法解析种子文件内容")
return None
@staticmethod
def _create_magnet_link(torrent_dict):
"""
根据种子信息生成磁力链接
:param torrent_dict: 种子信息
:return: 磁力链接
"""
if torrent_dict is None:
return None
magnet_info = {}
if 'info' in torrent_dict:
info_hash = hashlib.sha1(bencode(torrent_dict['info'])).hexdigest()
magnet_info['xt'] = 'urn:btih:' + info_hash
if 'name' in torrent_dict['info']:
magnet_info['dn'] = torrent_dict['info']['name']
if 'announce' in torrent_dict:
magnet_info['tr'] = torrent_dict['announce']
if 'announce-list' in torrent_dict:
magnet_info['tr'] = [announce[0] for announce in torrent_dict['announce-list']]
magnet_link = 'magnet:?{}'.format(urlencode(magnet_info))
return magnet_link
@staticmethod
def binary_data_to_magnet_link(binary_data):
"""
根据种子内容生成磁力链接
:param binary_data: 种子内容
:return: 磁力链接
"""
temp_file_path = Torrent._write_binary_to_temp_file(binary_data)
if not temp_file_path:
return None
with open(temp_file_path, 'rb') as torrent_file:
torrent_data = torrent_file.read()
torrent_dict = Torrent._parse_torrent_dict(torrent_data)
magnet_link = Torrent._create_magnet_link(torrent_dict)
Torrent._close_and_delete_file(temp_file_path)
return magnet_link
@staticmethod
def _close_and_delete_file(file_path):
"""
清理临时生成的种子文件
:param file_path: 种子文件路径
:return: 是否删除成功
"""
try:
with open(file_path, 'r+') as file:
file.close()
except:
pass
try:
os.remove(file_path)
return True
except Exception as e:
return False
@staticmethod
def is_magnet(link):
"""
判断是否是磁力
"""
return link.lower().startswith("magnet:?xt=urn:btih:")
@staticmethod
def maybe_torrent_url(link):
"""
判断是否可能是种子url
"""
try:
parsed = urlparse(link)
return bool(parsed.netloc) and parsed.scheme in ['http', 'https', 'ftp']
except Exception as err:
return False
@staticmethod
def format_enclosure(link):
"""
格式化一个链接
如果是磁力链接或者为私有PT站点则直接返回
如果不是磁力链接看是否是种子链接,如果是则下载种子后转换为磁力链接
"""
if not StringUtils.is_string_and_not_empty(link):
return None
if Torrent.is_magnet(link):
return link
if not Torrent.maybe_torrent_url(link):
return None
_, torrent_content, _, _, retmsg = Torrent().get_torrent_info(link)
if not torrent_content:
print(f"下载种子文件出错: {retmsg}")
return None
return torrent_content
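# --- usage sketch (not from the repository) ----------------------------------
# _create_magnet_link derives the BTIH from the sha1 of the bencoded info dict.
# Assuming the same `bencode` package this module imports (whose bencode()
# returns bytes), a minimal round trip with a made-up info dict:
import hashlib
from urllib.parse import urlencode
from bencode import bencode

_info = {'name': 'example', 'piece length': 262144, 'pieces': '', 'length': 1}
_btih = hashlib.sha1(bencode(_info)).hexdigest()
print('magnet:?' + urlencode({'xt': 'urn:btih:' + _btih, 'dn': _info['name']}))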
# ===== demigody_nas-tools/app/utils/nfo_reader.py =====
import xml.etree.ElementTree as ET
class NfoReader:
def __init__(self, xml_file_path):
self.xml_file_path = xml_file_path
self.tree = ET.parse(xml_file_path)
self.root = self.tree.getroot()
def get_element_value(self, element_path):
element = self.root.find(element_path)
return element.text if element is not None else None
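# --- usage sketch (not from the repository) ----------------------------------
# NfoReader is a thin wrapper over ElementTree's find(); the same lookup against
# an in-memory NFO document:
import xml.etree.ElementTree as ET

_root = ET.fromstring("<movie><title>Inception</title><year>2010</year></movie>")
print(_root.findtext("title"), _root.findtext("year"))   # Inception 2010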
# ===== demigody_nas-tools/app/utils/number_utils.py =====
class NumberUtils:
@staticmethod
def max_ele(a, b):
"""
返回非空最大值
"""
if not a:
return b
if not b:
return a
return max(int(a), int(b))
@staticmethod
def get_size_gb(size):
"""
将字节转换为GB
"""
if not size:
return 0.0
return float(size) / 1024 / 1024 / 1024
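# --- usage sketch (not from the repository) ----------------------------------
# max_ele treats falsy values as "missing" rather than as zero:
print(NumberUtils.max_ele(None, 5))            # 5
print(NumberUtils.max_ele("7", 5))             # 7
print(NumberUtils.get_size_gb(2 * 1024 ** 3))  # 2.0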
# ===== demigody_nas-tools/app/utils/dom_utils.py =====
class DomUtils:
@staticmethod
def tag_value(tag_item, tag_name, attname="", default=None):
"""
解析XML标签值
"""
tagNames = tag_item.getElementsByTagName(tag_name)
if tagNames:
if attname:
attvalue = tagNames[0].getAttribute(attname)
if attvalue:
return attvalue
else:
firstChild = tagNames[0].firstChild
if firstChild:
return firstChild.data
return default
@staticmethod
def add_node(doc, parent, name, value=None):
"""
添加一个DOM节点
"""
node = doc.createElement(name)
parent.appendChild(node)
if value is not None:
text = doc.createTextNode(str(value))
node.appendChild(text)
return node
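# --- usage sketch (not from the repository) ----------------------------------
# tag_value reads either a named attribute or the first text child of a tag:
from xml.dom import minidom

_doc = minidom.parseString('<root><rating max="10">8.8</rating></root>')
print(DomUtils.tag_value(_doc, "rating"))                 # 8.8
print(DomUtils.tag_value(_doc, "rating", attname="max"))  # 10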
# ===== demigody_nas-tools/app/utils/exception_utils.py =====
# -*- coding: utf-8 -*-
import traceback
class ExceptionUtils:
@classmethod
def exception_traceback(cls, e):
print(f"\nException: {str(e)}\nCallstack:\n{traceback.format_exc()}\n")
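# --- usage sketch (not from the repository) ----------------------------------
# Typical call site: pair the caught exception with its formatted traceback.
try:
    1 / 0
except Exception as e:
    ExceptionUtils.exception_traceback(e)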
# ===== demigody_nas-tools/app/utils/types.py =====
from enum import Enum
class MyMediaLibraryType(Enum):
MINE = '我的媒体库'
WATCHING = '正在观看'
NEWESTADD = '最新入库'
class MediaType(Enum):
TV = '电视剧'
MOVIE = '电影'
ANIME = '动漫'
UNKNOWN = '未知'
class DownloaderType(Enum):
QB = 'Qbittorrent'
TR = 'Transmission'
UT = 'uTorrent'
PAN115 = '115网盘'
ARIA2 = 'Aria2'
PIKPAK = 'PikPak'
class SyncType(Enum):
MAN = "手动整理"
MON = "目录同步"
class SearchType(Enum):
WX = "微信"
WEB = "WEB"
DB = "豆瓣"
RSS = "电影/电视剧订阅"
USERRSS = "自定义订阅"
OT = "手动下载"
TG = "Telegram"
API = "第三方API请求"
SLACK = "Slack"
SYNOLOGY = "Synology Chat"
PLUGIN = "插件"
class RmtMode(Enum):
LINK = "硬链接"
SOFTLINK = "软链接"
COPY = "复制"
MOVE = "移动"
RCLONECOPY = "Rclone复制"
RCLONE = "Rclone移动"
MINIOCOPY = "Minio复制"
MINIO = "Minio移动"
class MatchMode(Enum):
NORMAL = "正常模式"
STRICT = "严格模式"
class OsType(Enum):
WINDOWS = "Windows"
LINUX = "Linux"
SYNOLOGY = "Synology"
MACOS = "MacOS"
DOCKER = "Docker"
class IndexerType(Enum):
BUILTIN = "Indexer"
JACKETT = "Jackett"
PROWLARR = "Prowlarr"
class MediaServerType(Enum):
JELLYFIN = "Jellyfin"
EMBY = "Emby"
PLEX = "Plex"
class BrushDeleteType(Enum):
NOTDELETE = "不删除"
SEEDTIME = "做种时间"
RATIO = "分享率"
UPLOADSIZE = "上传量"
DLTIME = "下载耗时"
AVGUPSPEED = "平均上传速度"
IATIME = "未活动时间"
# 站点框架
class SiteSchema(Enum):
DiscuzX = "Discuz!"
Gazelle = "Gazelle"
Ipt = "IPTorrents"
NexusPhp = "NexusPhp"
NexusProject = "NexusProject"
NexusRabbit = "NexusRabbit"
SmallHorse = "Small Horse"
Unit3d = "Unit3d"
TorrentLeech = "TorrentLeech"
FileList = "FileList"
TNode = "TNode"
# 可监听事件
class EventType(Enum):
# Emby Webhook通知
EmbyWebhook = "emby.webhook"
# Jellyfin Webhook通知
JellyfinWebhook = "jellyfin.webhook"
# Plex Webhook通知
PlexWebhook = "plex.webhook"
# 新增下载
DownloadAdd = "download.add"
# 下载失败
DownloadFail = "download.fail"
# 入库完成
TransferFinished = "transfer.finished"
# 入库失败
TransferFail = "transfer.fail"
# 下载字幕
SubtitleDownload = "subtitle.download"
# 新增订阅
SubscribeAdd = "subscribe.add"
# 订阅完成
SubscribeFinished = "subscribe.finished"
# 交互消息
MessageIncoming = "message.incoming"
# 开始搜索
SearchStart = "search.start"
# 源文件被删除
SourceFileDeleted = "sourcefile.deleted"
    # 媒体库文件被删除
LibraryFileDeleted = "libraryfile.deleted"
# 刮削媒体信息
MediaScrapStart = "media.scrap.start"
# 插件重载
PluginReload = "plugin.reload"
# 豆瓣想看同步
DoubanSync = "douban.sync"
# 辅种任务开始
AutoSeedStart = "autoseed.start"
# 刷新媒体库
RefreshMediaServer = "refresh.mediaserver"
# 站点签到
SiteSignin = "site.signin"
# 系统配置Key字典
class SystemConfigKey(Enum):
# 同步媒体库范围
SyncLibrary = "SyncLibrary"
# 媒体库显示模块
LibraryDisplayModule = "LibraryDisplayModule"
# 站点Cookie获取参数
CookieUserInfo = "CookieUserInfo"
# CookieCloud同步参数
CookieCloud = "CookieCloud"
# 自定义JS/CSS
CustomScript = "CustomScript"
# 用户认证参数
UserSiteAuthParams = "UserSiteAuthParams"
# 默认下载器
DefaultDownloader = "DefaultDownloader"
# 默认下载设置
DefaultDownloadSetting = "DefaultDownloadSetting"
# 默认电影订阅设置
DefaultRssSettingMOV = "DefaultRssSettingMOV"
# 默认电视剧订阅设置
DefaultRssSettingTV = "DefaultRssSettingTV"
# 用户已安装的插件
UserInstalledPlugins = "UserInstalledPlugins"
# 已安装插件汇报状态
UserInstalledPluginsReport = "UserInstalledPluginsReport"
    # 刮削配置
UserScraperConf = "UserScraperConf"
# 索引站点
UserIndexerSites = "UserIndexerSites"
# 处理进度Key字典
class ProgressKey(Enum):
# 搜索
Search = "search"
# 转移
FileTransfer = "filetransfer"
# 媒体库同步
MediaSync = "mediasync"
# 站点Cookie获取
SiteCookie = "sitecookie"
class RssType(Enum):
# 手动
Manual = "manual"
# 自动
Auto = "auto"
# 电影类型关键字
MovieTypes = ['MOV', '电影', MediaType.MOVIE]
# 电视剧类型关键字
TvTypes = ['TV', '电视剧', MediaType.TV]
# 内置索引器文件md5值
BuiltinIndexerFileMd5 = "ca07b56eb946780a4fe3d1f5f71173b7"
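# --- usage sketch (not from the repository) ----------------------------------
# Enum members resolve from their Chinese display values, which is why the
# MovieTypes/TvTypes keyword lists above can mix plain strings and members:
print(MediaType('电影') is MediaType.MOVIE)   # True
print(MediaType.MOVIE in MovieTypes)          # True
print(RmtMode.LINK.value)                     # 硬链接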
# ===== demigody_nas-tools/app/sites/sites.py =====
import json
from datetime import datetime
import log
from app.conf import SystemConfig
from app.helper import ChromeHelper, SiteHelper, DbHelper
from app.message import Message
from app.sites.mt import MtFunc
from app.sites.site_limiter import SiteRateLimiter
from app.utils import RequestUtils, StringUtils
from app.utils.commons import singleton
from config import Config
@singleton
class Sites:
message = None
dbhelper = None
_sites = []
_siteByIds = {}
_siteByUrls = {}
_site_favicons = {}
_rss_sites = []
_brush_sites = []
_statistic_sites = []
_signin_sites = []
_limiters = {}
_MAX_CONCURRENCY = 10
def __init__(self):
self.init_config()
def init_config(self):
self.dbhelper = DbHelper()
self.systemconfig = SystemConfig()
self.message = Message()
# 原始站点列表
self._sites = []
# ID存储站点
self._siteByIds = {}
# URL存储站点
self._siteByUrls = {}
# 开启订阅功能站点
self._rss_sites = []
# 开启刷流功能站点:
self._brush_sites = []
# 开启统计功能站点:
self._statistic_sites = []
# 开启签到功能站点:
self._signin_sites = []
# 站点限速器
self._limiters = {}
# 站点图标
self.init_favicons()
# 站点数据
self._sites = self.dbhelper.get_config_site()
for site in self._sites:
# 站点属性
site_note = self.__get_site_note_items(site.NOTE)
# 站点用途:Q签到、D订阅、S刷流
site_rssurl = site.RSSURL
site_signurl = site.SIGNURL
site_cookie = site.COOKIE
site_apikey = site.APIKEY
site_uses = site.INCLUDE or ''
uses = []
if site_uses:
rss_enable = True if "D" in site_uses and site_rssurl else False
brush_enable = True if "S" in site_uses and site_rssurl and (site_cookie or site_apikey) else False
statistic_enable = True if "T" in site_uses and (site_rssurl or site_signurl) and site_cookie else False
uses.append("D") if rss_enable else None
uses.append("S") if brush_enable else None
uses.append("T") if statistic_enable else None
else:
rss_enable = False
brush_enable = False
statistic_enable = False
site_info = {
"id": site.ID,
"name": site.NAME,
"pri": site.PRI or 0,
"rssurl": site_rssurl,
"signurl": site_signurl,
"cookie": site_cookie,
"apikey": site_apikey,
"rule": site_note.get("rule"),
"download_setting": site_note.get("download_setting"),
"rss_enable": rss_enable,
"brush_enable": brush_enable,
"statistic_enable": statistic_enable,
"uses": uses,
"ua": site_note.get("ua"),
"parse": True if site_note.get("parse") == "Y" else False,
"unread_msg_notify": True if site_note.get("message") == "Y" else False,
"chrome": True if site_note.get("chrome") == "Y" else False,
"proxy": True if site_note.get("proxy") == "Y" else False,
"subtitle": True if site_note.get("subtitle") == "Y" else False,
"tags": site_note.get("tags"),
"limit_interval": site_note.get("limit_interval"),
"limit_count": site_note.get("limit_count"),
"limit_seconds": site_note.get("limit_seconds"),
"strict_url": StringUtils.get_base_url(site_signurl or site_rssurl)
}
# 以ID存储
self._siteByIds[site.ID] = site_info
# 以域名存储
site_strict_url = StringUtils.get_url_domain(site.SIGNURL or site.RSSURL)
if site_strict_url:
self._siteByUrls[site_strict_url] = site_info
# 初始化站点限速器
            limit_interval = site_note.get("limit_interval")
            limit_count = site_note.get("limit_count")
            limit_seconds = site_note.get("limit_seconds")
            has_count_limit = limit_interval and str(limit_interval).isdigit() \
                and limit_count and str(limit_count).isdigit()
            self._limiters[site.ID] = SiteRateLimiter(
                limit_interval=int(limit_interval) * 60 if has_count_limit else None,
                limit_count=int(limit_count) if has_count_limit else None,
                limit_seconds=int(limit_seconds) if limit_seconds and str(limit_seconds).isdigit() else None
            )
def init_favicons(self):
"""
加载图标到内存
"""
self._site_favicons = {site.SITE: site.FAVICON for site in self.dbhelper.get_site_favicons()}
def get_sites(self,
siteid=None,
siteurl=None,
siteids=None,
rss=False,
brush=False,
statistic=False):
"""
获取站点配置
"""
if siteid:
return self._siteByIds.get(int(siteid)) or {}
if siteurl:
return self._siteByUrls.get(StringUtils.get_url_domain(siteurl)) or {}
ret_sites = []
for site in self._siteByIds.values():
if rss and not site.get('rss_enable'):
continue
if brush and not site.get('brush_enable'):
continue
if statistic and not site.get('statistic_enable'):
continue
if siteids and str(site.get('id')) not in siteids:
continue
ret_sites.append(site)
return ret_sites
def get_sites_by_url_domain(self, url):
"""
根据传入的url获取站点配置
"""
return self._siteByUrls.get(StringUtils.get_url_domain(url))
def check_ratelimit(self, site_id):
"""
检查站点是否触发流控
:param site_id: 站点ID
:return: True为触发了流控,False为未触发
"""
if not self._limiters.get(site_id):
return False
state, msg = self._limiters[site_id].check_rate_limit()
if msg:
log.warn(f"【Sites】站点 {self._siteByIds[site_id].get('name')} {msg}")
return state
def get_sites_by_suffix(self, suffix):
"""
根据url的后缀获取站点配置
"""
for key in self._siteByUrls:
# 使用.分割后再将最后两位(顶级域和二级域)拼起来
key_parts = key.split(".")
key_end = ".".join(key_parts[-2:])
# 将拼起来的结果与参数进行对比
if suffix == key_end:
return self._siteByUrls[key]
return {}
def get_sites_by_name(self, name):
"""
根据站点名称获取站点配置
"""
ret_sites = []
for site in self._siteByIds.values():
if site.get("name") == name:
ret_sites.append(site)
return ret_sites
def get_max_site_pri(self):
"""
获取最大站点优先级
"""
if not self._siteByIds:
return 0
return max([int(site.get("pri")) for site in self._siteByIds.values()])
def get_site_dict(self,
rss=False,
brush=False,
statistic=False):
"""
获取站点字典
"""
return [
{
"id": site.get("id"),
"name": site.get("name")
} for site in self.get_sites(
rss=rss,
brush=brush,
statistic=statistic
)
]
def get_site_names(self,
rss=False,
brush=False,
statistic=False):
"""
获取站点名称
"""
return [
site.get("name") for site in self.get_sites(
rss=rss,
brush=brush,
statistic=statistic
)
]
def get_site_favicon(self, site_name=None):
"""
获取站点图标
"""
if site_name:
return self._site_favicons.get(site_name)
else:
return self._site_favicons
def get_site_download_setting(self, site_name=None):
"""
获取站点下载设置
"""
if site_name:
for site in self._siteByIds.values():
if site.get("name") == site_name:
return site.get("download_setting")
return None
def get_site_download_tags(self, site_name=None):
"""
获取站点标签
"""
if site_name:
for site in self._siteByIds.values():
if site.get("name") == site_name:
return site.get("tags")
return None
def test_connection(self, site_id):
"""
测试站点连通性
:param site_id: 站点编号
:return: 是否连通、错误信息、耗时
"""
site_info = self.get_sites(siteid=site_id)
if not site_info:
return False, "站点不存在", 0
site_cookie = site_info.get("cookie")
site_apikey = site_info.get("apikey")
if not (site_cookie or site_apikey):
return False, "未配置站点Cookie", 0
ua = site_info.get("ua") or Config().get_ua()
site_url = StringUtils.get_base_url(site_info.get("signurl") or site_info.get("rssurl"))
if not site_url:
return False, "未配置站点地址", 0
        # 站点特殊处理:部分站点需访问首页 index.php
        if '1ptba' in site_url or 'zmpt' in site_url:
            site_url = site_url + '/index.php'
chrome = ChromeHelper()
if site_info.get("chrome") and chrome.get_status():
# 计时
start_time = datetime.now()
if not chrome.visit(url=site_url, ua=ua, cookie=site_cookie, proxy=site_info.get("proxy")):
return False, "Chrome模拟访问失败", 0
# 循环检测是否过cf
cloudflare = chrome.pass_cloudflare()
            seconds = int((datetime.now() - start_time).total_seconds() * 1000)
if not cloudflare:
return False, "跳转站点失败", seconds
# 判断是否已签到
html_text = chrome.get_html()
if not html_text:
return False, "获取站点源码失败", 0
if SiteHelper.is_logged_in(html_text):
return True, "连接成功", seconds
else:
return False, "Cookie失效", seconds
else:
# 计时
start_time = datetime.now()
if 'm-team' in site_url:
mt = MtFunc(site_info)
                seconds = int((datetime.now() - start_time).total_seconds() * 1000)
if mt.signin():
return True, "连接成功", seconds
else:
return False, "连接失败,请检查Cookie", seconds
else:
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=Config().get_proxies() if site_info.get(
"proxy") else None).get_res(url=site_url)
                seconds = int((datetime.now() - start_time).total_seconds() * 1000)
if res and res.status_code == 200:
if not SiteHelper.is_logged_in(res.text):
return False, "Cookie失效", seconds
else:
return True, "连接成功", seconds
elif res is not None:
return False, f"连接失败,状态码:{res.status_code}", seconds
else:
return False, "无法打开网站", seconds
@staticmethod
def __get_site_note_items(note):
"""
从note中提取站点信息
"""
infos = {}
if note:
infos = json.loads(note)
return infos
def add_site(self, name, site_pri,
rssurl=None, signurl=None, cookie=None, note=None, rss_uses=None, apikey=None):
"""
添加站点
"""
ret = self.dbhelper.insert_config_site(name=name,
site_pri=site_pri,
rssurl=rssurl,
signurl=signurl,
cookie=cookie,
apikey=apikey,
note=note,
rss_uses=rss_uses)
self.init_config()
return ret
def update_site(self, tid, name, site_pri,
rssurl, signurl, cookie, note, rss_uses, apikey=None):
"""
更新站点
"""
ret = self.dbhelper.update_config_site(tid=tid,
name=name,
site_pri=site_pri,
rssurl=rssurl,
signurl=signurl,
cookie=cookie,
apikey=apikey,
note=note,
rss_uses=rss_uses)
self.init_config()
return ret
def delete_site(self, siteid):
"""
删除站点
"""
ret = self.dbhelper.delete_config_site(siteid)
self.init_config()
return ret
def update_site_cookie(self, siteid, cookie, ua=None):
"""
更新站点Cookie和UA
"""
ret = self.dbhelper.update_site_cookie_ua(tid=siteid,
cookie=cookie,
ua=ua)
self.init_config()
return ret
def need_goto_user_detail_fetch(self, site_id):
"""
检查站点是否需要去用户信息页面拉取用户详情
:param site_id: 站点ID
:return: 是否需要去用户信息页面
"""
site_info = self.get_sites(siteid=site_id)
if not site_info:
return False, None
site_cookie = site_info.get("cookie")
if not site_cookie:
return False, None
ua = site_info.get("ua") or Config().get_ua()
site_url = StringUtils.get_base_url(site_info.get("signurl") or site_info.get("rssurl"))
if not site_url:
return False, None
if "hhanclub" in site_url:
hhanclub_pattern = r'<a href="claim\.php\?uid=(\d+)">'
return True, hhanclub_pattern
else:
return False, None
@staticmethod
def update_api_key(signurl, cookie, ua, proxy):
if 'm-team' in signurl:
_site_info = {
"cookie": cookie,
"ua": ua,
"proxy": proxy,
"strict_url": StringUtils.get_base_url(signurl)
}
_mt = MtFunc(_site_info)
return _mt.update_api_key()
return False, ""
| 15,969 | Python | .py | 404 | 24.205446 | 120 | 0.496186 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,015 | __init__.py | demigody_nas-tools/app/sites/__init__.py | from app.sites.site_userinfo import SiteUserInfo
from .sites import Sites
from .site_cookie import SiteCookie
from .site_subtitle import SiteSubtitle
from .siteconf import SiteConf
from .site_limiter import SiteRateLimiter
| 223 | Python | .py | 6 | 36.166667 | 48 | 0.866359 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,016 | site_cookie.py | demigody_nas-tools/app/sites/site_cookie.py | import base64
import time
from lxml import etree
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as es
from selenium.webdriver.support.wait import WebDriverWait
import log
from app.helper import ChromeHelper, ProgressHelper, OcrHelper, SiteHelper
from app.sites.siteconf import SiteConf
from app.sites.sites import Sites
from app.utils import StringUtils, RequestUtils, ExceptionUtils
from app.utils.commons import singleton
from app.utils.types import ProgressKey
@singleton
class SiteCookie(object):
progress = None
sites = None
siteconf = None
ocrhelper = None
captcha_code = {}
def __init__(self):
self.init_config()
def init_config(self):
self.progress = ProgressHelper()
self.sites = Sites()
self.siteconf = SiteConf()
self.ocrhelper = OcrHelper()
self.captcha_code = {}
def set_code(self, code, value):
"""
设置验证码的值
"""
self.captcha_code[code] = value
def get_code(self, code):
"""
获取验证码的值
"""
return self.captcha_code.get(code)
def __get_site_cookie_ua(self,
url,
username,
password,
twostepcode=None,
ocrflag=False,
proxy=False):
"""
获取站点cookie和ua
:param url: 站点地址
:param username: 用户名
:param password: 密码
:param twostepcode: 两步验证
:param ocrflag: 是否开启OCR识别
:param proxy: 是否使用内置代理
:return: cookie、ua、message
"""
if not url or not username or not password:
return None, None, "参数错误"
        # 检查浏览器内核环境
chrome = ChromeHelper()
if not chrome.get_status():
return None, None, "需要浏览器内核环境才能更新站点信息"
if not chrome.visit(url=url, proxy=proxy):
return None, None, "Chrome模拟访问失败"
# 循环检测是否过cf
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
return None, None, "跳转站点失败,无法通过Cloudflare验证"
# 登录页面代码
html_text = chrome.get_html()
if not html_text:
return None, None, "获取源码失败"
if SiteHelper.is_logged_in(html_text):
return chrome.get_cookies(), chrome.get_ua(), "已经登录过且Cookie未失效"
# 站点配置
login_conf = self.siteconf.get_login_conf()
# 查找用户名输入框
html = etree.HTML(html_text)
username_xpath = None
for xpath in login_conf.get("username"):
if html.xpath(xpath):
username_xpath = xpath
break
if not username_xpath:
return None, None, "未找到用户名输入框"
# 查找密码输入框
password_xpath = None
for xpath in login_conf.get("password"):
if html.xpath(xpath):
password_xpath = xpath
break
if not password_xpath:
return None, None, "未找到密码输入框"
# 查找两步验证码
twostepcode_xpath = None
for xpath in login_conf.get("twostep"):
if html.xpath(xpath):
twostepcode_xpath = xpath
break
# 查找验证码输入框
captcha_xpath = None
for xpath in login_conf.get("captcha"):
if html.xpath(xpath):
captcha_xpath = xpath
break
# 查找验证码图片
captcha_img_url = None
if captcha_xpath:
for xpath in login_conf.get("captcha_img"):
if html.xpath(xpath):
captcha_img_url = html.xpath(xpath)[0]
break
if not captcha_img_url:
return None, None, "未找到验证码图片"
# 查找登录按钮
submit_xpath = None
for xpath in login_conf.get("submit"):
if html.xpath(xpath):
submit_xpath = xpath
break
if not submit_xpath:
return None, None, "未找到登录按钮"
# 点击登录按钮
try:
submit_obj = WebDriverWait(driver=chrome.browser,
timeout=6).until(es.element_to_be_clickable((By.XPATH,
submit_xpath)))
if submit_obj:
# 输入用户名
chrome.browser.find_element(By.XPATH, username_xpath).send_keys(username)
# 输入密码
chrome.browser.find_element(By.XPATH, password_xpath).send_keys(password)
# 输入两步验证码
if twostepcode and twostepcode_xpath:
twostepcode_element = chrome.browser.find_element(By.XPATH, twostepcode_xpath)
if twostepcode_element.is_displayed():
twostepcode_element.send_keys(twostepcode)
# 识别验证码
if captcha_xpath:
captcha_element = chrome.browser.find_element(By.XPATH, captcha_xpath)
if captcha_element.is_displayed():
code_url = self.__get_captcha_url(url, captcha_img_url)
if ocrflag:
# 自动OCR识别验证码
captcha = self.get_captcha_text(chrome, code_url)
if captcha:
log.info("【Sites】验证码地址为:%s,识别结果:%s" % (code_url, captcha))
else:
return None, None, "验证码识别失败"
else:
# 等待用户输入
captcha = None
code_key = StringUtils.generate_random_str(5)
for sec in range(30, 0, -1):
if self.get_code(code_key):
# 用户输入了
captcha = self.get_code(code_key)
log.info("【Sites】接收到验证码:%s" % captcha)
self.progress.update(ptype=ProgressKey.SiteCookie,
text="接收到验证码:%s" % captcha)
break
else:
# 获取验证码图片base64
code_bin = self.get_captcha_base64(chrome, code_url)
if not code_bin:
return None, None, "获取验证码图片数据失败"
else:
code_bin = f"data:image/png;base64,{code_bin}"
# 推送到前端
self.progress.update(ptype=ProgressKey.SiteCookie,
text=f"{code_bin}|{code_key}")
time.sleep(1)
if not captcha:
return None, None, "验证码输入超时"
# 输入验证码
captcha_element.send_keys(captcha)
else:
# 不可见元素不处理
pass
# 提交登录
submit_obj.click()
# 等待页面刷新完毕
WebDriverWait(driver=chrome.browser, timeout=5).until(es.staleness_of(submit_obj))
else:
return None, None, "未找到登录按钮"
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None, None, "仿真登录失败:%s" % str(e)
# 登录后的源码
html_text = chrome.get_html()
if not html_text:
return None, None, "获取源码失败"
if SiteHelper.is_logged_in(html_text):
return chrome.get_cookies(), chrome.get_ua(), ""
else:
            # 重新解析登录后的页面,读取错误信息
            html = etree.HTML(html_text)
            error_xpath = None
            for xpath in login_conf.get("error"):
                if html.xpath(xpath):
                    error_xpath = xpath
                    break
            if not error_xpath:
                return None, None, "登录失败"
            else:
                error_msg = html.xpath(error_xpath)[0]
                return None, None, error_msg
def get_captcha_text(self, chrome, code_url):
"""
识别验证码图片的内容
"""
code_b64 = self.get_captcha_base64(chrome=chrome,
image_url=code_url)
if not code_b64:
return ""
return self.ocrhelper.get_captcha_text(image_b64=code_b64)
@staticmethod
def __get_captcha_url(siteurl, imageurl):
"""
获取验证码图片的URL
"""
if not siteurl or not imageurl:
return ""
if imageurl.startswith("/"):
imageurl = imageurl[1:]
return "%s/%s" % (StringUtils.get_base_url(siteurl), imageurl)
def update_sites_cookie_ua(self,
username,
password,
twostepcode=None,
siteid=None,
ocrflag=False):
"""
更新所有站点Cookie和ua
"""
# 获取站点列表
sites = self.sites.get_sites(siteid=siteid)
if siteid:
sites = [sites]
# 总数量
site_num = len(sites)
# 当前数量
curr_num = 0
# 返回码、返回消息
retcode = 0
messages = []
# 开始进度
self.progress.start(ProgressKey.SiteCookie)
for site in sites:
if not site.get("signurl") and not site.get("rssurl"):
log.info("【Sites】%s 未设置地址,跳过" % site.get("name"))
continue
log.info("【Sites】开始更新 %s Cookie和User-Agent ..." % site.get("name"))
self.progress.update(ptype=ProgressKey.SiteCookie,
text="开始更新 %s Cookie和User-Agent ..." % site.get("name"))
            # 登录页面地址
            basic_url = StringUtils.get_base_url(site.get("signurl") or site.get("rssurl"))
            site_conf = self.siteconf.get_grap_conf(url=basic_url)
            if site_conf.get("LOGIN"):
                login_url = "%s/%s" % (basic_url, site_conf.get("LOGIN"))
            else:
                login_url = "%s/login.php" % basic_url
# 获取Cookie和User-Agent
cookie, ua, msg = self.__get_site_cookie_ua(url=login_url,
username=username,
password=password,
twostepcode=twostepcode,
ocrflag=ocrflag,
proxy=site.get("proxy"))
# 更新进度
curr_num += 1
if not cookie:
log.error("【Sites】获取 %s 信息失败:%s" % (site.get("name"), msg))
messages.append("%s %s" % (site.get("name"), msg))
self.progress.update(ptype=ProgressKey.SiteCookie,
value=round(100 * (curr_num / site_num)),
text="%s %s" % (site.get("name"), msg))
retcode = 1
else:
self.sites.update_site_cookie(siteid=site.get("id"), cookie=cookie, ua=ua)
log.info("【Sites】更新 %s 的Cookie和User-Agent成功" % site.get("name"))
messages.append("%s %s" % (site.get("name"), msg or "更新Cookie和User-Agent成功"))
self.progress.update(ptype=ProgressKey.SiteCookie,
value=round(100 * (curr_num / site_num)),
text="%s %s" % (site.get("name"), msg or "更新Cookie和User-Agent成功"))
self.progress.end(ProgressKey.SiteCookie)
return retcode, messages
@staticmethod
def get_captcha_base64(chrome, image_url):
"""
根据图片地址,使用浏览器获取验证码图片base64编码
"""
if not image_url:
return ""
ret = RequestUtils(headers=chrome.get_ua(), cookies=chrome.get_cookies()).get_res(image_url)
if ret:
return base64.b64encode(ret.content).decode()
return ""
| 13,251 | Python | .py | 296 | 25.101351 | 103 | 0.480137 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,017 | site_limiter.py | demigody_nas-tools/app/sites/site_limiter.py | import time
class SiteRateLimiter:
def __init__(self, limit_interval: int, limit_count: int, limit_seconds: int):
"""
限制访问频率
:param limit_interval: 单位时间(秒)
:param limit_count: 单位时间内访问次数
:param limit_seconds: 访问间隔(秒)
"""
self.limit_count = limit_count
self.limit_interval = limit_interval
self.limit_seconds = limit_seconds
self.last_visit_time = 0
self.count = 0
def check_rate_limit(self) -> (bool, str):
"""
检查是否超出访问频率控制
        :return: (是否超出, 错误信息)。超出返回True及提示信息,未超出返回False与空字符串
"""
current_time = time.time()
        # 访问间隔时间
if self.limit_seconds:
if current_time - self.last_visit_time < self.limit_seconds:
return True, f"触发流控规则,访问间隔不得小于 {self.limit_seconds} 秒," \
f"上次访问时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_visit_time))}"
# 单位时间内访问次数
if self.limit_interval and self.limit_count:
if current_time - self.last_visit_time > self.limit_interval:
# 计数清零
self.count = 0
if self.count >= self.limit_count:
return True, f"触发流控规则,{self.limit_interval} 秒内访问次数不得超过 {self.limit_count} 次," \
f"上次访问时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_visit_time))}"
# 访问计数
self.count += 1
# 更新最后访问时间
self.last_visit_time = current_time
# 未触发流控
return False, ""
if __name__ == "__main__":
    # 限制 1 分钟内最多访问 10 次,单次访问间隔不得小于 10 秒
    site_rate_limit = SiteRateLimiter(limit_interval=60, limit_count=10, limit_seconds=10)
    # 模拟访问:check_rate_limit 返回 (是否超限, 提示信息) 元组,需解包后判断
    for i in range(12):
        limited, message = site_rate_limit.check_rate_limit()
        if limited:
            print("访问频率超限:%s" % message)
        else:
            print("访问成功")
        time.sleep(3)
| 2,205 | Python | .py | 49 | 27.346939 | 113 | 0.553751 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,018 | siteconf.py | demigody_nas-tools/app/sites/siteconf.py | import json
import random
import re
import time
from functools import lru_cache
from urllib.parse import urlsplit
from lxml import etree
from app.helper import ChromeHelper
from app.sites.mt import MtFunc
from app.utils import ExceptionUtils, StringUtils, RequestUtils
from app.utils.commons import singleton
from config import Config
from web.backend.pro_user import ProUser
@singleton
class SiteConf:
user = None
# 站点签到支持的识别XPATH
_SITE_CHECKIN_XPATH = [
'//a[@id="signed"]',
'//a[contains(@href, "attendance")]',
'//a[contains(text(), "签到")]',
'//a/b[contains(text(), "签 到")]',
'//span[@id="sign_in"]/a',
'//a[contains(@href, "addbonus")]',
'//input[@class="dt_button"][contains(@value, "打卡")]',
'//a[contains(@href, "sign_in")]',
'//a[contains(@onclick, "do_signin")]',
'//a[@id="do-attendance"]',
'//shark-icon-button[@href="attendance.php"]'
]
# 站点详情页字幕下载链接识别XPATH
_SITE_SUBTITLE_XPATH = [
'//td[@class="rowhead"][text()="字幕"]/following-sibling::td//a/@href',
]
# 站点登录界面元素XPATH
_SITE_LOGIN_XPATH = {
"username": [
'//input[@name="username"]',
'//input[@id="form_item_username"]',
'//input[@id="username"]'
],
"password": [
'//input[@name="password"]',
'//input[@id="form_item_password"]',
'//input[@id="password"]'
],
"captcha": [
'//input[@name="imagestring"]',
'//input[@name="captcha"]',
'//input[@id="form_item_captcha"]'
],
"captcha_img": [
'//img[@alt="CAPTCHA"]/@src',
'//img[@alt="SECURITY CODE"]/@src',
'//img[@id="LAY-user-get-vercode"]/@src',
'//img[contains(@src,"/api/getCaptcha")]/@src'
],
"submit": [
'//input[@type="submit"]',
'//button[@type="submit"]',
'//button[@lay-filter="login"]',
'//button[@lay-filter="formLogin"]',
'//input[@type="button"][@value="登录"]'
],
"error": [
"//table[@class='main']//td[@class='text']/text()"
],
"twostep": [
'//input[@name="two_step_code"]',
'//input[@name="2fa_secret"]'
]
}
def __init__(self):
self.init_config()
def init_config(self):
self.user = ProUser()
def get_checkin_conf(self):
return self._SITE_CHECKIN_XPATH
def get_subtitle_conf(self):
return self._SITE_SUBTITLE_XPATH
def get_login_conf(self):
return self._SITE_LOGIN_XPATH
def get_grap_conf(self, url=None):
if not url:
return self.user.get_brush_conf()
for k, v in self.user.get_brush_conf().items():
if StringUtils.url_equal(k, url):
return v
return {}
def check_torrent_attr(self, torrent_url, cookie, apikey, site_url, ua=None, proxy=False):
"""
检验种子是否免费,当前做种人数
:param torrent_url: 种子的详情页面
:param site_url: 站点地址
:param cookie: 站点的Cookie
:param apikey: 站点的Apikey
:param ua: 站点的ua
:param proxy: 是否使用代理
:return: 种子属性,包含FREE 2XFREE HR PEER_COUNT等属性
"""
torrent_id = None
ret_attr = {
"free": False,
"2xfree": False,
"hr": False,
"peer_count": 0
}
if 'm-team' in torrent_url:
split_url = urlsplit(torrent_url)
base_url = f"{split_url.scheme}://{split_url.netloc}"
detail_url = f"{base_url}/api/torrent/detail"
res = re.findall(r'\d+', torrent_url)
torrent_id = res[0]
json_text = self.__get_site_page_html(url=detail_url,
site_url=site_url,
cookie=cookie,
apikey=apikey,
ua=ua,
proxy=proxy,
param=torrent_id)
json_data = json.loads(json_text)
if json_data['code'] != "0":
return ret_attr
discount = json_data['data']['status']['discount']
seeders = json_data['data']['status']['seeders']
# mt最高只有FREE,没有2xFREE和HR
if discount == 'FREE':
ret_attr["free"] = True
ret_attr['peer_count'] = int(seeders)
else:
if not torrent_url:
return ret_attr
xpath_strs = self.get_grap_conf(torrent_url)
if not xpath_strs:
return ret_attr
html_text = self.__get_site_page_html(url=torrent_url, cookie=cookie,
ua=ua,
render=xpath_strs.get('RENDER'),
proxy=proxy)
if not html_text:
return ret_attr
try:
html = etree.HTML(html_text)
# 检测2XFREE
for xpath_str in xpath_strs.get("2XFREE"):
if html.xpath(xpath_str):
ret_attr["free"] = True
ret_attr["2xfree"] = True
# 检测FREE
for xpath_str in xpath_strs.get("FREE"):
if html.xpath(xpath_str):
ret_attr["free"] = True
# 检测HR
for xpath_str in xpath_strs.get("HR"):
if html.xpath(xpath_str):
ret_attr["hr"] = True
# 检测PEER_COUNT当前做种人数
for xpath_str in xpath_strs.get("PEER_COUNT"):
peer_count_dom = html.xpath(xpath_str)
if peer_count_dom:
peer_count_str = ''.join(peer_count_dom[0].itertext())
peer_count_digit_str = ""
for m in peer_count_str:
if m.isdigit():
peer_count_digit_str = peer_count_digit_str + m
if m == " ":
break
ret_attr["peer_count"] = int(peer_count_digit_str) if len(peer_count_digit_str) > 0 else 0
except Exception as err:
ExceptionUtils.exception_traceback(err)
        # 随机休眠后再返回
time.sleep(round(random.uniform(1, 5), 1))
return ret_attr
@staticmethod
@lru_cache(maxsize=128)
def __get_site_page_html(url, cookie, ua, apikey=None, site_url=None, render=False, proxy=False, param=None):
chrome = ChromeHelper(headless=True)
if render and chrome.get_status():
# 开渲染
if chrome.visit(url=url, cookie=cookie, ua=ua, proxy=proxy):
# 等待页面加载完成
time.sleep(10)
return chrome.get_html()
elif 'm-team' in site_url:
_site_info = {
"cookie": cookie,
"ua": ua,
"proxy": proxy,
"strict_url": site_url
}
_mt = MtFunc(_site_info)
return _mt.get_torrent_detail(param)
else:
res = RequestUtils(headers=ua, cookies=cookie, apikey=apikey,
proxies=Config().get_proxies() if proxy else None).get_res(url=url)
if res and res.status_code == 200:
res.encoding = res.apparent_encoding
return res.text
return ""
| 8,012 | Python | .py | 199 | 25.346734 | 114 | 0.480419 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,019 | mt.py | demigody_nas-tools/app/sites/mt.py | import base64
import json
from app.media.tmdbv3api.tmdb import logger
from app.utils import RequestUtils
from config import Config
class MtFunc(object):
signin_url = "%s/api/member/updateLastBrowse"
api_key_url = "%s/api/apikey/getKeyList"
download_url = "%s/api/torrent/genDlToken"
torrent_detail_url = "%s/api/torrent/detail"
_site_name = None
_site_api_key = None
_site_cookie = None
_site_url = None
_site_ua = None
_site_proxy = None
    def __init__(self, site_info):
        # 站点名称用于日志输出(部分调用方传入的site_info不含name,此时为None)
        self._site_name = site_info.get("name")
        self._site_ua = site_info.get("ua") or Config().get_ua()
        self._site_api_key = site_info.get("apikey")
        self._site_cookie = site_info.get("cookie")
        self._site_proxy = site_info.get("proxy") or Config().get_proxies()
        self._site_url = site_info.get('strict_url')
    def signin(self):
        res = (RequestUtils(headers=self._site_ua, authorization=self._site_cookie,
                            proxies=self._site_proxy)
               .post_res(url=self.signin_url % self._site_url))
        # post_res 请求异常时可能返回 None,先判空再取返回码
        if res and res.json().get("code") == "0":
            return True
        return False
def update_api_key(self) -> tuple[bool, str]:
"""
获取ApiKey
"""
try:
res = RequestUtils(headers={
"Content-Type": "application/json",
"User-Agent": f"{self._site_ua}"
}, cookies=self._site_cookie, proxies=self._site_proxy, timeout=15, authorization=self._site_cookie,
referer=f"{self._site_url}/usercp?tab=laboratory").post_res(url=self.api_key_url % self._site_url)
if res and res.status_code == 200:
api_keys = res.json().get('data')
if api_keys:
logger.info(f"{self._site_name} 获取ApiKey成功")
# 按lastModifiedDate倒序排序
api_keys.sort(key=lambda x: x.get('lastModifiedDate'), reverse=True)
self._site_api_key = api_keys[0].get('apiKey')
else:
                    err_msg = res.json().get('message')
                    if err_msg:
                        logger.warn(f"{self._site_name} {err_msg}")
                        return False, err_msg
else:
logger.warn(f"{self._site_name} 获取ApiKey失败,请先在`控制台`->`实验室`建立存取令牌")
return False, "获取ApiKey失败,请先在`控制台`->`实验室`建立存取令牌"
else:
logger.warn(f"{self._site_name} 获取ApiKey失败,请检查Cookie是否有效")
return False, "获取ApiKey失败,请检查Cookie是否有效"
except Exception as e:
logger.error(f"{self._site_name} 获取ApiKey出错:{e}")
return False, "获取ApiKey出错"
return True, self._site_api_key
def get_download_url(self, torrent_id: str) -> str:
"""
获取下载链接,返回base64编码的json字符串及URL
"""
url = self.download_url % self._site_url
params = {
'method': 'post',
'cookie': False,
'params': {
'id': torrent_id
},
'header': {
'Content-Type': 'application/json',
'User-Agent': f'{self._site_ua}',
'Accept': 'application/json, text/plain, */*',
'x-api-key': self._site_api_key
},
'result': 'data'
}
# base64编码
base64_str = base64.b64encode(json.dumps(params).encode('utf-8')).decode('utf-8')
return f"[{base64_str}]{url}"
def get_torrent_detail(self, torrent_id: str) -> str:
"""
获取下载链接,返回base64编码的json字符串及URL
"""
url = self.torrent_detail_url % self._site_url
param = {'id': torrent_id}
res = RequestUtils(
authorization=self._site_cookie,
apikey=self._site_api_key,
ua=self._site_ua,
proxies=self._site_proxy
).post_res(url=url, data=param)
        if res and res.status_code == 200:
            res.encoding = res.apparent_encoding
            return res.text
        return ""
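if __name__ == "__main__":
    # 使用示意:get_download_url 不发起网络请求,只拼出 "[base64(json参数)]URL" 格式的下载描述
    # (站点信息均为假设值,proxy 显式置空以跳过代理配置读取)
    _demo = MtFunc({"name": "demo", "ua": "Mozilla/5.0", "apikey": "demo-key",
                    "cookie": "demo-token", "proxy": {"http": None, "https": None},
                    "strict_url": "https://example.com"})
    _link = _demo.get_download_url("12345")
    _b64, _url = _link[1:].split("]", 1)
    _params = json.loads(base64.b64decode(_b64))
    print(_url)                     # https://example.com/api/torrent/genDlToken
    print(_params["params"]["id"])  # 12345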
| 4,298 | Python | .py | 99 | 29.373737 | 114 | 0.5378 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,020 | site_userinfo.py | demigody_nas-tools/app/sites/site_userinfo.py | import json
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool
from threading import Lock
import re
from urllib.parse import urlparse, urlunparse
import requests
import log
from app.helper import ChromeHelper, SubmoduleHelper, DbHelper
from app.message import Message
from app.sites.sites import Sites
from app.sites.siteuserinfo.mt import MtUserInfo
from app.utils import RequestUtils, ExceptionUtils, StringUtils
from app.utils.commons import singleton
from config import Config
lock = Lock()
@singleton
class SiteUserInfo(object):
sites = None
dbhelper = None
message = None
_MAX_CONCURRENCY = 10
_last_update_time = None
_sites_data = {}
def __init__(self):
# 加载模块
self._site_schema = SubmoduleHelper.import_submodules('app.sites.siteuserinfo',
filter_func=lambda _, obj: hasattr(obj, 'schema'))
self._site_schema.sort(key=lambda x: x.order)
log.debug(f"【Sites】加载站点解析:{self._site_schema}")
self.init_config()
def init_config(self):
self.sites = Sites()
self.dbhelper = DbHelper()
self.message = Message()
# 站点上一次更新时间
self._last_update_time = None
# 站点数据
self._sites_data = {}
def __build_class(self, html_text):
for site_schema in self._site_schema:
try:
if site_schema.match(html_text):
return site_schema
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
def build(self, url, site_id, site_name,
site_cookie=None, ua=None, emulate=None, proxy=False):
if not site_cookie:
return None
session = requests.Session()
log.debug(f"【Sites】站点 {site_name} url={url} site_cookie={site_cookie} ua={ua}")
# 站点流控
if self.sites.check_ratelimit(site_id):
return
# 检测环境,有浏览器内核的优先使用仿真签到
chrome = ChromeHelper()
if emulate and chrome.get_status():
if not chrome.visit(url=url, ua=ua, cookie=site_cookie, proxy=proxy):
log.error("【Sites】%s 无法打开网站" % site_name)
return None
# 循环检测是否过cf
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
log.error("【Sites】%s 跳转站点失败" % site_name)
return None
# 判断是否已签到
html_text = chrome.get_html()
else:
proxies = Config().get_proxies() if proxy else None
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxies, session=session).get_res(url=url)
if res and res.status_code == 200:
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
# 第一次登录反爬
if html_text.find("title") == -1:
i = html_text.find("window.location")
if i == -1:
return None
tmp_url = url + html_text[i:html_text.find(";")] \
.replace("\"", "").replace("+", "").replace(" ", "").replace("window.location=", "")
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxies,
session=session).get_res(url=tmp_url)
if res and res.status_code == 200:
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
else:
log.error("【Sites】站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code))
return None
# 兼容假首页情况,假首页通常没有 <link rel="search" 属性
if '"search"' not in html_text and '"csrf-token"' not in html_text:
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxies,
session=session).get_res(url=url + "/index.php")
if res and res.status_code == 200:
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
elif res is not None:
log.error(f"【Sites】站点 {site_name} 连接失败,状态码:{res.status_code}")
return None
else:
log.error(f"【Sites】站点 {site_name} 无法访问:{url}")
return None
# 解析站点类型
site_schema = self.__build_class(html_text)
if not site_schema:
log.error("【Sites】站点 %s 无法识别站点类型" % site_name)
return None
parsed_url = urlparse(url)
if parsed_url.netloc:
site_domain_url = urlunparse((parsed_url.scheme, parsed_url.netloc, "", "", "", ""))
return site_schema(site_name, site_domain_url, site_cookie, html_text, session=session, ua=ua, emulate=emulate,
proxy=proxy)
def __refresh_site_data(self, site_info):
"""
更新单个site 数据信息
:param site_info:
:return:
"""
site_id = site_info.get("id")
site_name = site_info.get("name")
site_url = site_info.get("strict_url")
original_site_url = site_url
if not site_url:
return
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
unread_msg_notify = site_info.get("unread_msg_notify")
chrome = site_info.get("chrome")
proxy = site_info.get("proxy")
need_goto_user_detail_fetch, user_detail_pattern = self.sites.need_goto_user_detail_fetch(site_id=site_id)
if need_goto_user_detail_fetch:
proxies = Config().get_proxies() if proxy else None
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxies,
session=requests.Session()).get_res(url=site_url)
if res and res.status_code == 200:
try:
matches = re.findall(user_detail_pattern, res.text)
if matches:
site_url = site_url.rstrip("/") + f"/userdetails.php?id={matches[0]}"
                except Exception as e:
                    ExceptionUtils.exception_traceback(e)
try:
if 'm-team' in original_site_url:
mt_user_info = MtUserInfo()
site_user_info = mt_user_info.get_site_user_info(site_info)
else:
site_user_info = self.build(url=site_url,
site_id=site_id,
site_name=site_name,
site_cookie=site_cookie,
ua=ua,
emulate=chrome,
proxy=proxy)
if site_user_info:
if 'm-team' in original_site_url:
log.debug(f"【Sites】站点 {site_name} 数据获取完成")
else:
log.debug(f"【Sites】站点 {site_name} 开始以 {site_user_info.site_schema()} 模型解析")
# 开始解析
site_user_info.parse()
log.debug(f"【Sites】站点 {site_name} 解析完成")
# 获取不到数据时,仅返回错误信息,不做历史数据更新
if site_user_info.err_msg:
self._sites_data.update({site_name: {"err_msg": site_user_info.err_msg}})
return
# 发送通知,存在未读消息
self.__notify_unread_msg(site_name, site_user_info, unread_msg_notify)
_updated_sites_data = {
site_name: {
"upload": site_user_info.upload,
"username": site_user_info.username,
"user_level": site_user_info.user_level,
"join_at": site_user_info.join_at,
"download": site_user_info.download,
"ratio": site_user_info.ratio,
"seeding": site_user_info.seeding,
"seeding_size": site_user_info.seeding_size,
"leeching": site_user_info.leeching,
"bonus": site_user_info.bonus,
"url": original_site_url,
"err_msg": site_user_info.err_msg,
"message_unread": site_user_info.message_unread
}
}
_updated_sites_json = json.dumps(_updated_sites_data, indent=4)
log.debug(f"【Sites】站点 {site_name} 数据:{_updated_sites_json}")
self._sites_data.update(_updated_sites_data)
log.info(f"【Sites】站点 {site_name} 数据更新成功")
return site_user_info
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【Sites】站点 {site_name} 获取流量数据失败:{str(e)}")
def __notify_unread_msg(self, site_name, site_user_info, unread_msg_notify):
if site_user_info.message_unread <= 0 or not unread_msg_notify:
return
if self._sites_data.get(site_name, {}).get('message_unread') == site_user_info.message_unread:
return
# 解析出内容,则发送内容
if len(site_user_info.message_unread_contents) > 0:
for head, date, content in site_user_info.message_unread_contents:
msg_title = f"【站点 {site_user_info.site_name} 消息】"
msg_text = f"时间:{date}\n标题:{head}\n内容:\n{content}"
self.message.send_site_message(title=msg_title, text=msg_text)
else:
self.message.send_site_message(
title=f"站点 {site_user_info.site_name} 收到 {site_user_info.message_unread} 条新消息,请登陆查看")
def refresh_site_data_now(self):
"""
强制刷新站点数据
"""
self.__refresh_all_site_data(force=True)
# 刷完发送消息
string_list = []
# 增量数据
incUploads = 0
incDownloads = 0
        _, _, site, upload, download = self.get_pt_site_statistics_history(2)
# 按照上传降序排序
data_list = list(zip(site, upload, download))
data_list = sorted(data_list, key=lambda x: x[1], reverse=True)
for data in data_list:
site = data[0]
upload = int(data[1])
download = int(data[2])
if upload > 0 or download > 0:
incUploads += int(upload)
incDownloads += int(download)
string_list.append(f"【{site}】\n"
f"上传量:{StringUtils.str_filesize(upload)}\n"
f"下载量:{StringUtils.str_filesize(download)}\n"
f"\n————————————")
if incDownloads or incUploads:
string_list.insert(0, f"【今日汇总】\n"
f"总上传:{StringUtils.str_filesize(incUploads)}\n"
f"总下载:{StringUtils.str_filesize(incDownloads)}\n"
f"\n————————————")
self.message.send_user_statistics_message(string_list)
def get_site_data(self, specify_sites=None, force=False):
"""
获取站点上传下载量
"""
self.__refresh_all_site_data(force=force, specify_sites=specify_sites)
return self._sites_data
def __refresh_all_site_data(self, force=False, specify_sites=None):
"""
多线程刷新站点下载上传量,默认间隔6小时
"""
if not self.sites.get_sites():
return
with lock:
if not force \
and not specify_sites \
and self._last_update_time:
return
if specify_sites \
and not isinstance(specify_sites, list):
specify_sites = [specify_sites]
# 没有指定站点,默认使用全部站点
if not specify_sites:
refresh_sites = self.sites.get_sites(statistic=True)
else:
refresh_sites = [site for site in self.sites.get_sites(statistic=True) if
site.get("name") in specify_sites]
if not refresh_sites:
return
# 并发刷新
with ThreadPool(min(len(refresh_sites), self._MAX_CONCURRENCY)) as p:
site_user_infos = p.map(self.__refresh_site_data, refresh_sites)
site_user_infos = [info for info in site_user_infos if info]
# 登记历史数据
self.dbhelper.insert_site_statistics_history(site_user_infos)
# 实时用户数据
self.dbhelper.update_site_user_statistics(site_user_infos)
# 更新站点图标
self.dbhelper.update_site_favicon(site_user_infos)
# 实时做种信息
self.dbhelper.update_site_seed_info(site_user_infos)
# 站点图标重新加载
self.sites.init_favicons()
# 更新时间
self._last_update_time = datetime.now()
def get_pt_site_statistics_history(self, days=7, end_day=None):
"""
获取站点上传下载量
"""
site_urls = []
for site in self.sites.get_sites(statistic=True):
site_url = site.get("strict_url")
if site_url:
site_urls.append(site_url)
return self.dbhelper.get_site_statistics_recent_sites(days=days, end_day=end_day, strict_urls=site_urls)
def get_site_user_statistics(self, sites=None, encoding="RAW"):
"""
获取站点用户数据
:param sites: 站点名称
:param encoding: RAW/DICT
:return:
"""
statistic_sites = self.sites.get_sites(statistic=True)
if not sites:
site_urls = [site.get("strict_url") for site in statistic_sites]
else:
site_urls = [site.get("strict_url") for site in statistic_sites
if site.get("name") in sites]
raw_statistics = self.dbhelper.get_site_user_statistics(strict_urls=site_urls)
if encoding == "RAW":
return raw_statistics
return self.__todict(raw_statistics)
def get_pt_site_activity_history(self, site, days=365 * 2):
"""
查询站点 上传,下载,做种数据
:param site: 站点名称
:param days: 最大数据量
:return:
"""
site_activities = [["time", "upload", "download", "bonus", "seeding", "seeding_size"]]
sql_site_activities = self.dbhelper.get_site_statistics_history(site=site, days=days)
for sql_site_activity in sql_site_activities:
timestamp = datetime.strptime(sql_site_activity.DATE, '%Y-%m-%d').timestamp() * 1000
site_activities.append(
[timestamp,
sql_site_activity.UPLOAD,
sql_site_activity.DOWNLOAD,
sql_site_activity.BONUS,
sql_site_activity.SEEDING,
sql_site_activity.SEEDING_SIZE])
return site_activities
def get_pt_site_seeding_info(self, site):
"""
查询站点 做种分布信息
:param site: 站点名称
:return: seeding_info:[uploader_num, seeding_size]
"""
site_seeding_info = {"seeding_info": []}
seeding_info = self.dbhelper.get_site_seeding_info(site=site)
if not seeding_info:
return site_seeding_info
site_seeding_info["seeding_info"] = json.loads(seeding_info[0])
return site_seeding_info
def get_pt_site_min_join_date(self, sites=None):
"""
查询站点加入时间
"""
statistics = self.get_site_user_statistics(sites=sites, encoding="DICT")
if not statistics:
return ""
dates = []
for s in statistics:
if s.get("join_at"):
try:
dates.append(datetime.strptime(s.get("join_at"), '%Y-%m-%d %H:%M:%S'))
                except Exception as err:
                    ExceptionUtils.exception_traceback(err)
if dates:
return min(dates).strftime("%Y-%m-%d")
return ""
@staticmethod
def __todict(raw_statistics):
statistics = []
for site in raw_statistics:
statistics.append({"site": site.SITE,
"username": site.USERNAME,
"user_level": site.USER_LEVEL,
"join_at": site.JOIN_AT,
"update_at": site.UPDATE_AT,
"upload": site.UPLOAD,
"download": site.DOWNLOAD,
"ratio": site.RATIO,
"seeding": site.SEEDING,
"leeching": site.LEECHING,
"seeding_size": site.SEEDING_SIZE,
"bonus": site.BONUS,
"url": site.URL,
"msg_unread": site.MSG_UNREAD
})
return statistics
def update_site_name(self, old_name, name):
"""
更新站点数据中的站点名称
"""
self.dbhelper.update_site_user_statistics_site_name(name, old_name)
self.dbhelper.update_site_seed_info_site_name(name, old_name)
self.dbhelper.update_site_statistics_site_name(name, old_name)
return True
def is_min_join_date_beyond_one_month(self):
"""
查询最早加入PT站的时间是否超过一个月
"""
# 读取强制刷流配置
_force_enable_brush = Config().get_config("pt").get("force_enable_brush")
if _force_enable_brush:
return True
first_pt_site = self.get_pt_site_min_join_date()
if not first_pt_site or not StringUtils.is_one_month_ago(first_pt_site):
return False
else:
return True
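if __name__ == "__main__":
    # 使用示意:强制刷新并读取各站点流量数据(假设应用环境已初始化)
    data = SiteUserInfo().get_site_data(force=True)
    for _name, _info in data.items():
        print(_name, _info.get("upload"), _info.get("download"), _info.get("err_msg"))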
| 19,565 | Python | .py | 413 | 29.917676 | 119 | 0.521437 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,021 | site_subtitle.py | demigody_nas-tools/app/sites/site_subtitle.py | import os
import shutil
from lxml import etree
import log
from app.sites.sites import Sites
from app.sites.siteconf import SiteConf
from app.helper import SiteHelper
from app.utils import RequestUtils, StringUtils, PathUtils, ExceptionUtils
from config import Config, RMT_SUBEXT
class SiteSubtitle:
siteconf = None
sites = None
_save_tmp_path = None
def __init__(self):
self.siteconf = SiteConf()
self.sites = Sites()
self._save_tmp_path = Config().get_temp_path()
if not os.path.exists(self._save_tmp_path):
os.makedirs(self._save_tmp_path, exist_ok=True)
def download(self, media_info, site_id, cookie, ua, download_dir):
"""
从站点下载字幕文件,并保存到本地
"""
if not media_info.page_url:
return
# 字幕下载目录
log.info("【Sites】开始从站点下载字幕:%s" % media_info.page_url)
if not download_dir:
log.warn("【Sites】未找到字幕下载目录")
return
# 站点流控
if self.sites.check_ratelimit(site_id):
return
# 读取网站代码
request = RequestUtils(headers=ua, cookies=cookie)
res = request.get_res(media_info.page_url)
if res and res.status_code == 200:
if not res.text:
log.warn(f"【Sites】读取页面代码失败:{media_info.page_url}")
return
html = etree.HTML(res.text)
sublink_list = []
for xpath in self.siteconf.get_subtitle_conf():
sublinks = html.xpath(xpath)
if sublinks:
for sublink in sublinks:
if not sublink:
continue
if not sublink.startswith("http"):
base_url = StringUtils.get_base_url(media_info.page_url)
if sublink.startswith("/"):
sublink = "%s%s" % (base_url, sublink)
else:
sublink = "%s/%s" % (base_url, sublink)
sublink_list.append(sublink)
# 下载所有字幕文件
for sublink in sublink_list:
log.info(f"【Sites】找到字幕下载链接:{sublink},开始下载...")
# 下载
ret = request.get_res(sublink)
if ret and ret.status_code == 200:
# 创建目录
if not os.path.exists(download_dir):
os.makedirs(download_dir, exist_ok=True)
# 保存ZIP
file_name = SiteHelper.get_url_subtitle_name(ret.headers.get('content-disposition'), sublink)
if not file_name:
log.warn(f"【Sites】链接不是字幕文件:{sublink}")
continue
if file_name.lower().endswith(".zip"):
# ZIP包
zip_file = os.path.join(self._save_tmp_path, file_name)
# 解压路径
zip_path = os.path.splitext(zip_file)[0]
with open(zip_file, 'wb') as f:
f.write(ret.content)
# 解压文件
shutil.unpack_archive(zip_file, zip_path, format='zip')
# 遍历转移文件
for sub_file in PathUtils.get_dir_files(in_path=zip_path, exts=RMT_SUBEXT):
target_sub_file = os.path.join(download_dir,
os.path.splitext(os.path.basename(sub_file))[0])
log.info(f"【Sites】转移字幕 {sub_file} 到 {target_sub_file}")
SiteHelper.transfer_subtitle(sub_file, target_sub_file)
# 删除临时文件
try:
shutil.rmtree(zip_path)
os.remove(zip_file)
except Exception as err:
ExceptionUtils.exception_traceback(err)
else:
sub_file = os.path.join(self._save_tmp_path, file_name)
# 保存
with open(sub_file, 'wb') as f:
f.write(ret.content)
target_sub_file = os.path.join(download_dir,
os.path.splitext(os.path.basename(sub_file))[0])
log.info(f"【Sites】转移字幕 {sub_file} 到 {target_sub_file}")
SiteHelper.transfer_subtitle(sub_file, target_sub_file)
else:
log.error(f"【Sites】下载字幕文件失败:{sublink}")
continue
if sublink_list:
log.info(f"【Sites】{media_info.page_url} 页面字幕下载完成")
else:
log.warn(f"【Sites】{media_info.page_url} 页面未找到字幕下载链接")
elif res is not None:
log.warn(f"【Sites】连接 {media_info.page_url} 失败,状态码:{res.status_code}")
else:
log.warn(f"【Sites】无法打开链接:{media_info.page_url}")
| 5,466 | Python | .py | 110 | 28.436364 | 113 | 0.483623 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,022 | ipt_project.py | demigody_nas-tools/app/sites/siteuserinfo/ipt_project.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class IptSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.Ipt
order = SITE_BASE_ORDER + 35
@classmethod
def match(cls, html_text):
return 'IPTorrents' in html_text
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
tmps = html.xpath('//a[contains(@href, "/u/")]//text()')
tmps_id = html.xpath('//a[contains(@href, "/u/")]/@href')
if tmps:
self.username = str(tmps[-1])
if tmps_id:
user_id_match = re.search(r"/u/(\d+)", tmps_id[0])
if user_id_match and user_id_match.group().strip():
self.userid = user_id_match.group(1)
self._user_detail_page = f"user.php?u={self.userid}"
self._torrent_seeding_page = f"peers?u={self.userid}"
tmps = html.xpath('//div[@class = "stats"]/div/div')
if tmps:
self.upload = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[1]).strip())
self.download = StringUtils.num_filesize(str(tmps[0].xpath('span/text()')[2]).strip())
self.seeding = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[0])
self.leeching = StringUtils.str_int(tmps[0].xpath('a')[2].xpath('text()')[1])
self.ratio = StringUtils.str_float(str(tmps[0].xpath('span/text()')[0]).strip().replace('-', '0'))
self.bonus = StringUtils.str_float(tmps[0].xpath('a')[3].xpath('text()')[0])
def _parse_site_page(self, html_text):
# TODO
pass
def _parse_user_detail_info(self, html_text):
html = etree.HTML(html_text)
if not html:
return
user_levels_text = html.xpath('//tr/th[text()="Class"]/following-sibling::td[1]/text()')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
# 加入日期
join_at_text = html.xpath('//tr/th[text()="Join date"]/following-sibling::td[1]/text()')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0])
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
html = etree.HTML(html_text)
if not html:
return
        # 做种行结束位置(由 "Leechers" 表头所在行的位置反推)
        seeding_end_pos = 3
        if html.xpath('//tr/td[text() = "Leechers"]'):
            seeding_end_pos = len(html.xpath('//tr/td[text() = "Leechers"]/../preceding-sibling::tr')) - 2
page_seeding = 0
page_seeding_size = 0
seeding_torrents = html.xpath('//tr/td[text() = "Seeders"]/../following-sibling::tr/td[position()=6]/text()')
if seeding_torrents:
page_seeding = seeding_end_pos
for per_size in seeding_torrents[:seeding_end_pos]:
if '(' in per_size and ')' in per_size:
per_size = per_size.split('(')[-1]
per_size = per_size.split(')')[0]
page_seeding_size += StringUtils.num_filesize(per_size)
self.seeding = page_seeding
self.seeding_size = page_seeding_size
def _parse_user_traffic_info(self, html_text):
# TODO
pass
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
| 3,640 | Python | .py | 75 | 38.76 | 117 | 0.587454 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,023 | nexus_project.py | demigody_nas-tools/app/sites/siteuserinfo/nexus_project.py | # -*- coding: utf-8 -*-
import re
from app.sites.siteuserinfo._base import SITE_BASE_ORDER
from app.sites.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
from app.utils.types import SiteSchema
class NexusProjectSiteUserInfo(NexusPhpSiteUserInfo):
schema = SiteSchema.NexusProject
order = SITE_BASE_ORDER + 25
@classmethod
def match(cls, html_text):
return 'Nexus Project' in html_text
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = user_detail.group(1)
self._torrent_seeding_page = f"viewusertorrents.php?id={self.userid}&show=seeding"
| 861 | Python | .py | 18 | 41.777778 | 90 | 0.703349 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,024 | nexus_rabbit.py | demigody_nas-tools/app/sites/siteuserinfo/nexus_rabbit.py | # -*- coding: utf-8 -*-
import json
from lxml import etree
from app.sites.siteuserinfo._base import SITE_BASE_ORDER
from app.sites.siteuserinfo.nexus_php import NexusPhpSiteUserInfo
from app.utils.exception_utils import ExceptionUtils
from app.utils.types import SiteSchema
class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
schema = SiteSchema.NexusRabbit
order = SITE_BASE_ORDER + 5
@classmethod
def match(cls, html_text):
html = etree.HTML(html_text)
if not html:
return False
printable_text = html.xpath("string(.)") if html else ""
return 'Style by Rabbit' in printable_text
def _parse_site_page(self, html_text):
super()._parse_site_page(html_text)
self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}"
self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"}
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
做种相关信息
:param html_text:
:param multi_page: 是否多页数据
:return: 下页地址
"""
try:
torrents = json.loads(html_text).get('data')
except Exception as e:
ExceptionUtils.exception_traceback(e)
return
page_seeding_size = 0
page_seeding_info = []
page_seeding = len(torrents)
for torrent in torrents:
seeders = int(torrent.get('seeders', 0))
size = int(torrent.get('size', 0))
page_seeding_size += int(torrent.get('size', 0))
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
| 1,844 | Python | .py | 44 | 33.159091 | 118 | 0.649573 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,025 | unit3d.py | demigody_nas-tools/app/sites/siteuserinfo/unit3d.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class Unit3dSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.Unit3d
order = SITE_BASE_ORDER + 15
@classmethod
def match(cls, html_text):
return "unit3d.js" in html_text
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
tmps = html.xpath('//a[contains(@href, "/users/") and contains(@href, "settings")]/@href')
if tmps:
user_name_match = re.search(r"/users/(.+)/settings", tmps[0])
if user_name_match and user_name_match.group().strip():
self.username = user_name_match.group(1)
self._torrent_seeding_page = f"/users/{self.username}/active?perPage=100&client=&seeding=include"
self._user_detail_page = f"/users/{self.username}"
tmps = html.xpath('//a[contains(@href, "bonus/earnings")]')
if tmps:
bonus_text = tmps[0].xpath("string(.)")
bonus_match = re.search(r"([\d,.]+)", bonus_text)
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
def _parse_site_page(self, html_text):
# TODO
pass
def _parse_user_detail_info(self, html_text):
"""
解析用户额外信息,加入时间,等级
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if not html:
return None
# 用户等级
user_levels_text = html.xpath('//div[contains(@class, "content")]//span[contains(@class, "badge-user")]/text()')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
# 加入日期
join_at_text = html.xpath('//div[contains(@class, "content")]//h4[contains(text(), "注册日期") '
'or contains(text(), "註冊日期") '
'or contains(text(), "Registration date")]/text()')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(
join_at_text[0].replace('注册日期', '').replace('註冊日期', '').replace('Registration date', ''))
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
做种相关信息
:param html_text:
:param multi_page: 是否多页数据
:return: 下页地址
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 9
seeders_col = 2
# 搜索size列
if html.xpath('//thead//th[contains(@class,"size")]'):
size_col = len(html.xpath('//thead//th[contains(@class,"size")][1]/preceding-sibling::th')) + 1
# 搜索seeders列
if html.xpath('//thead//th[contains(@class,"seeders")]'):
seeders_col = len(html.xpath('//thead//th[contains(@class,"seeders")]/preceding-sibling::th')) + 1
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//tr[position()]/td[{size_col}]')
seeding_seeders = html.xpath(f'//tr[position()]/td[{seeders_col}]')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
# 是否存在下页数据
next_page = None
next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
if next_pages and len(next_pages) > 1:
page_num = next_pages[0].xpath("string(.)").strip()
if page_num.isdigit():
next_page = f"{self._torrent_seeding_page}&page={page_num}"
return next_page
def _parse_user_traffic_info(self, html_text):
html_text = self._prepare_html_text(html_text)
upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
self.ratio = StringUtils.str_float(ratio_match.group(1)) if (
ratio_match and ratio_match.group(1).strip()) else 0.0
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
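if __name__ == "__main__":
    # 使用示意:match 仅依据页面源码特征识别站点框架(HTML片段为假设值)
    print(Unit3dSiteUserInfo.match('<script src="/js/unit3d.js"></script>'))  # True
    print(Unit3dSiteUserInfo.match('<html><body>other</body></html>'))        # False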
| 5,476 | Python | .py | 107 | 38.878505 | 120 | 0.57798 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,026 | file_list.py | demigody_nas-tools/app/sites/siteuserinfo/file_list.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class FileListSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.FileList
order = SITE_BASE_ORDER + 50
@classmethod
def match(cls, html_text):
html = etree.HTML(html_text)
if not html:
return False
printable_text = html.xpath("string(.)") if html else ""
return 'Powered by FileList' in printable_text
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = user_detail.group(1)
self._torrent_seeding_page = f"snatchlist.php?id={self.userid}&action=torrents&type=seeding"
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
if ret:
self.username = str(ret[0])
def _parse_user_traffic_info(self, html_text):
"""
上传/下载/分享率 [做种数/魔力值]
:param html_text:
:return:
"""
return
def _parse_user_detail_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
upload_html = html.xpath('//table//tr/td[text()="Uploaded"]/following-sibling::td//text()')
if upload_html:
self.upload = StringUtils.num_filesize(upload_html[0])
download_html = html.xpath('//table//tr/td[text()="Downloaded"]/following-sibling::td//text()')
if download_html:
self.download = StringUtils.num_filesize(download_html[0])
self.ratio = 0 if self.download == 0 else self.upload / self.download
user_level_html = html.xpath('//table//tr/td[text()="Class"]/following-sibling::td//text()')
if user_level_html:
self.user_level = user_level_html[0].strip()
join_at_html = html.xpath('//table//tr/td[contains(text(), "Join")]/following-sibling::td//text()')
if join_at_html:
self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip())
bonus_html = html.xpath('//a[contains(@href, "shop.php")]')
if bonus_html:
self.bonus = StringUtils.str_float(bonus_html[0].xpath("string(.)").strip())
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
做种相关信息
:param html_text:
:param multi_page: 是否多页数据
:return: 下页地址
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 6
seeders_col = 7
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//table/tr[position()>1]/td[{size_col}]')
seeding_seeders = html.xpath(f'//table/tr[position()>1]/td[{seeders_col}]')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
# 是否存在下页数据
next_page = None
return next_page
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
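if __name__ == "__main__":
    # 使用示意:与简单子串匹配不同,这里先经 lxml 解析,再取页面可见文本判断(HTML为假设值)
    print(FileListSiteUserInfo.match('<html><body>Powered by FileList</body></html>'))  # True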
| 4,166 | Python | .py | 90 | 36.377778 | 109 | 0.613236 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,027 | tnode.py | demigody_nas-tools/app/sites/siteuserinfo/tnode.py | # -*- coding: utf-8 -*-
import json
import re
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class TNodeSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.TNode
order = SITE_BASE_ORDER + 60
tnNodeLimitPageSize = 100
@classmethod
def match(cls, html_text):
return 'Powered By TNode' in html_text
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
# <meta name="x-csrf-token" content="fd169876a7b4846f3a7a16fcd5cccf8d">
csrf_token = re.search(r'<meta name="x-csrf-token" content="(.+?)">', html_text)
if csrf_token:
self._addition_headers = {'X-CSRF-TOKEN': csrf_token.group(1)}
self._user_detail_page = "api/user/getMainInfo"
self._torrent_seeding_page = f"api/user/listTorrentActivity?id=&type=seeding&size={self.tnNodeLimitPageSize}&page=1"
def _parse_logged_in(self, html_text):
"""
        Determine whether login succeeded by checking for user information.
        Detection is skipped for now, pending later optimisation.
:param html_text:
:return:
"""
return True
def _parse_user_base_info(self, html_text):
self.username = self.userid
def _parse_user_traffic_info(self, html_text):
pass
def _parse_user_detail_info(self, html_text):
detail = json.loads(html_text)
if detail.get("status") != 200:
return
user_info = detail.get("data", {})
self.userid = user_info.get("id")
self.username = user_info.get("username")
self.user_level = user_info.get("class", {}).get("name")
self.join_at = user_info.get("regTime", 0)
self.join_at = StringUtils.unify_datetime_str(str(self.join_at))
self.upload = user_info.get("upload")
self.download = user_info.get("download")
self.ratio = 0 if self.download <= 0 else round(self.upload / self.download, 3)
self.bonus = user_info.get("bonus")
self.message_unread = user_info.get("unreadAdmin", 0) + user_info.get("unreadInbox", 0) + user_info.get(
"unreadSystem", 0)
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Parse the user's seeding information
"""
seeding_info = json.loads(html_text)
if seeding_info.get("status") != 200:
return
total = seeding_info.get("data", {}).get("total", [])
torrents = seeding_info.get("data", {}).get("torrents", [])
page_seeding_size = 0
page_seeding_info = []
for torrent in torrents:
size = torrent.get("size", 0)
seeders = torrent.get("seeding", 0)
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += len(torrents)
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
if self.seeding >= total:
            # Whether a next page of data exists
next_page = None
else:
next_page = f"/api/user/listTorrentActivity?id=&type=seeding&size={self.tnNodeLimitPageSize}&page={(self.seeding + self.tnNodeLimitPageSize - 1) // self.tnNodeLimitPageSize + 1}"
return next_page
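    # Illustrative note (not part of the original source): the next-page index
    # above is ceil(seeding / page_size) + 1. For example, with
    # tnNodeLimitPageSize = 100 and self.seeding = 200 after two fetched pages,
    # (200 + 100 - 1) // 100 + 1 == 3, so page 3 is requested next.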
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
"""
        System messages: api/message/listSystem?page=1&size=20
        Inbox messages:  api/message/listInbox?page=1&size=20
        Admin messages:  api/message/listAdmin?page=1&size=20
:param html_text:
:return:
"""
return None, None, None
| 3,782 | Python | .py | 86 | 33.918605 | 190 | 0.619893 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,028 | discuz.py | demigody_nas-tools/app/sites/siteuserinfo/discuz.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class DiscuzUserInfo(_ISiteUserInfo):
schema = SiteSchema.DiscuzX
order = SITE_BASE_ORDER + 10
@classmethod
def match(cls, html_text):
html = etree.HTML(html_text)
if not html:
return False
printable_text = html.xpath("string(.)") if html else ""
return 'Powered by Discuz!' in printable_text
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
user_info = html.xpath('//a[contains(@href, "&uid=")]')
if user_info:
user_id_match = re.search(r"&uid=(\d+)", user_info[0].attrib['href'])
if user_id_match and user_id_match.group().strip():
self.userid = user_id_match.group(1)
                self._torrent_seeding_page = "forum.php?mod=torrents&cat_5up=on"
self._user_detail_page = user_info[0].attrib['href']
self.username = user_info[0].text.strip()
def _parse_site_page(self, html_text):
# TODO
pass
def _parse_user_detail_info(self, html_text):
"""
        Parse extra user information: join date and user level
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if not html:
return None
        # User level
user_levels_text = html.xpath('//a[contains(@href, "usergroup")]/text()')
if user_levels_text:
self.user_level = user_levels_text[-1].strip()
        # Join date
join_at_text = html.xpath('//li[em[text()="注册时间"]]/text()')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
        # Share ratio
        ratio_text = html.xpath('//li[contains(.//text(), "分享率")]//text()')
        if ratio_text:
            ratio_match = re.search(r"\(([\d,.]+)\)", ratio_text[0])
            if ratio_match and ratio_match.group(1).strip():
                self.ratio = StringUtils.str_float(ratio_match.group(1))
        # Bonus points
        bonus_text = html.xpath('//li[em[text()="积分"]]/text()')
        if bonus_text:
            self.bonus = StringUtils.str_float(bonus_text[0].strip())
        # Upload
upload_text = html.xpath('//li[em[contains(text(),"上传量")]]/text()')
if upload_text:
self.upload = StringUtils.num_filesize(upload_text[0].strip().split('/')[-1])
        # Download
download_text = html.xpath('//li[em[contains(text(),"下载量")]]/text()')
if download_text:
self.download = StringUtils.num_filesize(download_text[0].strip().split('/')[-1])
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Seeding-related information
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 3
seeders_col = 4
        # Locate the size column
if html.xpath('//tr[position()=1]/td[.//img[@class="size"] and .//img[@alt="size"]]'):
size_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="size"] '
'and .//img[@alt="size"]]/preceding-sibling::td')) + 1
        # Locate the seeders column
if html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] and .//img[@alt="seeders"]]'):
seeders_col = len(html.xpath('//tr[position()=1]/td[.//img[@class="seeders"] '
'and .//img[@alt="seeders"]]/preceding-sibling::td')) + 1
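        # Illustrative note (not part of the original source): if the header
        # row looks like <tr><td>...</td><td>...</td><td><img class="size"
        # alt="size"/></td>...</tr>, the size cell has two preceding td
        # siblings, so size_col = 2 + 1 = 3 and data rows are read via td[3].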
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//tr[position()>1]/td[{size_col}]')
seeding_seeders = html.xpath(f'//tr[position()>1]/td[{seeders_col}]//text()')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i])
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
        # Whether a next page of data exists
next_page = None
next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
if next_page_text:
next_page = next_page_text[-1].strip()
return next_page
def _parse_user_traffic_info(self, html_text):
pass
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
| 5,152 | Python | .py | 112 | 34.339286 | 106 | 0.565531 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,029 | nexus_php.py | demigody_nas-tools/app/sites/siteuserinfo/nexus_php.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
import log
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.exception_utils import ExceptionUtils
from app.utils.types import SiteSchema
class NexusPhpSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.NexusPhp
order = SITE_BASE_ORDER * 2
@classmethod
def match(cls, html_text):
"""
        Fall back to the NexusPhp parser by default
:param html_text:
:return:
"""
return True
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
user_detail = re.search(r"userdetails.php\?id=(\d+)", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = user_detail.group(1)
self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
else:
user_detail = re.search(r"(userdetails)", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = None
self._torrent_seeding_page = None
def _parse_message_unread(self, html_text):
"""
        Parse the number of unread private messages
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if not html:
return
message_labels = html.xpath('//a[@href="messages.php"]/..')
message_labels.extend(html.xpath('//a[contains(@href, "messages.php")]/..'))
if message_labels:
message_text = message_labels[0].xpath("string(.)")
message_unread_match = re.findall(r"[^Date](信息箱\s*|\(|你有\xa0)(\d+)", message_text)
if message_unread_match and len(message_unread_match[-1]) == 2:
self.message_unread = StringUtils.str_int(message_unread_match[-1][1])
elif message_text.isdigit():
self.message_unread = StringUtils.str_int(message_text)
def _parse_user_base_info(self, html_text):
        # Parse together here to avoid extra request calls
self.__parse_user_traffic_info(html_text)
self._user_traffic_page = None
self._parse_message_unread(html_text)
html = etree.HTML(html_text)
if not html:
return
ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//b//text()')
if ret:
self.username = str(ret[0])
return
ret = html.xpath(f'//a[contains(@href, "userdetails") and contains(@href, "{self.userid}")]//text()')
if ret:
self.username = str(ret[0])
ret = html.xpath('//a[contains(@href, "userdetails")]//strong//text()')
if ret:
self.username = str(ret[0])
return
def __parse_user_traffic_info(self, html_text):
html = etree.HTML(html_text)
html_text = self._prepare_html_text(html_text)
upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
if not upload_match:
upload_match = re.search(r'<span class="font-bold">上[传傳]量?[::]</span><span>([\d.]+ [A-Za-z]+)</span>', html_text)
self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
if not download_match:
download_match = re.search(r'<span class="font-bold">下[载載]量?[::]</span><span>([\d.]+ [A-Za-z]+)</span>', html_text)
self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        # Compute the share ratio as a fallback
        calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
        # Prefer the share ratio shown on the page
self.ratio = StringUtils.str_float(ratio_match.group(1)) if (ratio_match and ratio_match.group(1).strip()) else calc_ratio
if not self.ratio:
ratio_element = html.xpath('//span[@class="font-bold"][contains(text(), "分享率:")]/following-sibling::span/font/text()')
if ratio_element:
_ratio = ratio_element[0].strip() if ratio_element else "0"
self.ratio = StringUtils.str_float(_ratio) if StringUtils.str_float(_ratio) else 0.0
leeching_match = re.search(r"(Torrents leeching|下载中)[\u4E00-\u9FA5\D\s]+(\d+)[\s\S]+<", html_text)
self.leeching = StringUtils.str_int(leeching_match.group(2)) if leeching_match and leeching_match.group(
2).strip() else 0
has_ucoin, self.bonus = self.__parse_ucoin(html)
if has_ucoin:
return
tmps = html.xpath('//a[contains(@href,"mybonus")]/text()') if html else None
if tmps:
bonus_text = str(tmps[0]).strip()
bonus_match = re.search(r"([\d,.]+)", bonus_text)
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
return
bonus_match = re.search(r"mybonus.[\[\]::<>/a-zA-Z_\-=\"'\s#;.(使用魔力值豆]+\s*([\d,.]+)[<()&\s\[]", html_text)
try:
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
return
bonus_match = re.search(r"[魔力值|\]][\[\]::<>/a-zA-Z_\-=\"'\s#;]+\s*([\d,.]+|\"[\d,.]+\")[<>()&\s\[]",
html_text,
flags=re.S)
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1).strip('"'))
except Exception as err:
ExceptionUtils.exception_traceback(err)
@staticmethod
def __parse_ucoin(html):
"""
        Parse UCoin balances, normalising everything to copper coins
:param html:
:return:
"""
if html:
gold, silver, copper = None, None, None
golds = html.xpath('//span[@class = "ucoin-symbol ucoin-gold"]//text()')
if golds:
gold = StringUtils.str_float(str(golds[-1]))
silvers = html.xpath('//span[@class = "ucoin-symbol ucoin-silver"]//text()')
if silvers:
silver = StringUtils.str_float(str(silvers[-1]))
coppers = html.xpath('//span[@class = "ucoin-symbol ucoin-copper"]//text()')
if coppers:
copper = StringUtils.str_float(str(coppers[-1]))
if gold or silver or copper:
gold = gold if gold else 0
silver = silver if silver else 0
copper = copper if copper else 0
return True, gold * 100 * 100 + silver * 100 + copper
return False, 0.0
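    # Illustrative note (not part of the original source): with gold=1,
    # silver=2 and copper=3 the method returns
    # (True, 1 * 100 * 100 + 2 * 100 + 3) == (True, 10203.0),
    # i.e. all three denominations are normalised to copper units.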
def _parse_user_traffic_info(self, html_text):
"""
        Upload / download / share ratio [seeding count / bonus points]
:param html_text:
:return:
"""
pass
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Seeding-related information
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page
"""
html = etree.HTML(str(html_text).replace(r'\/', '/'))
if not html:
return None
        # If the front page exposes an extended link, use that link
seeding_url_text = html.xpath('//a[contains(@href,"torrents.php") '
'and contains(@href,"seeding")]/@href')
if multi_page is False and seeding_url_text and seeding_url_text[0].strip():
self._torrent_seeding_page = seeding_url_text[0].strip()
return self._torrent_seeding_page
size_col = 3
seeders_col = 4
        # Locate the size column
size_col_xpath = '//tr[position()=1]/' \
'td[(img[@class="size"] and img[@alt="size"])' \
' or (text() = "大小")' \
' or (a/img[@class="size" and @alt="size"])]'
if html.xpath(size_col_xpath):
size_col = len(html.xpath(f'{size_col_xpath}/preceding-sibling::td')) + 1
        # Locate the seeders column
seeders_col_xpath = '//tr[position()=1]/' \
'td[(img[@class="seeders"] and img[@alt="seeders"])' \
' or (text() = "在做种")' \
' or (a/img[@class="seeders" and @alt="seeders"])]'
if html.xpath(seeders_col_xpath):
seeders_col = len(html.xpath(f'{seeders_col_xpath}/preceding-sibling::td')) + 1
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
        # If a table with class="torrents" exists, prefix the XPath with table[@class="torrents"]
table_class = '//table[@class="torrents"]' if html.xpath('//table[@class="torrents"]') else ''
seeding_sizes = html.xpath(f'{table_class}//tr[position()>1]/td[{size_col}]')
seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]/b/a/text()')
if not seeding_seeders:
seeding_seeders = html.xpath(f'{table_class}//tr[position()>1]/td[{seeders_col}]//text()')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i])
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
        # Whether a next page of data exists
next_page = None
next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
if next_page_text:
next_page = next_page_text[-1].strip()
# fix up page url
if self.userid not in next_page:
next_page = f'{next_page}&userid={self.userid}&type=seeding'
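            # Illustrative note (not part of the original source): a raw link
            # such as "getusertorrentlistajax.php?page=2" (hypothetical) lacks
            # the user ID, so it is rewritten to
            # "getusertorrentlistajax.php?page=2&userid=<id>&type=seeding".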
return next_page
def _parse_user_detail_info(self, html_text):
"""
        Parse extra user information: join date and user level
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if not html:
return
self.__get_user_level(html)
self.__fixup_traffic_info(html)
        # Join date
join_at_text = html.xpath(
'//tr/td[text()="加入日期" or text()="注册日期" or *[text()="加入日期"]]/following-sibling::td[1]//text()'
'|//div/b[text()="加入日期"]/../text()|//span[text()="加入日期:"]/following-sibling::span[1]/text()')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(join_at_text[0].split(' (')[0].strip())
upload_match = re.search(r"[^总]上[传傳]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
if not upload_match:
upload_match = re.search(r'<span class="font-bold">上[传傳]量?[::]</span><span>([\d.]+ [A-Za-z]+)</span>', html_text)
self.upload = StringUtils.num_filesize(upload_match.group(1).strip()) if upload_match else 0
download_match = re.search(r"[^总子影力]下[载載]量?[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+[KMGTPI]*B)", html_text,
re.IGNORECASE)
if not download_match:
download_match = re.search(r'<span class="font-bold">下[载載]量?[::]</span><span>([\d.]+ [A-Za-z]+)</span>', html_text)
self.download = StringUtils.num_filesize(download_match.group(1).strip()) if download_match else 0
ratio_match = re.search(r"分享率[::_<>/a-zA-Z-=\"'\s#;]+([\d,.\s]+)", html_text)
        # Compute the share ratio as a fallback
        calc_ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
        # Prefer the share ratio shown on the page
self.ratio = StringUtils.str_float(ratio_match.group(1)) if (ratio_match and ratio_match.group(1).strip()) else calc_ratio
if not self.ratio or self.ratio == 0.0:
ratio_element = html.xpath('//span[@class="font-bold"][contains(text(), "分享率:")]/following-sibling::span/font/text()')
if ratio_element:
_ratio = ratio_element[0].strip() if ratio_element else "0"
self.ratio = StringUtils.str_float(_ratio) if StringUtils.str_float(_ratio) else 0.0
        # Seeding size & seeding count
        # If the seeding page yielded nothing, fetch the values here again
seeding_sizes = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
'table[tr[1][td[4 and text()="尺寸"]]]//tr[position()>1]/td[4]')
seeding_seeders = html.xpath('//tr/td[text()="当前上传"]/following-sibling::td[1]//'
'table[tr[1][td[5 and text()="做种者"]]]//tr[position()>1]/td[5]//text()')
tmp_seeding = len(seeding_sizes)
tmp_seeding_size = 0
tmp_seeding_info = []
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i])
tmp_seeding_size += size
tmp_seeding_info.append([seeders, size])
if not self.seeding_size:
self.seeding_size = tmp_seeding_size
if not self.seeding:
self.seeding = tmp_seeding
if not self.seeding_info:
self.seeding_info = tmp_seeding_info
seeding_sizes = html.xpath('//tr/td[text()="做种统计"]/following-sibling::td[1]//text()')
if seeding_sizes:
seeding_match = re.search(r"总做种数:\s+(\d+)", seeding_sizes[0], re.IGNORECASE)
seeding_size_match = re.search(r"总做种体积:\s+([\d,.\s]+[KMGTPI]*B)", seeding_sizes[0], re.IGNORECASE)
tmp_seeding = StringUtils.str_int(seeding_match.group(1)) if (
seeding_match and seeding_match.group(1)) else 0
tmp_seeding_size = StringUtils.num_filesize(
seeding_size_match.group(1).strip()) if seeding_size_match else 0
if not self.seeding_size:
self.seeding_size = tmp_seeding_size
if not self.seeding:
self.seeding = tmp_seeding
self.__fixup_torrent_seeding_page(html)
def __fixup_torrent_seeding_page(self, html):
"""
        Fix up the seeding page link
:param html:
:return:
"""
        # Standalone seeding page
seeding_url_text = html.xpath('//a[contains(@href,"getusertorrentlist.php") '
'and contains(@href,"seeding")]/@href')
if seeding_url_text:
self._torrent_seeding_page = seeding_url_text[0].strip()
        # Extract the user ID from the JS call
seeding_url_text = html.xpath('//a[contains(@href, "javascript: getusertorrentlistajax") '
'and contains(@href,"seeding")]/@href')
csrf_text = html.xpath('//meta[@name="x-csrf"]/@content')
if not self._torrent_seeding_page and seeding_url_text:
user_js = re.search(r"javascript: getusertorrentlistajax\(\s*'(\d+)", seeding_url_text[0])
if user_js and user_js.group(1).strip():
self.userid = user_js.group(1).strip()
self._torrent_seeding_page = f"getusertorrentlistajax.php?userid={self.userid}&type=seeding"
elif seeding_url_text and csrf_text:
if csrf_text[0].strip():
                self._torrent_seeding_page = "ajax_getusertorrentlist.php"
self._torrent_seeding_params = {'userid': self.userid, 'type': 'seeding', 'csrf': csrf_text[0].strip()}
        # Per-category seeding mode
        # Temporarily disabled
# seeding_url_text = html.xpath('//tr/td[text()="当前做种"]/following-sibling::td[1]'
# '/table//td/a[contains(@href,"seeding")]/@href')
# if seeding_url_text:
# self._torrent_seeding_page = seeding_url_text
def __get_user_level(self, html):
        # User level: read the value from the same row; for image-based levels use the title attribute, otherwise the cell text
user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级" or *[text()="等级"]]/'
'following-sibling::td[1]/img[1]/@title')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
return
user_levels_text = html.xpath('//span[@class="font-bold m-auto" and contains(text(), "等级:")]/following-sibling::span/b')
if user_levels_text:
user_level_element = user_levels_text[0]
if not StringUtils.is_string_and_not_empty(user_level_element.text):
return
self.user_level = user_level_element.text.strip()
log.debug(f"【Sites】站点 {self.site_name} 等级: {self.user_level}")
return
user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
'following-sibling::td[1 and not(img)]'
'|//tr/td[text()="等級" or text()="等级"]/'
'following-sibling::td[1 and img[not(@title)]]')
if user_levels_text:
self.user_level = user_levels_text[0].xpath("string(.)").strip()
return
user_levels_text = html.xpath('//tr/td[text()="等級" or text()="等级"]/'
'following-sibling::td[1]')
if user_levels_text:
self.user_level = user_levels_text[0].xpath("string(.)").strip()
return
user_levels_text = html.xpath('//a[contains(@href, "userdetails")]/text()')
if not self.user_level and user_levels_text:
for user_level_text in user_levels_text:
user_level_match = re.search(r"\[(.*)]", user_level_text)
if user_level_match and user_level_match.group(1).strip():
self.user_level = user_level_match.group(1).strip()
break
def _parse_message_unread_links(self, html_text, msg_links):
html = etree.HTML(html_text)
if not html:
return None
message_links = html.xpath('//tr[not(./td/img[@alt="Read"])]/td/a[contains(@href, "viewmessage")]/@href')
msg_links.extend(message_links)
        # Whether a next page of data exists
next_page = None
next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁")]/@href')
if next_page_text:
next_page = next_page_text[-1].strip()
return next_page
def _parse_message_content(self, html_text):
html = etree.HTML(html_text)
if not html:
return None, None, None
        # Subject
message_head_text = None
message_head = html.xpath('//h1/text()'
'|//div[@class="layui-card-header"]/span[1]/text()')
if message_head:
message_head_text = message_head[-1].strip()
        # Message date
message_date_text = None
message_date = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[2]/td[2]'
'|//div[@class="layui-card-header"]/span[2]/span[2]')
if message_date:
message_date_text = message_date[0].xpath("string(.)").strip()
        # Message content
message_content_text = None
message_content = html.xpath('//h1/following-sibling::table[.//tr/td[@class="colhead"]]//tr[3]/td'
'|//div[contains(@class,"layui-card-body")]')
if message_content:
message_content_text = message_content[0].xpath("string(.)").strip()
return message_head_text, message_date_text, message_content_text
def __fixup_traffic_info(self, html):
# fixup bonus
if not self.bonus:
bonus_text = html.xpath('//tr/td[text()="魔力值" or text()="猫粮"]/following-sibling::td[1]/text()')
if bonus_text:
self.bonus = StringUtils.str_float(bonus_text[0].strip())
| 21,022 | Python | .py | 384 | 40.153646 | 134 | 0.552069 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,030 | small_horse.py | demigody_nas-tools/app/sites/siteuserinfo/small_horse.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class SmallHorseSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.SmallHorse
order = SITE_BASE_ORDER + 30
@classmethod
def match(cls, html_text):
return 'Small Horse' in html_text
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
user_detail = re.search(r"user.php\?id=(\d+)", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = user_detail.group(1)
self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
self._user_traffic_page = f"user.php?id={self.userid}"
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
ret = html.xpath('//a[contains(@href, "user.php")]//text()')
if ret:
self.username = str(ret[0])
def _parse_user_traffic_info(self, html_text):
"""
        Upload / download / share ratio [seeding count / bonus points]
:param html_text:
:return:
"""
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
tmps = html.xpath('//ul[@class = "stats nobullet"]')
if tmps:
if tmps[1].xpath("li") and tmps[1].xpath("li")[0].xpath("span//text()"):
self.join_at = StringUtils.unify_datetime_str(tmps[1].xpath("li")[0].xpath("span//text()")[0])
self.upload = StringUtils.num_filesize(str(tmps[1].xpath("li")[2].xpath("text()")[0]).split(":")[1].strip())
self.download = StringUtils.num_filesize(
str(tmps[1].xpath("li")[3].xpath("text()")[0]).split(":")[1].strip())
if tmps[1].xpath("li")[4].xpath("span//text()"):
self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[4].xpath("span//text()")[0]).replace('∞', '0'))
else:
self.ratio = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
self.bonus = StringUtils.str_float(str(tmps[1].xpath("li")[5].xpath("text()")[0]).split(":")[1])
self.user_level = str(tmps[3].xpath("li")[0].xpath("text()")[0]).split(":")[1].strip()
self.leeching = StringUtils.str_int(
(tmps[4].xpath("li")[6].xpath("text()")[0]).split(":")[1].replace("[", ""))
def _parse_user_detail_info(self, html_text):
pass
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Seeding-related information
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 6
seeders_col = 8
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{size_col}]')
seeding_seeders = html.xpath(f'//table[@id="torrent_table"]//tr[position()>1]/td[{seeders_col}]')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i].xpath("string(.)").strip())
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
        # Whether a next page of data exists
next_page = None
next_pages = html.xpath('//ul[@class="pagination"]/li[contains(@class,"active")]/following-sibling::li')
if next_pages and len(next_pages) > 1:
page_num = next_pages[0].xpath("string(.)").strip()
if page_num.isdigit():
next_page = f"{self._torrent_seeding_page}&page={page_num}"
return next_page
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
| 4,524 | Python | .py | 90 | 39.611111 | 122 | 0.584371 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,031 | gazelle.py | demigody_nas-tools/app/sites/siteuserinfo/gazelle.py | # -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class GazelleSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.Gazelle
order = SITE_BASE_ORDER
@classmethod
def match(cls, html_text):
html = etree.HTML(html_text)
if not html:
return False
printable_text = html.xpath("string(.)") if html else ""
return "Powered by Gazelle" in printable_text or "DIC Music" in printable_text
def _parse_user_base_info(self, html_text):
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
tmps = html.xpath('//a[contains(@href, "user.php?id=")]')
if tmps:
user_id_match = re.search(r"user.php\?id=(\d+)", tmps[0].attrib['href'])
if user_id_match and user_id_match.group().strip():
self.userid = user_id_match.group(1)
self._torrent_seeding_page = f"torrents.php?type=seeding&userid={self.userid}"
self._user_detail_page = f"user.php?id={self.userid}"
self.username = tmps[0].text.strip()
tmps = html.xpath('//*[@id="header-uploaded-value"]/@data-value')
if tmps:
self.upload = StringUtils.num_filesize(tmps[0])
else:
tmps = html.xpath('//li[@id="stats_seeding"]/span/text()')
if tmps:
self.upload = StringUtils.num_filesize(tmps[0])
tmps = html.xpath('//*[@id="header-downloaded-value"]/@data-value')
if tmps:
self.download = StringUtils.num_filesize(tmps[0])
else:
tmps = html.xpath('//li[@id="stats_leeching"]/span/text()')
if tmps:
self.download = StringUtils.num_filesize(tmps[0])
self.ratio = 0.0 if self.download <= 0.0 else round(self.upload / self.download, 3)
tmps = html.xpath('//a[contains(@href, "bonus.php")]/@data-tooltip')
if tmps:
bonus_match = re.search(r"([\d,.]+)", tmps[0])
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
else:
tmps = html.xpath('//a[contains(@href, "bonus.php")]')
if tmps:
bonus_text = tmps[0].xpath("string(.)")
bonus_match = re.search(r"([\d,.]+)", bonus_text)
if bonus_match and bonus_match.group(1).strip():
self.bonus = StringUtils.str_float(bonus_match.group(1))
def _parse_site_page(self, html_text):
# TODO
pass
def _parse_user_detail_info(self, html_text):
"""
        Parse extra user information: join date and user level
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if not html:
return None
        # User level
user_levels_text = html.xpath('//*[@id="class-value"]/@data-value')
if user_levels_text:
self.user_level = user_levels_text[0].strip()
else:
user_levels_text = html.xpath('//li[contains(text(), "用户等级")]/text()')
if user_levels_text:
self.user_level = user_levels_text[0].split(':')[1].strip()
        # Join date
join_at_text = html.xpath('//*[@id="join-date-value"]/@data-value')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
else:
join_at_text = html.xpath(
'//div[contains(@class, "box_userinfo_stats")]//li[contains(text(), "加入时间")]/span/text()')
if join_at_text:
self.join_at = StringUtils.unify_datetime_str(join_at_text[0].strip())
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Seeding-related information
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 3
        # Locate the size column
if html.xpath('//table[contains(@id, "torrent")]//tr[1]/td'):
size_col = len(html.xpath('//table[contains(@id, "torrent")]//tr[1]/td')) - 3
        # Locate the seeders column
seeders_col = size_col + 2
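        # Illustrative note (not part of the original source): a header row
        # with 9 cells gives size_col = 9 - 3 = 6 and seeders_col = 6 + 2 = 8,
        # i.e. the size column is assumed to sit fourth from the right edge of
        # the table, with seeders two cells to its right.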
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{size_col}]')
seeding_seeders = html.xpath(f'//table[contains(@id, "torrent")]//tr[position()>1]/td[{seeders_col}]/text()')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = int(seeding_seeders[i])
page_seeding_size += size
page_seeding_info.append([seeders, size])
if multi_page:
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
else:
if not self.seeding:
self.seeding = page_seeding
if not self.seeding_size:
self.seeding_size = page_seeding_size
if not self.seeding_info:
self.seeding_info = page_seeding_info
        # Whether a next page of data exists
next_page = None
next_page_text = html.xpath('//a[contains(.//text(), "Next") or contains(.//text(), "下一页")]/@href')
if next_page_text:
next_page = next_page_text[-1].strip()
return next_page
def _parse_user_traffic_info(self, html_text):
# TODO
pass
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
| 6,115 | Python | .py | 135 | 33.674074 | 117 | 0.567846 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,032 | mt.py | demigody_nas-tools/app/sites/siteuserinfo/mt.py | from collections import namedtuple
from typing import Tuple
import requests
from app.utils import RequestUtils, StringUtils
from config import Config
class MtUserInfo:
"""
    Fetch user information from the M-Team (MT) site
"""
_user_info_url = "%s/api/member/profile"
_user_torrent_list_url = "%s/api/member/getUserTorrentList"
_peer_status_url = "%s/api/tracker/myPeerStatus"
_msg_search_url = "%s/api/msg/search"
_favicon_url = "%s/static/media/logo.80b63235eaf702e44a8d.png"
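    # Illustrative note (not part of the original source): each template above
    # is completed with the site's base URL via the %-operator, e.g.
    #   "%s/api/member/profile" % "https://example-mt-site.com"
    #   -> "https://example-mt-site.com/api/member/profile"
    # ("example-mt-site.com" is a placeholder, not a real endpoint).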
_user_level = {
"1": "小卒",
"2": "捕頭",
"3": "知縣",
"4": "通判",
"5": "知州",
"6": "府丞",
"7": "府尹",
"8": "總督",
"9": "大臣",
"10": "VIP",
"11": "職人",
"12": "巡查",
"13": "總版",
"14": "總管",
"15": "維護開發員",
"16": "站長",
"17": "候選管理",
"18": "波菜管理"
}
_favicon_base64 = "AAABAAUAAAAAAAEAIADsJwAAVgAAADAwAAABACAAqCUAAEIoAAAgIAAAAQAgAKgQAADqTQAAGBgAAAEAIACICQAAkl4AABAQAAABACAAaAQAABpoAACJUE5HDQoaCgAAAA1JSERSAAABAAAAAQAIBgAAAFxyqGYAACAASURBVHic7d3ncxz3nefxd/fkiBlkEJEgCWZSFCVRliUrOcj2Wl57XVe7V3VbW1e7W3X79Kru8f4Ddw9vqy48uHPdbu15Q529DpLWtkTlQJEiRVIMABNyHEyO3ffgOxADZkCQIoAZ9PdVNUWQGAKNH7o//Uv9+xk8HC8QA+JA+CG/hlLq4WWBJDAHFB/2ixjreI8LaAWiQAgIAhEkACKA/2G/uVLqoRWADLAIpKofZ6sfJ5FQqNzvi7jv83kDucCPA0eAXdVXL3L3d7G+EFFKPVo2UEYu/FvAaPV1ATgNLFQ/t6Z6F28EeAw4BOwDBoAu5KJvrX7eB5hf5SdQSn0lFlBC7vhL1dcsMA2cRYLgE6S2UFOtGkA7MAL8AfAS8MQjPWSl1KNiIjfijurrTh8CPUAOqSEsI2Fh3/kmV40v+CrwF8CLwBDS4aeUai4xpOZ+EKkpzAB57ukXuDMA2oCnkQB4EUmPwGYcqVLqkfMiI3TtgAe5899EOgq/5L7jzwHgJ8AzwI77fXXDANMwMLQLUKlNZ9tg2zaWvebbfEjf3fNIZ/5nSH/Bl30CKwHQj/T0f6P68ZrcLoNgwEM44Cbgu7cVoZTaaIWiRbZQJpkuUq6snQJIc2A/8EdIrf7dlU+sBMBR4DlgkDoTe2IRL72dQbrbA7TH/LREvIQCHnweHQhQarMVyxa5fJmlZJHF5QJzS3luTKVZThWprK4W+JAm/cvABDIyUAKslQB4Dvg2ddr8pgH9XSG++2wfLz7Vw6HdcWJRHy5T6/9KbZVKxSKTLfPF9WXePzvLP7xxnQtjCXL5cq23h4ETwKnqx0nAcgG7gT9EagFu7pkbEPC7eOXrffzwxUG+8/U+hnZEiIa9uF0GhnYAKLV1DHCZBkG/m662AH1dIXxek9HxFLZlY9duGUwg8wSWgIwb2IOMIa66+wf9bvq6grz4ZA8vPbWDkaGWDfxplFIPwjQMTI+L9riLeNRLe4sf27a5OLbMjakUS8majwh0AYeBy8CsC/g+Mtmn99539neHOHG4k1dfHGTfzphW+ZVqUIYhtXXTlJG5ybkss4v5Wm/NIM8LnAFmTGAvUPPW3tMe5PEDbbTH/Hjc2tmnVKMyDAOXy6S7LcCzx7robK07hacVeZ4nCDLzbyd1ev474n72D8WIBO/3zJBSqhHEIl4O7Y7TGvXVm6MTRYb6Q4BpIolQc7qv3ydtC737K9Uc3G6TSMhDMODC66k5R2dlhmAA8LuRMcKa73SbBl6PS3v7lWoSpmlgmtIcMA3pG7hnNMBERvvcgNtkrUd6Db78Ikqp5mHA/W7cBtUmgFLKoTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHMy91QfQjGzbJp0tk8wUSSSLZPJlcvky+UIFy7a/fJ/P6yIc8BDwuwgHPcQjPoJ+F16vawuPXqnbNADWoVS2KJUtiiWLYqlCoWgxs5BlcjbLzekMC8sFlpIFkukiZet2AESCHjrifuIRL52tAQa6w7S3+mkJe/F6zC9fbpeJaRpb+BM2tpXyr1RsKhWLSrWMK5ZNpWJjGGCaBi7TwDAMTANcLhOP28TjNjCr/65W0wBYh/mlPNcmUoyNp7hyM8nl68vMJwokM0XyhQqlskW5YlEu29jcDgCXWb3A3fKn3+uiuz1If3eIkcEoewaiDPdF6W4PEPTrr6Ke6fksE7NZ5pbyzC3mSaQK2DYkUkXmEwX8XpNw0EMs6sXncREKuOloDTDYE6KvM0Q45MHj1gCoRc+6OhYSeabmc1yfSDM6nuTaeIqJ2Sy3ZtLcnMqQyZUpla0H/rrRcJKOuJ/L15fp7w4x0BNmuDfCcF+EnX0RoiEPPgc3EdLZEovLBeaW8swn8iwsFxifzjA9n2MpWSCRKpLKlL5shiVSRbwek4DfTSTkweM2CfhcxKM+utsD9LQH6Wzz0x6TV2ern5aIl4DPpbUCNAC+ZNs2tg2WLdXKsfEUb386w8/fvMnVW0kWlwuP5Psk0yWS6RKjt1IAeD0mu/qjvPRUDz96aYjhvghtMV+1OosjTlLLtrEtm7JlMzWf4/Mri3x6YYEzlxf47MoSqXSJQrHyUF/b5zHp7w6zfzjG0b2tPHWwnZHBFrraA9Wml7zPCeVciwbAHWYWcly8luDd0zNcHEsweivFxFyWdLa0Yd+zXLaYmMnw2rsTXBhNcGSkleMH2vjakS5aW7wEHNA0mFvMMzqe4r0zM1wYTXB9IsVSsshSqkAqXaRUtu//ReoolS2mF7Jk82VGbyU5+ckUw31RHtvXypOHOtjTHyUYcOPQ618DACCZLjI9n+PTLxb48OwcJz+dYnI2y3J64y78FZYNyUyJZKbE6HiS8ZkMk3NZEqkSR0fiDPdFaYl4cLu214htsVQhmS5xczrNpevLnL28xDunZxgbT7GUfDS1LZDyTWfLpLNlJueyGMDlG0luTKeYXczz2N5WBneE2dERpLXFBzirNqABANyazvDaexP8/M2bfH51kULRwrIe/q7zsGwbRqt9DSdPTfPHrwzz6gsDHBlpxR3YXgGQyZa5OJbgb389yifn5xkdT1Eu3+7h3yg2MLOYY2E5z+mLi+zsjfDyUz288mwfJ4504rTBGMcGgG3bZHJlPjg7y8lT07z5yTTXxlNk8+tqa9qABSSAG8AykAaywJ1fIAhEgRagHegD1uzhsyybXEE6GP/1g0kSyQLffqaXx/a1sbM38sA/Z6NJZ0vMLOT43UdTvHN6hjOXFpiez5EvPFwb/2HYNpTKNqlMiWsTKV5732IhWeD6ZJrnj3fT1RbA7d5egVuPYwNgKVlk9FaS37wzzslT03w+mrjff6kAGSCFXPApYBK4CMwhYZDk7gCIAG3VVx+wt/pvYSCGBITv3m8kJ6jF+dEl5pdylKpj30G/m3jUi9fTfKMEli0X3PWJFJ9cWOAX
b93kw3NzZLIl1nHTrwBFpPzz1Y8BStWPTeRc9lQ/9iBlGwD8db+oZZNIFUmkiiwuF5hdyOF1mxzb10ZvVwiPx8S1zasEjg2AM5cW+Mc3rvPO6RluTWfW81+KwCXgY+A94Cowg5yQZeQktYA7T2dX9eUGvEAICYFjwLPVj3vX+qYLywV+9fY4uUKFcsXmhSe66W4Prv8HbRCVis3FsQRvfDDJz14bY3YpTya3rosfpIyngQvAdaTcbWCx+rEfqWm1I4HahpTtCNC/nm8wu5jj/bNzzCzk+cHzA/zJd3fSFvNv+07Y7f3T1ZBMF/n86hK//XCSd8/MMDGbJV9/iKkILCAX/jnkBLwMjCF3/XUlx4q//g/HzL/+m9MJpOZwCTgIHKq+upCAuEu5Inep0xcXMA2DSNDDsf2wo6N5QmBuMcfVW0n+5eQt3v50hhtTGcoVC7v+xV8BZpHm1RVgAimzKeT3kUQCYKVG5kFCIIyc0yGgu/rqB/YDw0gtzEuNZlipLOV86cYygQ8nsCyb73y9l5HBFgL+7TtnwFEBUCxVmJzL8ut3xvn9R1NcvpFc6+055CI/D/wGeA24hbTzH8pf/81pCzmJp4CPkJPyOPAqUisYRE7kVb+X65NpkukSPR0BggE37TE/bpfR0FOILcumWLYYHU/x2nsT/PLkrbXK3EYu/BzSnDoHvA/8DgmBuYc4BDfQAXwHeA54AuhEagsB4K7Csyxppnxyfp7xmSyxiJdI0MNQbxjT3J6jA44KgImZLJ+cn+f3H099ORFnDeeBt4B/AUaRO0/+ER/SFHASaU58HfgBcAS5c62Szpb49TsTeN0uhnsjtMf9hAKN+ysslizGxpO89fEU//D6NWYW1yy+MlKl/wh4HfgcqQEs8vChW0F+b68DnyC1rFeQMHiSewJgRaFkMbuQ4/++fo1svsyf/9FeAj4X7m04nbhxz55HqGLZlMsWZy4t8K8fTHJtIkWq/uSelbv+m8jF+QnSw78RctXXSrjkkBP+OFI7uKuqWixb3JxK88mFeXb1R3jhyZ6GHRkolSzmE3l+/8k0b5+e4dpkeq0qfxpp47+LlPk7SJX/q5a7jTTjJn/1X7899b2/en0U6ThcAOaBfUgfjJc7Ho23LJtsocyFsQTxFh97d7ZwdKSV3q5VLbSm54gAKJctkuki756e5dfvjJPJl2u9zUbuQqPA/0Sqn6ObdYjIaMIV5OQsAD1IT/Zd41GlssUX1xL8/WtjDPVGGOhuzOppNl/m1lSan795k1MX5utd/Fb1NYME7f8APkPa9Y/U9/7qdRsJ2JNIP85p4E+BF5HOQ4M7agS2DcvpIueuLPJ3vx4jGHDT3RHE3GbTsx0RADMLOX7/0RQXryXIFcr1JvlYwG+R9v77yB1ps1WQUQYPEAceAwbufdNyusjlG0k++nyOtpiPA8Mx3K7GOikvjC3xq3fGmZjJrDWPfwk4A/wrUuO6ilykGy2BBM3fIc2wV4Ed1OiEnU/k+fDcrAwNdgQZ3BFuymHYerZ1ANi2TblsMzmb5bcfTTI2kaJcqXnxp5Fe5zeRTqcbyF15s9lIj/fHSEeVF2hFOqy+POsKRYv5pTyfnJ9nR0eQkYEWXKbdEHemcsUin69wYTTB259OM7+Up1K7zDPANeDXSPCe2cTDXBlW/BCpbUWBp5GhQzd31Lpy+Qrj+Swfn59nR2eQrrYAHrfZEGX9KGzrAIBqVXQmw7tnZpir3wk1g4ztv4MM9W3etLTabgL/B2kG7EJqAXfddiqWzakLEgA/fHEQr9es3aO1yYpFi+n5LOdHE5z5YpFSpe4j0zNINfwfgPFNO8C7zQOnuN3J2ItM1Fo1DfCjc3NEQh6eP95NOORpiLJ+FLb1fEfbhotjCT67tEg6W651J1rpJPoCueDG2PqLH6Q5kkc6xX6JjHvfxbYhkSxyYzLNucuLLCQe3QM0X8XCcoE3PpjkwugSpXLNsf4S8vO8BvwMuQi3qsxtpPZ3GXgD+DnSB7PKUqrA2HiSUxcXmJp76JHghrNtA8C2bSzb5sJYgnNXFikUK9SoiFaQXv8LSOfQ7OYe5X2dQ5ol49QYCssXZV7DqQvzzC5sRtN5bcVShZmFLG+dmmZ0PFWrvEF+jptIuL3DV5hX8YgUkXPgIySUblBjgle+UGFyNssHZ2fXO3O0KWzbAACpJl++sczFa8v12v55pO15vvpxI9z977SEjEScok41eXG5wPtnZ5ma3+rrSJboujGV4fzoEjOLdQNpHulkvYmU+eY/dlnbJPAp0v9ys9YblpJF3j09w43JjRoV3nzbNgByhQqzC7Ks18Jyvl7Pfw54G/nFN8qJeKcKcsG8jfSQr5LMFLk4lmB+qUC5bGGvMdi+0W5MpbkwusRyqkh59SIeK09PXgT+H9LcaqQyLyE1wLeQSUir5PJlxsZT3JrOsLhceKgl4RrNtg2AdLbEzck080t5crUf8S0ik24+/I9/eujC5h7dA1lG7phXkWO+66zL5srcmEwzt5Qnm69gbeE5eW08xfnRBIVizYOwkLvsaaS9PbGZx7ZOGaQz+Bxyc7jrxCmWLeaW8ozPZpiay1IsaQA0rESyyPmxJZbTxXpvGUcmnyz95//9eaNV/e9UQC6WG8iY9V29fZYtJ+b0vCxYupV3pYm5LGPjKYrlmsVZRsr7U+4JsQZSRjoBryE1lZoTkqbmsjKnpPaEsqaybQMglS1xfSJNJlf3lzSDVPXWfCKoAVhIR9k08ijsqrFM24b5RIGpudyWBEC5YpHLl5mZzzE1l61V/beRKvY5pL+lUQPXRgJ2HAmqmotEzC7mGRtPkdvERUw2yrYNgGy+zMRcZq2UnkWG/5qlS3cOaQbU7O1bSsqCFuX64+4bplS2WE4VmV2UZbxrLOtlIRfWF8h050Zq+9ey8rTmUq1PLiTy3JxKP/RKxY1k2wZALl9mej63VkpnkBCo20ZoMCmk1lLzeFPZEoupmhffhssXKkzOy8q7dSSQC79Zus9X1oCoebyJVImp+RxF7QRsXPmixeJygWKpbgDkkITf+KV/H40s0mlZ83gzuTLJdBGr9nDnhsoXKkzP59YKgCTSh7H1Y5Xrk0aaARlWr/JEJldiIVGgWKxsyeKxj9K2DYBy2SKdLa+1pnwB+QU3Sz0uj1xINa+yXKFMJlemsgXDgPlihZmFNQMgi9ReGmO64v3lkVpLjhrlnStUWE7LtnB1nnNoGts3ACqyuu4aVeIy8gtulnpcGbmAah5vuWxTLK65zNaGqVRssvky5fpV4hISts3UbW5xe72GVSqW/MzN3g+wbQPAtm3KFXutiTEWcvdvlghfWYq8JsuypTq6JQFgfbloaR0rYdtMV4uN1ARq1lpsyyZXqDT9XIBtGwCGaeBxmZj1H9tcWUq6WR7sciHrBNQ8XpfLwO02tmSLq4plk8vXfNhqRTMGAEiHa80+F8uWZx/WeNqxKWzbAHCZBj6va61FM93IEtLNUgYu7lm66k4el4nH7dqSADAMA7fbXOt7N1vYrlhZ1n0VwwCXq/n3DWiWk/+BuV0
GQb97rZVyPMhCG81SBmser8dj4t+iLa/Nati66pe1CwnbZltKx4uU+yqGITsPN/uejc199Gvwe120xX14vXXPuSCygYR3847qKwkiuwnVPCFDATfRsGdL7kgu05BVc+tfDG7k+JspAAwktFbt3ARgGgZ+rwuvR2sADcnvc9HZ6sdfPwBiyEo7dbeOajARZIWgmoEVDXtpa/GtdRfeMF6PSTziw1e/rP3IGofNErYupLYVpMYxu10Gfp8Lv9+Fx91Mmbbatg2AoN9NT3uQgK/uL6gd2TqqWdZ6jiO73NQMrHjES2drYEuqpD6vhO0aZR1E1uSveTdtQD5kQ9eVALgrVX1eF+GQh6DfjcfT3JdQcx/9GqJhL7v7I0SCNWvMICfkAWQ7qYb2l3+014XUVo4hNYFVOlsD9HeHt+SEDPrd9HaGCAXqlnULss9Bs4RtDNiJBMAq0bCH7lZZHLTZNf9PUEdL2MPeoRai4bonZSuy4GbNPfkaSPC//eOlEWA3smPQXXfRlfZ3R9xPV6t/S5YH93lNOlv9tIQ9eD01FycNIFtydSMXV6NrBw4jqwWv0hr10dcVWqvJ0zS2bQDEIl727YzREvZSp18sgmwWOYycnI2qFdnO6kCtT3o9Jm0tPtrjflqi3i3pBPS4TVqiXuItPqIhb62hVzdS0xpByrvRz7sdwAmk7Fdpj/sZ2hHWAGhkHrdJNCQbO/b3hGtdGAZSxfsash9fo4ogVf+a21zHoz6eONhOd5sf0zC2ZBjQMAxMw6C3I8hwXwTv6maIgYTAIWRH5EY97wyktjIAPE6d2kpna4Bd/dG1+jyaRqP+Ir4yl0vGxXcPRNndH6nXO+5HftHHaMxe6jak6r+fOrWU1hYfTx3qoKt967cLH+gJMzLYUm/nHBfycxxEqtaNuCeFDxhCaipD3NMHYBhS4+ppD7KzL7LWCFPT2LYBAHJn2jcU49Cu1nodNh7kAnuM2yHQSE4gOwYPIHemuxgGtLX4OHG4g+62VZ/edMN9EQ7tjuPz1ixrF9KxdrD6ZyPuahoFvoWcD6t4PS7aY376u0MMdGsfQMMzDDkpD+yK0dUWqFVlM5FawAHg3yK1gZWNIrdSGNiDbBn+deTEvOvgDQP6u0LsHWphoCdMuP5ox6bpbg+weyBCd3uQoH/VDX5lYs0w8CMkBBpJDLnzP4tsEbZKNOTh2P42BnpC95tm3jS2eQAY9HQEGRlqYe9QjHi07jD0IHJSPodceH62rmw8yISfZ4FnkHBaNfbvdpns2xnjyEgrbXE//gZoj8ajPgZ6wuwZiNIWq1vWvcAPkV72Frb+HFzpnxgAnkBuAn2r3mRIc+trRzsZ7Gn4keN1a8R22CPX0x7g1Rf6yeRKTNbe1mmll/rb1b/PIevCbfZ6gQbSA/114C+R5skqLpdBKODmmaOdPH2ks6F2Bm6N+nj5xA4SqUK9HXRCSPv6W8hCIb9hA7YDfwArOzE/h9wE2mu9ye9z0dMR5JkjnfR3N/Ko8YPZ6vTdFPGotJOP7InT1xWs1R9gIifCLuTO+0PgKHJibNatNYxUj7+JDPsdpE6fRGfcz5OH2jk80kp/d6ihnkiLhj08daidA8Nx2mM+PO66Q4LHud3e7tjkw1zhRWok3waeR2pbNXtTh/siHNvXykBPeK3JZU3HETWASMjDvp0xjh9o59pEinfPzLKcKt67doaBtAMfQ3rcw8gz7FeQO9VGPvjtQibJPAX8G6TzL0SNgHaZBsN9Eb7/XD8jg1EiocY6GcNBDwd2xTm0J86uz6NcHEtQKtd8pH4fErpTyDoBy8iz95u1pImJ/L4PAf8OaZKsuvsbyNOOj+9r49ljXbREPLi3wQzAFY4IgBUnDndgWTZziQKXri3X2zQkgFTDX0Xa4v+M7B+wUTvZ+JG7/TeRKugQchdadev0eU1290d59lgX33y6l87Wxn2O6bG9rfzo5UESqYKsVVh7abYu4CdI4PYgG7TObcLhhavf83vAS0j51xzzj4Q99HWGOHG4k2P72rZFz/+dHBUAfV0hnjrcwdVb0uQ8fXGBcmXVOnpuZIjqANL7biF358+QEFjmqy8lvvI9OpEL/mngBeTOX5PP66Krzc83jnfz9WNdDPc14ijabQM9YZ452slnlxbJFyrcmKrZHxBGytmF1Hi8yGat15EyftQrCBlIs2oPUtt6Ben4a6XGteB2GfR3hfjW070c3dtKdwPMtXjUHBUALpdBb2eIP/3B7i93Dk5nS7V2sgEZstoJ/HvkwjyJ1AbOU2cP+QcQQNr7LyPt4MPIpJ+65NmGGD9+eYjH96/51oYQDXnY1R/l+8/1UyjWDYAVu5EawAGkjP8XUsaPes9zd/V7fQ/4M+TCr1nbMgzw+90c3B3nz3+8l862xq1tfRWOCgDDMPC4TTrifp4/3k2+UOE3745zfTJVa3FHg9tLQg0hgbAL2dV29I4/r3D/O1U70tHVhVz4u5ATcQiZ4hujzu/C7TKIhr08f7yHH7wwwM6+yFqLnDQMw5CRiscPtJNIFVlMFjh/NcHc0qqdzUDKOIgE7o+QO/RnyEaip6izPPc6eZBy348M8R1GgqadOkvCmQaEgh6+/1w/33uuj862NdeVaGqOCgCQWkAw4Obwnjh+n4t0toTLZXB9QkKgTlu1tfo6iLRRx5B+gc+QO/cUcsdKcXs+eRfShAggPc29yFjzQWSiSd3FPVYEfC46Wv0cHI7zzad38NJTPURCW7Pqz8PwelwM7Qjz9JEOcvkytgVnryyylKzZgnJzu5yPIJ2E/UgzYQbZxCWD7I2wVnXCh4RJBGlWxJGwPYGM8Awi8w9qcpkG7XE/ewaifOeZXr52pJNQwL0lz1hsBscFwIpY1Muh3XH+4scj9HeF+Om/XGVmMbfWZqIr4shdZASpvs8DvwBeA85yu8bwE6R9OYSEgB8pbz9rrDV3p56OIF872smf/WA3I0MttIQ9TTn7bLg/SrzFh430qJ88NX2/LczcSBnvBn6MhO05ZMfej6t/1tOB1CCOIHf9ESR440gw1C1305Dx/hOHO/jjV4Z5fH8bnQ0wxXojOTYA3C6TUECG1F460UPA5+J3H09x7soiMws51jg/3dxe4y6CVCXLyF3Fi8wiexapbg4idzQXDzDnIh7xsqs/yjee6Oa5Y13sH47REvHiatIFKP1eWa/gG8e78ftctES8nPligZtT6XrlvPKk5p3rNu5A5mYcRWpeZ4BbyDZex5Aaw2D1/Z3V93chgRDlPoErK0gFeO7xbr7xRDfHD7RX5zE0Z5mvl2MDAORuFAl7Obavjb2DLfh8Ltxug7OXF0mkiuTylfXcqdxIj3IXcjIeBr6DnHDrbjiapvRPRIIeRoaivPhED68828dj+9owDZq+Cup2mTJtOeajs7qaTqFUYWm5SLFUWStwQZpLPdWPn0Q27vwnpH9gBvguMoz6BA9R7n6vyWBPmMf3t/En3x3m0O448ZZmWb3sq3F0AKwwq/0Cf/B8P/t2tvDJ+Xl+//EUp79YIJkurXfH3XakurpSE3igKzYcdNPbEZ
J259FODu6O0xH311vMpGm1x/w8c1Ta1bv6Ivy/N29yazpDOrvuPVqjyOSdLmRYdhbp1OvlIco9EvSwb2cL33+un289vYP+njDhBptctZE0AJAlnk23DBGGAx5iER9tMT97B1sYHU9xazrD9EKOVLZYb8gQbg/trbu6Hwl5aI/56O8OM9wbYc9glCcPdrBnMCoX/3a7+pH5DD6vi6MjrYQDHkIBDxfGlrhyI8nYRIpkurjWFmMgF7kXCdoepOM1xgOs7uzzmrSEvOweiLJ3ZwuHdsc5caiD/cMx3G5zW5Z7PRoA92iJeDm6t5WDu2LMP9PHB2dneffMDB99PsfNqTTLmRKVilXdd/Cu/7ryaPEqhkF1tR6pcpqmgcs0GOgOcXhPnJef2sHxg+3s29kMy+U9Gu1xP60tPh7f38aZSwucPDXNL966xeh4klSmRKVipXGIpgAAB6dJREFUY9mryvheIda5nqNhSA+/y2XSEfexqy/Cj18e4vknehjqDddbxGTb0wCowzQNYlEvTx7uYLg/wnef7WNiNssX1xJ8enGBi2MJZmuPad/FADqqJ3tL2EtvZ4iBnhDDvRF6OoJ0tQXobPUTjzbaYkQbT7bXMtjVHyUa9nBkTysXxhKcubTAhbEE49MZllJfddKlfJ+OuJ/BnjCH97RyaHecg7ti9HWFaI/7m353n69CA6AO05TNH3o7g/R2BqlULJaSRXZ0BLFtm6n57LoCwDQNnjzYweE9cVrCXnZ0BunvCjHUGyEW8eD3OfdXYFRrRa0tPmIRL0M7Igz1hr9cWuz6ZJqZhRy3ptNML+RYTpdkB+Q1uEzZEq4t5iMS8hAJeoiGPfR2hNhZXbFoz2CUwR3htTaOdQznnn0PyDQNWmM+BnaE2bszxjtnZtf1/9xug598e4g/fHEQw1hZQPP2ya/Eynp7u/qjDPVG+N5zfaQzJWYWcvzjb6/z+vsTnL+aIF9ce9Kl1yvP7T99pIN9QzF2D0QZqfaphIJuaYIZxpYv+dQoNADWyaieNGa1Hf8gJ5DHZW67p8getZVhzpV+EqgOjXpM+rtDdLYG+MK1fN+vE/S72Nkb5tUXBhjsCdMS8RILe/H7XNvqMd5HRQNANSyP28QT9hKL+IiG1jcL0utx0dEa4PH9bdvy6b1HTSNRKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB9MAUMrBNACUcjANAKUcTANAKQfTAFDKwTQAlHIwDQClHEwDQCkH0wBQysE0AJRyMA0ApRxMA0ApB3MDJcCu9UnLglLFxq75WaVUo7FtGxsoV2zKFWuta7cMFN1AtvqXVUoVi1yhQqWiCaBUM7Asm1LZpliqUC5btd5SAYpAsaOjo2ACl4FkrXcuLRcYvZUkky9t3BErpR6ZVLbE1VtJltNFrNr37QRwDUj/9Kc/tdYMgMm5LKcuzLOYKFCu1EwTpVQDmVvM896ZGWYX8/XesgSMAZlXXnnFXjMAbkyleevUNFNzWUpFDQClGt3EbJZfnrzFxEym3luWgFEgA9IJ+AV1AqBQrDA1l+Vnb1xnOV3im1/bQcjvxut1bcSxK6UeQrlskc6WeP+zWd74YILPry6RSBXrvX0BuModATAOTFY/EQO+vLrLFZtEusjbp6cpVSxcLoO+rhBtMR8Bnxuf18TjdtZIYiZXJl+oUKnTwFrFhlyhQjJd9xei7iNXKFMqW3XGqu5m2zalkkUqWya4jcu8VLEoFi3yhQpLyQITs1lee2+cD8/NMTWfq/VfKkAeuda/ANIgAZAGLgDngae4IwAAKhVbvvi7E3x8fp4nD7ZzcFecge4Q3R1B4hHvBv6YjWd8NsN8Ik+hWFnX+21gbinH9cn0xh7YNja3mCeTK2OtYzy6UrFJ50qMT2fI5WsObm0Ly6kis0s5xqeznB9d4uzlRSbnsiyn63bYl4Ap4BJyrZdAAsAG3gdagEGgC/Df+T8rFZtUtkSuUMa2bK5NpGgJe4kEPfh9zmoOpLMl5pbyTNdO2VXKFZtfvj3OuStLG3xk29et6Qy3pjMUS/cP3XS2xIXRBP/9ny4R2MbnZr5QIZsvs5wuMreYZ2YxT65QXmvIPgG8BnyKDAMCYFT/9AEngP8EPAbsuOfzSqnmtQScBf4L8DFSEwBuV/et6isF9AC7qv+uAaBU8/sd8PfAe8Ascq0Dd7f3y8hogF19gw/wAM5q5D+8AlK4Fe5pQqkNU0I6tkz0uZZ7FZAOv/eAXwJvVf9+V8/onQFQAhaB6eorDoSAIFITWHmp1fLIKMrnSIDG0fLaKBZys8oiZT6H3KQ8aJnD7am+M0h7/6fASWTob1WvaK1ekiIwj5zM80jBBpG72vbtVflqPgJ+BfwGyAGtSHhq7enRyyND1/8M/AL4PdKZHUI6sp0eAHPAZ8DfAj+rfjxPned93DX+LVd9zSCFnap+kR5knkC0+gojweB7pIff+HJImSSBZaSD5UMkba8jd6U0cBTpTF0pL6eV06O0Ut5JpBp7BXgbKe8sUt2dAI5w+xyNsL1vWEXkXEyzunxGkfK5jJRN3aGBB0nLODJMOALsQToKe4G2Bz/2pjaNFOxlZP7EWWRW1Z0DsEHgSeAQUl57gfbNPcxt5Qq3y/wUMpHlXn1Ime9FynyE7d0Xs9Jcv4ZU7y8h5ZN4kC/y/wEpnNDUzuX21AAAAABJRU5ErkJggigAAAAwAAAAYAAAAAEAIAAAAAAAgCUAAAAAAAAAAAAAAAAAAAAAAAAMGyEIBxEVeAAAANIAAADwAAAA8AAAAO8AAADwAAAA8AAAAPAAAADwAAAA8AAAAO8AAADwAAAA8AAAAOEDBwmOAQIC0AAAAPAAAADvAAAA8AAAAPAAAADwAAAA8AAAAO8AAADwAAAA8AAAAPAAAADwAAAA8AAAAO8AAADwAAAA7AMGCKwDBgenAAAA7QAAAO8AAADwAAAA8AAAAPAAAADwAAAA8AAAAO8AAADwAAAA8AAAAO0CBAXECRQYUw4gJwEECgx+AQID/x5BT/8sXnP/LF5z/yxec/4sXnP/LF5z/yxec/8sXnP/LF5z/yxec/4sXnP/LF5z/xs6R/8AAAD/CRQY/yxec/8sXnP+LF5z/yxec/8sXnP/LF5z/yxec/4sXnP/LF5z/yxec/8sXnP/L
F5z/yxec/4sXnP/J1Rn/wIEBf8BAgL/JlJj/yxec/4sXnP/LF5z/yxec/8sXnP/LF5z/yxec/4sXnP/LF5z/ytccP8WLzn/AAAA+QwaH0cAAADcIUdX/17I9P9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9bw+3/Dh8l/wYOEaIAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/jp8mP4AAAD+FCs0/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+VLTc/gQJC/4CBAX+Uq/V/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/jp8mP4AAAD+FCs0/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+VLTc/gQJC/4CBAX+Uq/V/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2
/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/zp9mP8AAAD/FCs0/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/VLTc/wQJC/8CBAX/Uq/V/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/jh5k/4AAAD+ESYu/lm95v5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyPT+SJm6/gMGCP4BAwP+PYOf/lm/6P5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5XuuP/J1Rn/wcPE/8AAADwAAEB/ggRFf8mUmT+Vrff/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1Sy2f4dPUv/AgQF/wAAAOoBBATgAAAA/wwaH/49gp7/Xsn1/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1Kw1v4OHiX/AAAB8QUMD5IPISkjCxkeQQYNEI4AAQHvDBkf/1Gs0v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/Ua3T/wsYHf4AAgLpCBIWbRAjKxEQIysFCxgePgMHCbgAAQL+MmqB/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/XMTv/xYwOv4AAADoDBshNgAAAAAAAAAAAAAAAAAAAAAMGyEwAAEB4hMpM/9bwu3/X8r2/17J9f5fyvb/X8r2/1/K9v9eyPT/GTZC/wEDBOURJS0mAAAAAAAAAAAAAAAAAAAAAA0cIwYDBwibAQQE/0uhxP9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/Qo6t/wAAAP4MGiBSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACxgdTAAAAP0+haL/X8r2/17J9f5fyvb/X8r2/1/K9v9Npcn/AAAA/gwaIGMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMGyEPAAEC4yRMXf9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+KFZo/gAAAOEULDYJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAd0kTV7+Xsn1/l7J9f5eyfX+Xsn1/l7J9f43dpH+AAAA9BYwOxsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQwPng4eJf5eyPT+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/IERT/wAAAMwRJi4CAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAcMbO0j/X8r2/17J9f5fyvb/X8r2/1/K9v8zbYT/AAAA7RUtNxQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAChUakQoXHP9dxvH/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/KFVo/wAAAN8TKTMIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADBogAQABAdwkTF3/X8r2/17J9f5fyvb/X8r2/1/K9v8/h6X/AAAA+xMpMjAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgQGuhUuOP9eyfX/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/QYyq/wAAAP4MGiBOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACBMXQgAAAP09g5//X8r2/17J9f5fyvb/X8r2/1/K9v9YvOX/Bg0Q/wQKDKoNHSQBAAAAAAAAAAAAAAAAAAAAAAAAAAAIEhY7AAAA9zRvh/9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/W8Pu/xQqNP4BAgLhDRwiKgAAAAAAAAAAAAAAAAAAAAAJFRokAQME2hEkLP9bwez/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX/NG6H/wAAAP0GDRCGDR0kBwAAAAAAAAAAAAAAAAkUGTsAAADiDh4l/1rA6v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1Cs0f4MGyH/AAAA8QcPE48LGR9KCxgdSggRFYkAAADwChYb/0+ozf9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/Xcby/yxecv4BAwT/AQMD2wYOEZUGDRCGAwYHtwAAAPwQIyv/UKvR/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5Vtdz+JlJk/gYNEP4AAAD+AAAA/gU
LDv4lT2D+U7LZ/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7I9P5Glbb+H0JR/gsYHv4HDxP+FCo0/jNthf5Zveb+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/Xsn1/1vB7P9Npcj/TaTI/1rA6v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/Xsn1/13G8f9cxO//Xsjz/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+FzI9/gAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV91/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f
5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADxLV90/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/FzI9/wAAALgAAADVHDxJ/1zF8P9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9Yu+T/Cxcd/wYOEZoKFRpnAAAA/RElLf8bOkf/GzpH/xs6R/4bOkf/GzpH/xs6R/8bOkf/GzpH/xs6R/4bOkf/GzpH/xs6R/8bOkf/GzpH/xs6R/8bOkf+GzpH/xs6R/8bOkf/GzpH/xs6R/4bOkf/GzpH/xs6R/8bOkf/GzpH/xs6R/4bOkf/GzpH/xs6R/8bOkf/GzpH/xs6R/4bOkf/GzpH/xs6R/8bOkf/GzpH/xs6R/4bOkf/GzpH/xs5Rv8LGB7/AAAA8w8gJzcQIysCDBshVwQJDKoAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMIAAADCAAAAwgAAAMEGDhKcDyEoOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8AB4AAAAAAH4APwAAAAAAfwA/gAAAAAB/AD+AAAAAAH4AP4AAAAAAfgAfAAAAAAA8AA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAoAAAAIAAAAEAAAAABACAAAAAAAIAQAAAAAAAAAAAAAAAAAAAAAAAACRQYRAEDBNYBAgP1AQID9AECA/UBAgP1AQID9QECA/QBAgP1AQIC7gIEBb4BAgLyAQID9QECA/UBAgP1AQID9AECA/UBAgP1AQID9QECA/QBAgP1AQME1QEDBNMBAgP0AQID9QECA/UBAgP1AQID9AECA/UBAgP0AgUGyAwbISsBAgPcRpW1/1vC7f9bwu3+W8Lt/1vC7f9bwu3/W8Lt/lvC7f9EkbD/AAAA/0qdwP5bwu3/W8Lt/1vC7f9bwu3+W8Lt/1vC7f9bwu3/W8Lt/lvC7f8aOUX/GDI9/1vC7f5bwu3/W8Lt/1vC7f9bwu3+W8Lt/1vB7P84eZP/BQsNtAEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/0aWt/8AAAD/TKPH/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/xs7SP8YNED/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Rpa3/gAAAP5Mo8f+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+GztI/hg0QP5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+T6jM/gAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9Glrf/AAAA/0yjx/5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v8bO0j/GDRA/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/0aWt/8AAAD/TKPH/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/xs7SP8YNED/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/Rpa3/wAAAP9Mo8f+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/GztI/xg0QP9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5Glrf+AAAA/kyjx/5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f4bO0j+GDRA/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5PqMz+AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/0aWt/8AAAD/TKPH/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/xs7SP8YNED/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/Rpa3/wAAAP9Mo8f+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/GztI/xg0QP9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9Glrf/AAAA/0yjx/5fy
vb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v8bO0j/GDRA/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/kaWt/4AAAD+TKPH/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/hs7SP4YNED+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/k+ozP4AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5ex/P/N3aQ/wAAAP4zbYX+Wb3n/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5Knb//Dh4l/wsYHf4+hKD+Xcbx/1/K9v9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Wb7n/ho3Q/4CBgfJCRUZZwULDacJExf4SJq8/1/K9v9eyfX+X8r2/1/K9v9eyfX/L2R6/gIFBtwJFRlkChccUwQICrkYMz/+WsDp/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v8kTV7+BQwPlgwbIQMAAAAAAAAAAAoVGkQJExf4WLvk/17J9f5fyvb/X8r2/0eYuf8DBwnXDyEoEQAAAAAAAAAACRQYAwQJC54sXnL/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v5eyfX+Xsn1/l7J9f5eyfX+Wb/o/gQJC/EPISgOAAAAAAAAAAAAAAAAAAAAAAYNEKQ8f5v+Xsn1/l7J9f5eyfX+KFZo/g0cI2wAAAAAAAAAAAAAAAAAAAAACRUZIQwbIP5eyfX+Xsn1/l7J9f5eyfX+T6jM/gAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9UtNv/AQIC3wwaIAIAAAAAAAAAAAAAAAAAAAAABg0QhTNshP9eyfX+X8r2/1/K9v8jTFz/Dh4lXAAAAAAAAAAAAAAAAAAAAAAJFBkTCBIW/F7I9P9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1zF8P8JFBn6DRwiJQAAAAAAAAAAAAAAAAYOEQEDBwjBRJCw/17J9f5fyvb/X8r2/zl6lf8FDA+vAAAAAAAAAAAAAAAAAAAAAAYOEWEdPkv/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/zZ0jf4DBgjKChccIwAAAAAJExgJBQoNhxYvOv5dxvH/Xsn1/l/K9v9fyvb/W8Pt/xMpMv4FDA+RCRUaHAgRFRAGDhFjBQsO8k2lyf9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn0/jZ0jf4IExf6AQIC2QMGCO8hR1b+V7rj/l7J9f5eyfX+Xsn1/l7J9f5eyfX+WL3m/ihWaf4KFhr7Bg0Q+Bw8Sv5Npcn+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5PqMz+AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/13G8f9Sr9b/Wb7o/l7J9f9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/17I8/9dx/L/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/k+ozP4AAADQAQMD9lvD7v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/T6jM/wAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADQAQMD9lvD7v5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+T6jM/gAAANABAwP2W8Pu/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9PqMz/AAAA0AEDA/Zbw+7/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/1/K9v9eyfX+X8r2/1/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/0+ozP8AAADPAQQE1D2Cn/9RrNL/UazS/lGs0v9RrNL/UazS/1Gs0v5RrNL/UazS/1Gs0v9RrNL+UazS/1Gs0v9RrNL/UazS/lGs0v9RrNL/UazS/1Gs0v5RrNL/UazS/1Gs0v9RrNL+UazS/1Gs0v9RrNL/UazS/lGs0v9RrNL/MWh//wULDa4NHSQ2BAkLuwAAANYAAADWAAAA1gAAANYAAADW
AAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYAAADWAAAA1gAAANYFCw6vECMrIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAGAAB4DwAAeA8AAHAPAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAAAABgAAAAwAAAAAQAgAAAAAABgCQAAAAAAAAAAAAAAAAAAAAAAAAYOEX8SKDDwFi859xYvOfcWLzn3Fi859xYvOfcHEBTbDR0j7xYvOfcWLzn3Fi859xYvOfcWLzn3Fi859xUsNvYCBQbUFCw19hYvOfcWLzn3Fi859xYvOfcRJCzsCRMYZRMpM/NeyfX/Xsn1/l/K9v9fyvb/Xsn1/l/K9v8dPkz/OXqV/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/1m/6f4DBgj/WLzl/17J9f5fyvb/Xsn1/l/K9v9eyPP/Cxcd1hYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f4dPkz+OXqV/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/lm/6f4DBgj+WLzl/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v8dPkz/OXqV/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/1m/6f4DBgj/WLzl/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v8dPkz/OXqV/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/1m/6f4DBgj/WLzl/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f4dPkz+OXqV/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/lm/6f4DBgj+WLzl/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v8dPkz/OXqV/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/1m/6f4DBgj/WLzl/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v8dPkz/OXqV/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/1m/6f4DBgj/WLzl/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f4dPkz+OXqV/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/lm/6f4DBgj+WLzl/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/k+ozf8QIin7HD1K/k6ny/9fyvb/Xsn1/l/K9v9fyvb/XMTu/zFof/4BAwTyKFdq/1a33/5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/SZu9/gcOEsMKFxwtCBMXMwYNEcBHmLr/Xsn1/l/K9v9eyfX/Hj9N+AoVGl8KFRoFCBMXPw4dJOVav+n/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+GjlF9wwaIBYAAAAAAAAAAAkTGBMZNUH2Xsn1/l7J9f5QrNH+CBIXnAAAAAAAAAAAAAAAAAcPEmQ8f5v+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/EiYu6gkVGgIAAAAAAAAAAAAAAAAQIirnXsn1/l/K9v9MosX/ChUaiwAAAAAAAAAAAAAAAAcPElM3dY//Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/LF5z/ggTF1YAAAAAAAAAAAYNEVAqWm7+Xsn1/l/K9v9dxvH/DyEp6QkUGCMAAAAABg0QDwUMD8VTsNf/Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/W8Ls/iJIWPsGDRC2Bg0QtCFGVftbwev/Xsn1/l/K9v9fyvb/Ua3T/xo3Q/YHERTGEicw7EaVtv5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5Zvuj+Wb7n/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5dx/P+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xYvOvheyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Xsn1/l7J9f5eyfX+Cxke2xYvOvhfyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/17J9f5fyvb/Xsn1/l/K9v9fyvb/Cxke2xInL/FeyPT/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/Xsn1/l/K9v9fyvb/X8r2/17J9f5fyvb/X8r2/17J9f5fyvb/Xsn1/l/K9v9dxvH/ChYa1AkVGW8MGiDaDR0j4A0dI+ANHSPgDR0j4A0dI+ANHSPgDR0j4A0dI+ANHSPgDR0j4A0dI+ANHSPgDR0j4A0dI+ANHSPgDR0j4A0dI+ANHSPgDR0j4A0dI+ALGB3WCxkfWQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYHAAAHBwAABgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AACgAAAAQAAAAIAAAAAEAIAAAAAAAQAQAAAAAAAAAAAAAAAAAAAAAAAAULDW9LmJ4+S5iePouYnj5KFZp+BMpMusuYnj6LmJ4+S5iePouYnj5HkBO8h0/TPEuYnj6LmJ4+S5iePkTKTKqLmN5+l7J9f5fyvb/Xsn1/lKw1v8mUWP+X8r2/17J9f5fyvb/Xsn1/j2Cn/87f5v+X8r2/17J9f5fyvb/J1Rm5y5jefpfyvb/X8r2/1/K9v9SsNb/JlFj/1/K9v9fyvb/X8r2/1/K9v89gp//O3+b/1/K9v9fyvb/X8r2/ydUZucuY3n6Xsn1/l/K9v9eyfX+UrDW/yZRY/5fyvb/Xsn1/l/K9v9eyfX+PYKf/zt/m/5fyvb/Xsn1/l/K9v8nVGbnLmN5+l/K9v9fyvb/X8r2/1Kw1v8mUWP/X8r2/1/K9v9fyvb/X8r2/z2Cn/87f5v/X8r2/1/K9v9fyvb/J1Rm5y5jefpeyfX+X8r2/17J9f5SsNb/JlFj/l/K9v9eyfX+X8r2/17J9f49gp//O3+b/l/K9v9eyfX+X8r2/ydUZucuY3n6X8r2/1/K9v9dx/L/LF9z8RAjK8NCjaz9X8r2/1/K9v9TsNb/GTVBzxYvOcJLocT+X8r2/1/K9v8nVGbnLmN5+l7J9f5fyvb/OHeS+wgTFyoAAAAABxAUeFSz2v5fyvb/IERT0AYNEAQAAAAAESUur17J9f5fyvb/J1Rm5y5jefpfyvb/X8r2/y9kefUGDhIJAAAAAAQJC1FNpMj/X8r2/xw8ScIAAAAAAAAAAA0cI5xeyfX/X8r2/ydUZucuY3n6Xsn1/l/K9v9UtNv+EykyuQULDnQlT2DhXsn0/l/K9v9Jnb/+ECMrqQwaIJtAiKX7Xsn1/l/K9v8nVGbnLmN5+l/K9v9fyvb/X8r2/17J9P9awOr/Xsn1/1/K9v9fyvb/X8r2/17J9f9eyfX/X8r2/1/K9v9fyvb/J1Rm5y5jefpeyfX+X8r2/17J9f5fyvb/Xsn1/l/K9v9eyfX+X8r2/17J9f5fyvb/Xsn1/l/K9v9eyfX+X8r2/ydUZucuY3n6X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v8nVGbnLmN5+l7J9f5fyvb/Xsn1/l/K9v9eyfX+X8r2/17J9f5fyvb/Xsn1/l/K9v9eyfX+X8r2/17J9f5fyvb/J1Rm5y5jefpfyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/1/K9v9fyvb/X8r2/ydUZucUKzWxKFZp6ihWaeooVmnqKFZp6ihWaeooVmnqKFZp6ihWaeooVmnqKFZp6ihWaeooVmnqKFZp6ihWaeoTKDGfAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQQAAAEMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
def __init__(self):
self.user_info_data = None
self.user_torrent_data_list = []
self.peer_status_data = None
self.msg_search_data_list = []
self.site_info = None
self.err_msg = None
self.seeding_info = []
def get_site_user_info(self, site_info):
self.site_info = site_info
        self.__get_site_data()
return self.__parse_result()
def __get_site_data(self):
self._get_user_info()
        if self.err_msg is not None:
            # Bail out early: the remaining requests all depend on the user info
            return
self._get_user_torrent_list()
self._get_peer_status()
self._get_msg_search()
def _get_data(self, url, json=None, data=None):
res = RequestUtils(content_type="application/json", ua=self.site_info.get("ua") or Config().get_ua(),
authorization=self.site_info.get('cookie'),
proxies=self.site_info.get("proxy")
).post_res(
url=url % self.site_info.get("strict_url"), params=data, json=json)
if res and res.status_code == 200:
data = res.json().get('data')
if res.json().get("code") == "0":
return data
else:
self.err_msg = f"【Sites】站点 {self.site_info.get('name')} 获取数据出错,请检查Cookie或Apikey是否过期"
else:
self.err_msg = f"【Sites】站点 {self.site_info.get('name')} 获取数据出错,请检查Cookie或Apikey是否过期"
def _get_user_info(self):
self.user_info_data = self._get_data(MtUserInfo._user_info_url)
def _get_user_torrent_list(self):
page_no = 1
while True:
torrent_list_params = MtUserInfo.__init_torrent_list_params(self.user_info_data.get('id'), page_no)
user_torrent_list_data = self._get_data(MtUserInfo._user_torrent_list_url,
json=torrent_list_params)
if user_torrent_list_data and len(user_torrent_list_data.get('data')) > 0:
page_no = page_no + 1
self.user_torrent_data_list += user_torrent_list_data.get('data')
else:
break
def _get_peer_status(self):
self.peer_status_data = self._get_data(MtUserInfo._peer_status_url)
    # Search the site's message boxes and collect any returned messages
def _get_msg_search(self):
boxes = [-1, -2, 1]
for b in boxes:
msg_search_params = MtUserInfo.__init_msg_search_params(b)
            msg_search_data_res = self._get_data(MtUserInfo._msg_search_url, data=msg_search_params)
            if self.err_msg is None and msg_search_data_res:
                self.msg_search_data_list += msg_search_data_res.get('data')
@staticmethod
def __init_torrent_list_params(user_id, page_no):
params = {
"pageNumber": page_no,
"pageSize": 200,
"type": "SEEDING",
"userid": user_id
}
return params
@staticmethod
def __init_msg_search_params(box):
params = {
"box": box,
"pageNumber": 1,
"pageSize": 100
}
return params
@staticmethod
    def __deal_seeder_and_leecher(peer_status_data) -> str:
        # Despite the name, only the leecher count is consumed by the caller
        return peer_status_data.get('leecher')
def __deal_torrent_data(self):
seeding_total_size = 0
for torrent_info in self.user_torrent_data_list:
            torrent_size = int(torrent_info.get('torrent').get('size'))
            seeder = int(torrent_info.get('torrent').get('status').get('seeders'))
            seeding_total_size += torrent_size
            self.seeding_info.append([seeder, torrent_size])
return seeding_total_size, len(self.user_torrent_data_list)
def __deal_msg_data(self):
unread_msg_total_size = 0
message_unread_contents = []
for msg_data in self.msg_search_data_list:
if msg_data.get('unread'):
unread_msg_total_size = unread_msg_total_size + 1
message_unread_contents.append(
(msg_data.get('title'), msg_data.get('createdDate'), msg_data.get('context')))
return unread_msg_total_size, message_unread_contents
def __parse_result(self):
if self.err_msg:
site_user_info = {
"err_msg": self.err_msg,
}
else:
            seeding_total_size, seeding_num = self.__deal_torrent_data()
            message_unread_num, message_unread_contents = self.__deal_msg_data()
site_user_info = {
"upload": self.user_info_data.get('memberCount').get('uploaded'),
"username": self.user_info_data.get('username'),
"user_level": MtUserInfo._user_level.get(self.user_info_data.get('role')),
"join_at": self.user_info_data.get('createdDate'),
"download": self.user_info_data.get('memberCount').get('downloaded'),
"ratio": self.user_info_data.get('memberCount').get('shareRate'),
"seeding": seeding_num,
"seeding_size": seeding_total_size,
"leeching": MtUserInfo.__deal_seeder_and_leecher(self.peer_status_data),
"bonus": self.user_info_data.get('memberCount').get('bonus'),
"err_msg": self.err_msg,
"message_unread": message_unread_num,
"site_name": self.site_info.get('name'),
"message_unread_contents": message_unread_contents,
"site_url": self.site_info.get('strict_url'),
"site_favicon": MtUserInfo._favicon_base64,
"seeding_info": StringUtils.format_list(self.seeding_info)
}
SiteInfo = namedtuple('SiteInfo', site_user_info.keys())
return SiteInfo(**site_user_info)
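
# --- Hedged usage sketch (not part of the original module) ---
# Shows how MtUserInfo might be driven end to end. The site_info keys
# ("name", "strict_url", "cookie", "ua", "proxy") mirror the .get() calls
# made above; the concrete values are hypothetical placeholders.
if __name__ == "__main__":
    demo_site_info = {
        "name": "M-Team",
        "strict_url": "https://tracker.example.org",  # hypothetical URL
        "cookie": "demo-api-key-or-cookie",           # hypothetical credential
        "ua": "Mozilla/5.0",
        "proxy": None,
    }
    result = MtUserInfo().get_site_user_info(demo_site_info)
    # On failure only err_msg is populated; on success the namedtuple also
    # carries upload/download/seeding statistics.
    print(result.err_msg or f"{result.username}: up={result.upload}, down={result.download}")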
# ----------------------------------------------------------------------
# File: app/sites/siteuserinfo/torrent_leech.py
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
import re
from lxml import etree
from app.sites.siteuserinfo._base import _ISiteUserInfo, SITE_BASE_ORDER
from app.utils import StringUtils
from app.utils.types import SiteSchema
class TorrentLeechSiteUserInfo(_ISiteUserInfo):
schema = SiteSchema.TorrentLeech
order = SITE_BASE_ORDER + 40
@classmethod
def match(cls, html_text):
return 'TorrentLeech' in html_text
def _parse_site_page(self, html_text):
html_text = self._prepare_html_text(html_text)
user_detail = re.search(r"/profile/([^/]+)/", html_text)
if user_detail and user_detail.group().strip():
self._user_detail_page = user_detail.group().strip().lstrip('/')
self.userid = user_detail.group(1)
self._user_traffic_page = f"profile/{self.userid}/view"
self._torrent_seeding_page = f"profile/{self.userid}/seeding"
def _parse_user_base_info(self, html_text):
self.username = self.userid
def _parse_user_traffic_info(self, html_text):
"""
        Upload / download / share ratio [seeding count / bonus points]
:param html_text:
:return:
"""
html_text = self._prepare_html_text(html_text)
html = etree.HTML(html_text)
upload_html = html.xpath('//div[contains(@class,"profile-uploaded")]//span/text()')
if upload_html:
self.upload = StringUtils.num_filesize(upload_html[0])
download_html = html.xpath('//div[contains(@class,"profile-downloaded")]//span/text()')
if download_html:
self.download = StringUtils.num_filesize(download_html[0])
ratio_html = html.xpath('//div[contains(@class,"profile-ratio")]//span/text()')
if ratio_html:
self.ratio = StringUtils.str_float(ratio_html[0].replace('∞', '0'))
user_level_html = html.xpath('//table[contains(@class, "profileViewTable")]'
'//tr/td[text()="Class"]/following-sibling::td/text()')
if user_level_html:
self.user_level = user_level_html[0].strip()
join_at_html = html.xpath('//table[contains(@class, "profileViewTable")]'
'//tr/td[text()="Registration date"]/following-sibling::td/text()')
if join_at_html:
self.join_at = StringUtils.unify_datetime_str(join_at_html[0].strip())
bonus_html = html.xpath('//span[contains(@class, "total-TL-points")]/text()')
if bonus_html:
self.bonus = StringUtils.str_float(bonus_html[0].strip())
def _parse_user_detail_info(self, html_text):
pass
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Seeding-related statistics
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page, if any
"""
html = etree.HTML(html_text)
if not html:
return None
size_col = 2
seeders_col = 7
page_seeding = 0
page_seeding_size = 0
page_seeding_info = []
seeding_sizes = html.xpath(f'//tbody/tr/td[{size_col}]')
seeding_seeders = html.xpath(f'//tbody/tr/td[{seeders_col}]/text()')
if seeding_sizes and seeding_seeders:
page_seeding = len(seeding_sizes)
for i in range(0, len(seeding_sizes)):
size = StringUtils.num_filesize(seeding_sizes[i].xpath("string(.)").strip())
seeders = StringUtils.str_int(seeding_seeders[i])
page_seeding_size += size
page_seeding_info.append([seeders, size])
self.seeding += page_seeding
self.seeding_size += page_seeding_size
self.seeding_info.extend(page_seeding_info)
        # Next-page handling: this parser does not paginate, so always None
next_page = None
return next_page
def _parse_message_unread_links(self, html_text, msg_links):
return None
def _parse_message_content(self, html_text):
return None, None, None
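
# --- Hedged parsing sketch (not part of the original module) ---
# Feeds _parse_user_torrent_seeding_info a minimal, hand-written table in the
# shape its XPath expressions expect (size in column 2, seeders in column 7);
# the rows are made up for illustration.
if __name__ == "__main__":
    parser = TorrentLeechSiteUserInfo(site_name="TL",
                                      url="https://www.torrentleech.org",
                                      site_cookie="",
                                      index_html="")
    sample = """
    <table><tbody>
      <tr><td>1</td><td>1.5 GB</td><td>-</td><td>-</td><td>-</td><td>-</td><td>12</td></tr>
      <tr><td>2</td><td>700 MB</td><td>-</td><td>-</td><td>-</td><td>-</td><td>3</td></tr>
    </tbody></table>
    """
    parser._parse_user_torrent_seeding_info(sample)
    # Expected: 2 seeding torrents, ~2.2 GB total, seeding_info=[[12, ...], [3, ...]]
    print(parser.seeding, parser.seeding_size, parser.seeding_info)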
# ----------------------------------------------------------------------
# File: app/sites/siteuserinfo/_base.py
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
import base64
import json
import re
from abc import ABCMeta, abstractmethod
from urllib.parse import urljoin, urlsplit
import requests
from lxml import etree
import log
from app.helper import SiteHelper, ChromeHelper
from app.helper.cloudflare_helper import under_challenge
from app.utils import RequestUtils
from app.utils.types import SiteSchema
from config import Config
SITE_BASE_ORDER = 1000
class _ISiteUserInfo(metaclass=ABCMeta):
    # Site schema (parser template)
    schema = SiteSchema.NexusPhp
    # Order in which parsers are tried; lower values are matched first
    order = SITE_BASE_ORDER
def __init__(self, site_name, url, site_cookie, index_html, session=None, ua=None, emulate=False, proxy=None):
super().__init__()
        # Site information
self.site_name = None
self.site_url = None
self.site_favicon = None
        # User information
self.username = None
self.userid = None
        # Unread messages
self.message_unread = 0
self.message_unread_contents = []
        # Traffic statistics
self.upload = 0
self.download = 0
self.ratio = 0
        # Torrent statistics
self.seeding = 0
self.leeching = 0
self.uploaded = 0
self.completed = 0
self.incomplete = 0
self.seeding_size = 0
self.leeching_size = 0
self.uploaded_size = 0
self.completed_size = 0
self.incomplete_size = 0
        # [seeder count, torrent size] pairs
self.seeding_info = []
        # User detail information
self.user_level = None
self.join_at = None
self.bonus = 0.0
        # Error message
self.err_msg = None
        # Internal state
self._base_url = None
self._site_cookie = None
self._index_html = None
self._addition_headers = None
        # Site page paths
self._brief_page = "index.php"
self._user_detail_page = "userdetails.php?id="
self._user_traffic_page = "index.php"
self._torrent_seeding_page = "getusertorrentlistajax.php?userid="
self._user_mail_unread_page = "messages.php?action=viewmailbox&box=1&unread=yes"
self._sys_mail_unread_page = "messages.php?action=viewmailbox&box=-2&unread=yes"
self._torrent_seeding_params = None
self._torrent_seeding_headers = None
split_url = urlsplit(url)
self.site_name = site_name
self.site_url = url
self._base_url = f"{split_url.scheme}://{split_url.netloc}"
self._favicon_url = urljoin(self._base_url, "favicon.ico")
self.site_favicon = ""
self._site_cookie = site_cookie
self._index_html = index_html
self._session = session if session else requests.Session()
self._ua = ua
self._emulate = emulate
self._proxy = proxy
def site_schema(self):
"""
        The parsing schema used for this site
        :return: site schema
"""
return self.schema
@classmethod
def match(cls, html_text):
"""
        Whether this parser matches the given site
        :param html_text: HTML of the site home page
        :return: True if it matches
"""
return False
def parse(self):
"""
        Parse the site information
:return:
"""
self._parse_favicon(self._index_html)
if not self._parse_logged_in(self._index_html):
return
self._parse_site_page(self._index_html)
self._parse_user_base_info(self._index_html)
self._pase_unread_msgs()
if self._user_traffic_page:
self._parse_user_traffic_info(self._get_page_content(urljoin(self._base_url, self._user_traffic_page)))
if self._user_detail_page:
self._parse_user_detail_info(self._get_page_content(urljoin(self._base_url, self._user_detail_page)))
self._parse_seeding_pages()
self.seeding_info = json.dumps(self.seeding_info)
def _pase_unread_msgs(self):
"""
        Parse the titles and contents of all unread messages
:return:
"""
unread_msg_links = []
if self.message_unread > 0:
links = {self._user_mail_unread_page, self._sys_mail_unread_page}
for link in links:
if not link:
continue
msg_links = []
next_page = self._parse_message_unread_links(
self._get_page_content(urljoin(self._base_url, link)), msg_links)
while next_page:
next_page = self._parse_message_unread_links(
self._get_page_content(urljoin(self._base_url, next_page)), msg_links)
unread_msg_links.extend(msg_links)
for msg_link in unread_msg_links:
log.debug(f"【Sites】{self.site_name} 信息链接 {msg_link}")
head, date, content = self._parse_message_content(self._get_page_content(urljoin(self._base_url, msg_link)))
log.debug(f"【Sites】{self.site_name} 标题 {head} 时间 {date} 内容 {content}")
self.message_unread_contents.append((head, date, content))
def _parse_seeding_pages(self):
if self._torrent_seeding_page:
            # First page
next_page = self._parse_user_torrent_seeding_info(
self._get_page_content(urljoin(self._base_url, self._torrent_seeding_page),
self._torrent_seeding_params,
self._torrent_seeding_headers))
            # Handle the remaining pages
while next_page:
next_page = self._parse_user_torrent_seeding_info(
self._get_page_content(urljoin(urljoin(self._base_url, self._torrent_seeding_page), next_page),
self._torrent_seeding_params,
self._torrent_seeding_headers),
multi_page=True)
@staticmethod
def _prepare_html_text(html_text):
"""
        Strip noisy fragments (pixel sizes, "#<digits>") from the HTML
"""
return re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_text))
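
    # Illustrative effect of _prepare_html_text: "width:100px ... seed #12345"
    # becomes "width: ... seed ", removing pixel lengths and "#<digits>"
    # fragments that would otherwise confuse the numeric parsing downstream.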
@abstractmethod
def _parse_message_unread_links(self, html_text, msg_links):
"""
        Collect the links of unread messages
:param html_text:
:return:
"""
pass
def _parse_favicon(self, html_text):
"""
        Parse the site favicon and return it as a base64-encoded icon
:param html_text:
:return:
"""
html = etree.HTML(html_text)
if html:
fav_link = html.xpath('//head/link[contains(@rel, "icon")]/@href')
if fav_link:
self._favicon_url = urljoin(self._base_url, fav_link[0])
res = RequestUtils(headers=self._ua, cookies=self._site_cookie, session=self._session, timeout=60).get_res(
url=self._favicon_url)
if res:
self.site_favicon = base64.b64encode(res.content).decode()
def _get_page_content(self, url, params=None, headers=None):
"""
        :param url: page URL
        :param params: POST parameters
        :param headers: extra request headers
:return:
"""
req_headers = None
proxies = Config().get_proxies() if self._proxy else None
if self._ua or headers or self._addition_headers:
req_headers = {}
if headers:
req_headers.update(headers)
if isinstance(self._ua, str):
req_headers.update({
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": f"{self._ua}"
})
else:
req_headers.update(self._ua)
if self._addition_headers:
req_headers.update(self._addition_headers)
if params:
res = RequestUtils(headers=req_headers, cookies=self._site_cookie, proxies=proxies, session=self._session,
timeout=60).post_res(url=url, data=params)
else:
res = RequestUtils(headers=req_headers, cookies=self._site_cookie, proxies=proxies, session=self._session,
timeout=60).get_res(url=url)
if res is not None and res.status_code in (200, 500, 403):
            # If Cloudflare protection is detected, try browser emulation
if under_challenge(res.text):
log.debug(f"【Sites】{self.site_name} 检测到Cloudflare,需要浏览器仿真")
chrome = ChromeHelper()
if self._emulate and chrome.get_status():
if not chrome.visit(url=url, ua=self._ua, cookie=self._site_cookie, proxy=self._proxy):
log.error(f"【Sites】{self.site_name} 无法打开网站")
return ""
                    # Poll until the Cloudflare challenge is passed
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
log.error(f"【Sites】{self.site_name} 跳转站点失败")
return ""
return chrome.get_html()
else:
log.warn(
f"【Sites】{self.site_name} 检测到Cloudflare,需要浏览器仿真,但是浏览器不可用或者未开启浏览器仿真")
return ""
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
return res.text
return ""
@abstractmethod
def _parse_site_page(self, html_text):
"""
        Parse the page that carries site-related information
:param html_text:
:return:
"""
pass
@abstractmethod
def _parse_user_base_info(self, html_text):
"""
        Parse basic user information
:param html_text:
:return:
"""
pass
def _parse_logged_in(self, html_text):
"""
        Check whether the user is logged in
:param html_text:
:return: True/False
"""
logged_in = SiteHelper.is_logged_in(html_text)
if not logged_in:
self.err_msg = "未检测到已登陆,请检查cookies是否过期"
log.warn(f"【Sites】{self.site_name} 未登录,跳过后续操作")
return logged_in
@abstractmethod
def _parse_user_traffic_info(self, html_text):
"""
        Parse the user's upload, download and share-ratio statistics
:param html_text:
:return:
"""
pass
@abstractmethod
def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
"""
        Parse the user's seeding statistics
        :param html_text:
        :param multi_page: whether the data spans multiple pages
        :return: URL of the next page, if any
"""
pass
@abstractmethod
def _parse_user_detail_info(self, html_text):
"""
        Parse the user's detail information:
        join date / user level / bonus points, etc.
:param html_text:
:return:
"""
pass
@abstractmethod
def _parse_message_content(self, html_text):
"""
        Parse the content of a private message
:param html_text:
:return: head: message, date: time, content: message content
"""
pass
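
# --- Hedged sketch (not part of the original module): the minimal surface a
# new site parser must implement. The class name and matching marker are
# hypothetical; real subclasses live in the sibling modules of this package.
class _DemoSiteUserInfo(_ISiteUserInfo):
    schema = SiteSchema.NexusPhp
    order = SITE_BASE_ORDER + 999

    @classmethod
    def match(cls, html_text):
        # Hypothetical fingerprint of the demo site's home page
        return "DemoSite" in html_text

    def _parse_site_page(self, html_text):
        pass

    def _parse_user_base_info(self, html_text):
        pass

    def _parse_user_traffic_info(self, html_text):
        pass

    def _parse_user_detail_info(self, html_text):
        pass

    def _parse_user_torrent_seeding_info(self, html_text, multi_page=False):
        # No pagination in this demo
        return None

    def _parse_message_unread_links(self, html_text, msg_links):
        return None

    def _parse_message_content(self, html_text):
        return None, None, None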
# ----------------------------------------------------------------------
# File: app/mediaserver/media_server.py
# ----------------------------------------------------------------------
import json
import threading
import log
from app.conf import SystemConfig
from app.db import MediaDb
from app.helper import ProgressHelper, SubmoduleHelper
from app.media import Media
from app.message import Message
from app.utils import ExceptionUtils
from app.utils.commons import singleton
from app.utils.types import MediaServerType, MovieTypes, SystemConfigKey, ProgressKey
from config import Config
lock = threading.Lock()
server_lock = threading.Lock()
@singleton
class MediaServer:
_mediaserver_schemas = []
_server_type = None
_server = None
mediadb = None
progress = None
message = None
media = None
systemconfig = None
def __init__(self):
self._mediaserver_schemas = SubmoduleHelper.import_submodules(
'app.mediaserver.client',
filter_func=lambda _, obj: hasattr(obj, 'client_id')
)
log.debug(f"【MediaServer】加载媒体服务器:{self._mediaserver_schemas}")
self.init_config()
def init_config(self):
self.mediadb = MediaDb()
self.message = Message()
self.progress = ProgressHelper()
self.media = Media()
self.systemconfig = SystemConfig()
        # The media server currently in use
self._server_type = Config().get_config('media').get('media_server') or 'emby'
self._server = None
def __build_class(self, ctype, conf):
for mediaserver_schema in self._mediaserver_schemas:
try:
if mediaserver_schema.match(ctype):
return mediaserver_schema(conf)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
@property
def server(self):
with server_lock:
if not self._server:
self._server = self.__get_server(self._server_type)
return self._server
def __get_server(self, ctype: [MediaServerType, str], conf=None):
return self.__build_class(ctype=ctype, conf=conf)
def get_type(self):
"""
        The media server type currently in use
"""
return self.server.get_type()
def get_activity_log(self, limit):
"""
        Fetch the media server's activity log
        :param limit: maximum number of entries
"""
if not self.server:
return []
return self.server.get_activity_log(limit)
def get_user_count(self):
"""
        Get the media server's total user count
"""
if not self.server:
return 0
return self.server.get_user_count()
def get_medias_count(self):
"""
        Get per-type media counts from the media server
:return: MovieCount SeriesCount SongCount
"""
if not self.server:
return None
return self.server.get_medias_count()
def refresh_root_library(self):
"""
        Refresh the media server's entire library
"""
if not self.server:
return
return self.server.refresh_root_library()
def get_episode_image_by_id(self, item_id, season_id, episode_id):
"""
        Query an episode image URL from the media server by item_id, season_id and episode_id
        :param item_id: ID on the media server
        :param season_id: season number
        :param episode_id: episode number
        :return: the image's URL on TMDB
"""
if not self.server:
return None
if not item_id or not season_id or not episode_id:
return None
return self.server.get_episode_image_by_id(item_id, season_id, episode_id)
def get_remote_image_by_id(self, item_id, image_type):
"""
        Query an image URL from the media server by item ID
        :param item_id: ID on the media server
        :param image_type: image kind, e.g. poster or backdrop
        :return: the image's URL on TMDB
"""
if not self.server:
return None
if not item_id:
return None
return self.server.get_remote_image_by_id(item_id, image_type)
def get_local_image_by_id(self, item_id):
"""
        Query an image URL from the media server by item ID
        :param item_id: ID on the media server
"""
if not self.server:
return None
if not item_id:
return None
return self.server.get_local_image_by_id(item_id)
def get_no_exists_episodes(self, meta_info,
season_number,
episode_count):
"""
        Given title, year, season and total episode count, query which episodes the media server is missing
        :param meta_info: recognized media info to query
        :param season_number: season number
        :param episode_count: total episodes in that season
        :return: list of episode numbers missing from that season
"""
if not self.server:
return None
return self.server.get_no_exists_episodes(meta_info,
season_number,
episode_count)
def get_movies(self, title, year=None):
"""
        Check whether a movie exists on the media server by title and year; return matches as a list
        :param title: title
        :param year: year; when empty, results are not filtered by year
        :return: list of dicts with title and year attributes
"""
if not self.server:
return None
return self.server.get_movies(title, year)
def refresh_library_by_items(self, items):
"""
        Refresh libraries by type, name and year
        :param items: recognized media info list whose libraries need refreshing
"""
if not self.server:
return
return self.server.refresh_library_by_items(items)
def get_libraries(self):
"""
        List all libraries on the media server
"""
if not self.server:
return []
return self.server.get_libraries()
def get_items(self, parent):
"""
        Get all media inside a library
        :param parent: ID of the parent library
"""
if not self.server:
return []
return self.server.get_items(parent)
def get_play_url(self, item_id):
"""
        Get the play URL of a media item
        :param item_id: media ID
"""
if not self.server:
return None
return self.server.get_play_url(item_id)
def get_tv_episodes(self, item_id):
"""
        Get all episode info of a TV series
        :param item_id: series ID
"""
if not self.server:
return []
return self.server.get_tv_episodes(item_id=item_id)
def sync_mediaserver(self):
"""
        Sync all media server data into the local database
"""
if not self.server:
return
with lock:
            # Start the progress bar
            log.info("【MediaServer】开始同步媒体库数据...")
            self.progress.start(ProgressKey.MediaSync)
            self.progress.update(ptype=ProgressKey.MediaSync, text="请稍候...")
            # Libraries selected for syncing
            librarys = self.systemconfig.get(SystemConfigKey.SyncLibrary) or []
            # Aggregate statistics; guard against missing counts from the client
            medias_count = self.get_medias_count() or {}
            total_media_count = (medias_count.get("MovieCount") or 0) + (medias_count.get("SeriesCount") or 0)
total_count = 0
movie_count = 0
tv_count = 0
            # Clear the local registry
self.mediadb.empty(server_type=self._server_type)
for library in self.get_libraries():
if str(library.get("id")) not in librarys:
continue
                # Fetch every item in this library
self.progress.update(ptype=ProgressKey.MediaSync,
text="正在获取 %s 数据..." % (library.get("name")))
for item in self.get_items(library.get("id")):
if not item:
continue
                    # Update progress counters
seasoninfo = []
total_count += 1
if item.get("type") in ['Movie', 'movie']:
movie_count += 1
elif item.get("type") in ['Series', 'show']:
tv_count += 1
                        # Query episode info
seasoninfo = self.get_tv_episodes(item.get("id"))
                    # Guard the percentage against a zero media total
                    self.progress.update(ptype=ProgressKey.MediaSync,
                                         text="正在同步 %s,已完成:%s / %s ..." % (
                                             library.get("name"), total_count, total_media_count),
                                         value=round(100 * total_count / total_media_count, 1)
                                         if total_media_count else 0)
                    # Insert the record
self.mediadb.insert(server_type=self._server_type,
iteminfo=item,
seasoninfo=seasoninfo)
            # Update the overall sync statistics
self.mediadb.statistics(server_type=self._server_type,
total_count=total_count,
movie_count=movie_count,
tv_count=tv_count)
            # Finish the progress bar
self.progress.update(ptype=ProgressKey.MediaSync,
value=100,
text="媒体库数据同步完成,同步数量:%s" % total_count)
self.progress.end(ProgressKey.MediaSync)
log.info("【MediaServer】媒体库数据同步完成,同步数量:%s" % total_count)
def check_item_exists(self,
mtype,
title=None,
year=None,
tmdbid=None,
season=None,
episode=None):
"""
        Check whether an item already exists in the library; data is not synced in real time and is for display only
        :param mtype: media type
        :param title: title
        :param year: year
        :param tmdbid: TMDB ID
        :param season: season number
        :param episode: episode number
        :return: the item ID on the media server
"""
media = self.mediadb.query(server_type=self._server_type,
title=title,
year=year,
tmdbid=tmdbid)
if not media:
return None
        # TV shows default to season 1 when no season is given
if mtype not in MovieTypes:
if not season:
season = 1
if season:
            # Match the requested episode against the stored records
seasoninfos = json.loads(media.JSON or "[]")
for seasoninfo in seasoninfos:
if seasoninfo.get("season_num") == int(season):
if not episode:
return media.ITEM_ID
elif seasoninfo.get("episode_num") == int(episode):
return media.ITEM_ID
return None
else:
return media.ITEM_ID
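
    # Illustrative shape of the season JSON that check_item_exists matches
    # against (one record per episode; the values are made up):
    #   [{"season_num": 1, "episode_num": 1}, {"season_num": 1, "episode_num": 2}]
    # A query for season=1, episode=2 returns the item ID; season=2 returns None.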
def get_mediasync_status(self):
"""
        Get the current library sync status
"""
status = self.mediadb.get_statistics(server_type=self._server_type)
if not status:
return {}
else:
return {"movie_count": status.MOVIE_COUNT, "tv_count": status.TV_COUNT, "time": status.UPDATE_TIME}
def get_iteminfo(self, itemid):
"""
        Query item details from the media server by item ID
        :param itemid: ID on the media server
        :return: item details
"""
if not self.server:
return None
if not itemid:
return None
return self.server.get_iteminfo(itemid)
def get_playing_sessions(self):
"""
        Get the sessions that are currently playing
"""
if not self.server:
return None
return self.server.get_playing_sessions()
def webhook_message_handler(self, message: str, channel: MediaServerType):
"""
        Handle a webhook message from the media server
"""
if not self.server:
return
if channel != self.server.get_type():
return
event_info = None
try:
event_info = self.server.get_webhook_message(message)
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【MediaServer】webhook 消息解析异常")
if event_info:
            # Fetch an image for the message
if event_info.get("item_type") == "TV":
                # Look it up on the media server using the returned item_id, season_id and episode_id
image_url = self.get_episode_image_by_id(item_id=event_info.get('item_id'),
season_id=event_info.get('season_id'),
episode_id=event_info.get('episode_id'))
elif event_info.get("item_type") in ["MOV", "SHOW"]:
                # Look it up on the media server using the returned item_id
image_url = self.get_remote_image_by_id(item_id=event_info.get('item_id'),
image_type="Backdrop")
elif event_info.get("item_type") == "AUD":
image_url = self.get_local_image_by_id(item_id=event_info.get('item_id'))
else:
image_url = None
self.message.send_mediaserver_message(event_info=event_info,
channel=channel.value,
image_url=image_url)
def get_resume(self, num=12):
"""
        Get the continue-watching list
"""
if not self.server:
return []
return self.server.get_resume(num=num)
def get_latest(self, num=20):
"""
        Get the recently added items
"""
if not self.server:
return []
return self.server.get_latest(num=num)
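
# --- Hedged usage sketch (not part of the original module) ---
# MediaServer is decorated with @singleton, so repeated construction returns
# the same configured instance; the reads below assume a configured server.
if __name__ == "__main__":
    ms = MediaServer()
    # `server` is built lazily from the config; it is None when unconfigured
    if ms.server:
        print("server type:", ms.get_type().value)
        print("sync status:", ms.get_mediasync_status())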
# ----------------------------------------------------------------------
# File: app/mediaserver/client/jellyfin.py
# ----------------------------------------------------------------------
import re
from urllib.parse import quote
import log
from app.mediaserver.client._base import _IMediaClient
from app.utils import RequestUtils, SystemUtils, ExceptionUtils, IpUtils
from app.utils.types import MediaServerType, MediaType
from config import Config
class Jellyfin(_IMediaClient):
    # Media server ID
    client_id = "jellyfin"
    # Media server type
    client_type = MediaServerType.JELLYFIN
    # Media server name
    client_name = MediaServerType.JELLYFIN.value
    # Private attributes
_client_config = {}
_serverid = None
_apikey = None
_host = None
_play_host = None
_user = None
def __init__(self, config=None):
if config:
self._client_config = config
else:
self._client_config = Config().get_config('jellyfin')
self.init_config()
def init_config(self):
if self._client_config:
self._host = self._client_config.get('host')
if self._host:
if not self._host.startswith('http'):
self._host = "http://" + self._host
if not self._host.endswith('/'):
self._host = self._host + "/"
self._play_host = self._client_config.get('play_host')
if not self._play_host:
self._play_host = self._host
else:
if not self._play_host.startswith('http'):
self._play_host = "http://" + self._play_host
if not self._play_host.endswith('/'):
self._play_host = self._play_host + "/"
self._apikey = self._client_config.get('api_key')
if self._host and self._apikey:
self._user = self.get_user(Config().current_user)
self._serverid = self.get_server_id()
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def get_status(self):
"""
        Test connectivity
"""
return True if self.get_medias_count() else False
def __get_jellyfin_librarys(self):
"""
        Get Jellyfin library information
"""
if not self._host or not self._apikey:
return []
req_url = f"{self._host}Users/{self._user}/Views?api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json().get("Items")
else:
log.error(f"【{self.client_name}】Users/Views 未获取到返回数据")
return []
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Views 出错:" + str(e))
return []
def get_user_count(self):
"""
        Get the user count
"""
if not self._host or not self._apikey:
return 0
req_url = "%sUsers?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return len(res.json())
else:
log.error(f"【{self.client_name}】Users 未获取到返回数据")
return 0
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users出错:" + str(e))
return 0
def get_user(self, user_name=None):
"""
        Get a user ID: prefer a user whose name matches, otherwise an administrator
"""
if not self._host or not self._apikey:
return None
req_url = "%sUsers?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
users = res.json()
                # First look for a user whose name matches the current one
if user_name:
for user in users:
if user.get("Name") == user_name:
return user.get("Id")
                # Otherwise fall back to an administrator
for user in users:
if user.get("Policy", {}).get("IsAdministrator"):
return user.get("Id")
else:
log.error(f"【{self.client_name}】Users 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users出错:" + str(e))
return None
def get_server_id(self):
"""
        Get the server ID
"""
if not self._host or not self._apikey:
return None
req_url = "%sSystem/Info?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json().get("Id")
else:
log.error(f"【{self.client_name}】System/Info 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接System/Info出错:" + str(e))
return None
def get_activity_log(self, num):
"""
        Fetch the Jellyfin activity log
"""
if not self._host or not self._apikey:
return []
req_url = "%sSystem/ActivityLog/Entries?api_key=%s&Limit=%s" % (self._host, self._apikey, num)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "SessionStarted":
event_type = "LG"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str,
"date": SystemUtils.get_local_time(event_date)}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
activity = {"type": event_type, "event": item.get("Name"),
"date": SystemUtils.get_local_time(event_date)}
ret_array.append(activity)
else:
log.error(f"【{self.client_name}】System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接System/ActivityLog/Entries出错:" + str(e))
return []
return ret_array
def get_medias_count(self):
"""
        Get movie / TV / anime media counts
:return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return None
req_url = "%sItems/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
else:
log.error(f"【{self.client_name}】Items/Counts 未获取到返回数据")
return {}
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items/Counts出错:" + str(e))
return {}
def __get_jellyfin_series_id_by_name(self, name, year):
"""
        Look up a show's SeriesId in Jellyfin by name
"""
if not self._host or not self._apikey or not self._user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true" % (
self._host, self._user, self._apikey, name)
try:
res = RequestUtils().get_res(req_url)
if res:
res_items = res.json().get("Items")
if res_items:
for res_item in res_items:
if res_item.get('Name') == name and (
not year or str(res_item.get('ProductionYear')) == str(year)):
return res_item.get('Id')
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items出错:" + str(e))
return None
return ""
def get_movies(self, title, year=None):
"""
        Check whether a movie exists in Jellyfin by title and year; return matches as a list
        :param title: title
        :param year: year; when empty, results are not filtered by year
        :return: list of dicts with title and year attributes
"""
if not self._host or not self._apikey or not self._user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true" % (
self._host, self._user, self._apikey, title)
try:
res = RequestUtils().get_res(req_url)
if res:
res_items = res.json().get("Items")
if res_items:
ret_movies = []
for res_item in res_items:
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
return ret_movies
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items出错:" + str(e))
return None
return []
def get_tv_episodes(self,
item_id=None,
title=None,
year=None,
tmdb_id=None,
season=None):
"""
        Return the episode list in Jellyfin for the given title, year and season
        :param item_id: series ID in Jellyfin
        :param title: title
        :param year: year
        :param tmdb_id: TMDB ID
        :param season: season number
        :return: list of episode numbers
"""
if not self._host or not self._apikey or not self._user:
return None
if not item_id:
            # Look up the series ID
item_id = self.__get_jellyfin_series_id_by_name(title, year)
if item_id is None:
return None
if not item_id:
return []
        # Verify the TMDB IDs match
item_tmdbid = self.get_iteminfo(item_id).get("ProviderIds", {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return []
if not season:
season = ""
req_url = "%sShows/%s/Episodes?season=%s&&userId=%s&isMissing=false&api_key=%s" % (
self._host, item_id, season, self._user, self._apikey)
try:
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
exists_episodes = []
for res_item in res_items:
exists_episodes.append({
"season_num": res_item.get("ParentIndexNumber") or 0,
"episode_num": res_item.get("IndexNumber") or 0
})
return exists_episodes
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Shows/Id/Episodes出错:" + str(e))
return None
return []
def get_no_exists_episodes(self, meta_info, season, total_num):
"""
        Given title, year, season and total episode count, query which episodes Jellyfin is missing
        :param meta_info: recognized media info to query
        :param season: season number
        :param total_num: total episodes in that season
        :return: list of episode numbers missing from that season
"""
if not self._host or not self._apikey:
return None
        # Default to season 1 when no season is given
if not season:
season = 1
exists_episodes = self.get_tv_episodes(title=meta_info.title,
year=meta_info.year,
tmdb_id=meta_info.tmdb_id,
season=season)
if not isinstance(exists_episodes, list):
return None
exists_episodes = [episode.get("episode_num") for episode in exists_episodes]
total_episodes = [episode for episode in range(1, total_num + 1)]
return list(set(total_episodes).difference(set(exists_episodes)))
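
    # Worked example of the set difference above (illustrative): with
    # total_num=6 and existing episodes [1, 2, 3, 5], the call returns
    # [4, 6] (order not guaranteed, since the result passes through a set).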
def get_episode_image_by_id(self, item_id, season_id, episode_id):
"""
        Query an episode image URL from Jellyfin by item_id, season_id and episode_id
        :param item_id: ID in Jellyfin
        :param season_id: season number
        :param episode_id: episode number
        :return: the image's URL on TMDB
"""
if not self._host or not self._apikey or not self._user:
return None
        # Query all episodes
req_url = "%sShows/%s/Episodes?season=%s&&userId=%s&isMissing=false&api_key=%s" % (
self._host, item_id, season_id, self._user, self._apikey)
try:
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
for res_item in res_items:
                    # Find the item ID of the target episode
if res_item.get("IndexNumber") == episode_id:
                        # Query the episode's image
img_url = self.get_remote_image_by_id(res_item.get("Id"), "Primary")
                        # If no TMDB image is found and the play host is external,
                        # fall back to the image scraped by Jellyfin (direct cloud-drive mounts)
if not img_url and not IpUtils.is_internal(self._play_host) \
and res_item.get('ImageTags', {}).get('Primary'):
return "%sItems/%s/Images/Primary?maxHeight=225&maxWidth=400&tag=%s&quality=90" % (
self._play_host, res_item.get("Id"), res_item.get('ImageTags', {}).get('Primary'))
return img_url
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Shows/Id/Episodes出错:" + str(e))
return None
def get_remote_image_by_id(self, item_id, image_type):
"""
        Query a TMDB image URL from Jellyfin by item ID
        :param item_id: ID in Jellyfin
        :param image_type: image kind, e.g. poster or backdrop
        :return: the image's URL on TMDB
"""
if not self._host or not self._apikey:
return None
req_url = "%sItems/%s/RemoteImages?api_key=%s" % (self._host, item_id, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
images = res.json().get("Images")
for image in images:
if image.get("ProviderName") == "TheMovieDb" and image.get("Type") == image_type:
return image.get("Url")
else:
log.error(f"【{self.client_name}】Items/RemoteImages 未获取到返回数据")
return None
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items/Id/RemoteImages出错:" + str(e))
return None
return None
def get_local_image_by_id(self, item_id, remote=True, inner=False):
"""
        Query an audiobook image URL from the media server by item ID
        :param: item_id: ID in Jellyfin
        :param: remote: True for remote use, e.g. calls from TG or WeChat clients
        :param: inner: True for internal NT calls, which are routed through NT as a proxy
"""
if not self._host or not self._apikey:
return None
if not remote:
image_url = "%sItems/%s/Images/Primary" % (self._host, item_id)
if inner:
return self.get_nt_image_url(image_url)
return image_url
else:
host = self._play_host or self._host
image_url = "%sItems/%s/Images/Primary" % (host, item_id)
if IpUtils.is_internal(host):
return self.get_nt_image_url(url=image_url, remote=True)
return image_url
def refresh_root_library(self):
"""
        Ask Jellyfin to refresh the entire library
"""
if not self._host or not self._apikey:
return False
req_url = "%sLibrary/Refresh?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().post_res(req_url)
if res:
return True
else:
log.info(f"【{self.client_name}】刷新媒体库失败,无法连接Jellyfin!")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Library/Refresh出错:" + str(e))
return False
def refresh_library_by_items(self, items):
"""
        Refresh libraries by type, name and year; Jellyfin has no per-item refresh API, so the whole library is refreshed
        :param items: recognized media info list whose libraries need refreshing
"""
        # No per-item refresh API was found, so refresh the whole library for now
if not items:
return False
if not self._host or not self._apikey:
return False
return self.refresh_root_library()
def get_libraries(self):
"""
        List all libraries on the media server
"""
if not self._host or not self._apikey:
return []
libraries = []
for library in self.__get_jellyfin_librarys() or []:
match library.get("CollectionType"):
case "movies":
library_type = MediaType.MOVIE.value
case "tvshows":
library_type = MediaType.TV.value
case _:
continue
image = self.get_local_image_by_id(library.get("Id"), remote=False, inner=True)
link = f"{self._play_host or self._host}web/index.html#!" \
f"/movies.html?topParentId={library.get('Id')}" \
if library_type == MediaType.MOVIE.value \
else f"{self._play_host or self._host}web/index.html#!" \
f"/tv.html?topParentId={library.get('Id')}"
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type,
"image": image,
"link": link
})
return libraries
def __get_backdrop_url(self, item_id, image_tag, remote=True, inner=False):
"""
        Get the backdrop image URL
        :param: item_id: ID in Jellyfin
        :param: image_tag: image tag
        :param: remote: True for remote use, e.g. calls from TG or WeChat clients
        :param: inner: True for internal NT calls, which are routed through NT as a proxy
"""
if not self._host or not self._apikey:
return ""
if not image_tag or not item_id:
return ""
if not remote:
image_url = f"{self._host}Items/{item_id}/" \
f"Images/Backdrop?tag={image_tag}&fillWidth=666&api_key={self._apikey}"
if inner:
return self.get_nt_image_url(image_url)
return image_url
else:
host = self._play_host or self._host
image_url = f"{host}Items/{item_id}/" \
f"Images/Backdrop?tag={image_tag}&fillWidth=666&api_key={self._apikey}"
if IpUtils.is_internal(host):
return self.get_nt_image_url(url=image_url, remote=True)
return image_url
def get_iteminfo(self, itemid):
"""
        Get details of a single item
"""
if not itemid:
return {}
if not self._host or not self._apikey:
return {}
req_url = "%sUsers/%s/Items/%s?api_key=%s" % (
self._host, self._user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {}
def get_items(self, parent):
"""
        Iterate over all media items under the given parent library
"""
        if not parent:
            yield {}
            return
        if not self._host or not self._apikey:
            yield {}
            return
req_url = "%sUsers/%s/Items?parentId=%s&api_key=%s" % (self._host, self._user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
results = res.json().get("Items") or []
for result in results:
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"originalTitle": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
elif "Folder" in result.get("Type"):
for item in self.get_items(result.get("Id")):
yield item
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items出错:" + str(e))
yield {}
def get_play_url(self, item_id):
"""
        Build the play URL for a media item
        :param item_id: media ID
"""
return f"{self._play_host or self._host}web/index.html#!/details?id={item_id}&serverId={self._serverid}"
def get_playing_sessions(self):
"""
        Get the sessions that are currently playing
"""
if not self._host or not self._apikey:
return []
playing_sessions = []
req_url = "%sSessions?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
return playing_sessions
except Exception as e:
ExceptionUtils.exception_traceback(e)
return []
def get_webhook_message(self, message):
"""
        Parse a Jellyfin webhook payload
"""
eventItem = {'event': message.get('NotificationType', ''),
'item_name': message.get('Name'),
'user_name': message.get('NotificationUsername'),
'play_url': f"/open?url={quote(self.get_play_url(message.get('Id')))}&type=jellyfin"
}
return eventItem
def get_resume(self, num=12):
"""
        Get the continue-watching list
"""
if not self._host or not self._apikey:
return None
req_url = f"{self._host}Users/{self._user}/Items/Resume?Limit={num}&MediaTypes=Video&api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
result = res.json().get("Items") or []
ret_resume = []
for item in result:
if item.get("Type") not in ["Movie", "Episode"]:
continue
item_type = MediaType.MOVIE.value if item.get("Type") == "Movie" else MediaType.TV.value
link = self.get_play_url(item.get("Id"))
if item.get("BackdropImageTags"):
image = self.__get_backdrop_url(item_id=item.get("Id"),
image_tag=item.get("BackdropImageTags")[0],
remote=False,
inner=True)
else:
image = self.get_local_image_by_id(item.get("Id"), remote=False, inner=True)
if item_type == MediaType.MOVIE.value:
title = item.get("Name")
else:
if item.get("ParentIndexNumber") == 1:
title = f'{item.get("SeriesName")} 第{item.get("IndexNumber")}集'
else:
title = f'{item.get("SeriesName")} 第{item.get("ParentIndexNumber")}季第{item.get("IndexNumber")}集'
ret_resume.append({
"id": item.get("Id"),
"name": title,
"type": item_type,
"image": image,
"link": link,
"percent": item.get("UserData", {}).get("PlayedPercentage")
})
return ret_resume
else:
log.error(f"【{self.client_name}】Users/Items/Resume 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items/Resume出错:" + str(e))
return []
def get_latest(self, num=20):
"""
        Get the recently added items
"""
if not self._host or not self._apikey:
return None
req_url = f"{self._host}Users/{self._user}/Items/Latest?Limit={num}&MediaTypes=Video&api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
result = res.json() or []
ret_latest = []
for item in result:
if item.get("Type") not in ["Movie", "Series"]:
continue
item_type = MediaType.MOVIE.value if item.get("Type") == "Movie" else MediaType.TV.value
link = self.get_play_url(item.get("Id"))
image = self.get_local_image_by_id(item_id=item.get("Id"), remote=False, inner=True)
ret_latest.append({
"id": item.get("Id"),
"name": item.get("Name"),
"type": item_type,
"image": image,
"link": link
})
return ret_latest
else:
log.error(f"【{self.client_name}】Users/Items/Latest 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items/Latest出错:" + str(e))
return []
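
# --- Hedged usage sketch (not part of the original module) ---
# Builds a client from an explicit config dict instead of Config(); the host
# and API key below are hypothetical placeholders.
if __name__ == "__main__":
    demo_conf = {
        "host": "http://192.168.1.10:8096",  # hypothetical Jellyfin address
        "api_key": "demo-api-key",           # hypothetical key
        "play_host": "",
    }
    client = Jellyfin(config=demo_conf)
    print("reachable:", client.get_status())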
# ----------------------------------------------------------------------
# File: app/mediaserver/client/plex.py
# ----------------------------------------------------------------------
import os
from urllib.parse import quote
from functools import lru_cache
from urllib.parse import quote_plus
import log
from app.mediaserver.client._base import _IMediaClient
from app.utils import ExceptionUtils
from app.utils.types import MediaServerType, MediaType
from config import Config
from plexapi import media
from plexapi.myplex import MyPlexAccount
from plexapi.server import PlexServer
class Plex(_IMediaClient):
    # Media server ID
    client_id = "plex"
    # Media server type
    client_type = MediaServerType.PLEX
    # Media server name
    client_name = MediaServerType.PLEX.value
    # Private attributes
_client_config = {}
_host = None
_token = None
_username = None
_password = None
_servername = None
_plex = None
_play_host = None
_libraries = []
def __init__(self, config=None):
if config:
self._client_config = config
else:
self._client_config = Config().get_config('plex')
self.init_config()
def init_config(self):
if self._client_config:
self._host = self._client_config.get('host')
self._token = self._client_config.get('token')
if self._host:
if not self._host.startswith('http'):
self._host = "http://" + self._host
if not self._host.endswith('/'):
self._host = self._host + "/"
self._play_host = self._client_config.get('play_host')
if not self._play_host:
self._play_host = self._host
else:
if not self._play_host.startswith('http'):
self._play_host = "http://" + self._play_host
if not self._play_host.endswith('/'):
self._play_host = self._play_host + "/"
if "app.plex.tv" in self._play_host:
self._play_host = self._play_host + "desktop/"
else:
self._play_host = self._play_host + "web/index.html"
self._username = self._client_config.get('username')
self._password = self._client_config.get('password')
self._servername = self._client_config.get('servername')
if self._host and self._token:
try:
self._plex = PlexServer(self._host, self._token)
except Exception as e:
ExceptionUtils.exception_traceback(e)
self._plex = None
log.error(f"【{self.client_name}】Plex服务器连接失败:{str(e)}")
elif self._username and self._password and self._servername:
try:
self._plex = MyPlexAccount(self._username, self._password).resource(self._servername).connect()
except Exception as e:
ExceptionUtils.exception_traceback(e)
self._plex = None
log.error(f"【{self.client_name}】Plex服务器连接失败:{str(e)}")
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def get_status(self):
"""
        Test connectivity
"""
return True if self._plex else False
@staticmethod
def get_user_count(**kwargs):
"""
        Get the user count; Plex is configured with a single user, so this always returns 1
"""
return 1
def get_activity_log(self, num):
"""
        Fetch Plex activity records
"""
if not self._plex:
return []
ret_array = []
try:
            # type values: 1 = movie, 4 = episode; see SEARCHTYPES in plexapi/utils.py
            # fetch entries sorted by last-viewed time, descending
historys = self._plex.library.search(sort='lastViewedAt:desc', limit=num, type='1,4')
for his in historys:
# 过滤掉最后播放时间为空的
if his.lastViewedAt:
if his.type == "episode":
event_title = "%s %s%s %s" % (
his.grandparentTitle,
"S" + str(his.parentIndex),
"E" + str(his.index),
his.title
)
event_str = "开始播放剧集 %s" % event_title
else:
event_title = "%s %s" % (
his.title, "(" + str(his.year) + ")")
event_str = "开始播放电影 %s" % event_title
event_type = "PL"
event_date = his.lastViewedAt.strftime('%Y-%m-%d %H:%M:%S')
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接System/ActivityLog/Entries出错:" + str(e))
return []
if ret_array:
ret_array = sorted(ret_array, key=lambda x: x['date'], reverse=True)
return ret_array
def get_medias_count(self):
"""
        Get the numbers of movie, TV series and anime media items
        :return: MovieCount SeriesCount SongCount
"""
if not self._plex:
return {}
sections = self._plex.library.sections()
MovieCount = SeriesCount = SongCount = EpisodeCount = 0
for sec in sections:
if sec.type == "movie":
MovieCount += sec.totalSize
if sec.type == "show":
SeriesCount += sec.totalSize
EpisodeCount += sec.totalViewSize(libtype='episode')
if sec.type == "artist":
SongCount += sec.totalSize
return {
"MovieCount": MovieCount,
"SeriesCount": SeriesCount,
"SongCount": SongCount,
"EpisodeCount": EpisodeCount
}
def get_movies(self, title, year=None):
"""
        Check by title and year whether a movie exists in Plex; return matches as a list
        :param title: title
        :param year: year; no year filter is applied when empty
        :return: list of dicts with title and year attributes
"""
if not self._plex:
return None
ret_movies = []
if year:
movies = self._plex.library.search(title=title, year=year, libtype="movie")
else:
movies = self._plex.library.search(title=title, libtype="movie")
for movie in movies:
ret_movies.append({'title': movie.title, 'year': movie.year})
return ret_movies
def get_tv_episodes(self,
item_id=None,
title=None,
year=None,
tmdbid=None,
season=None):
"""
        Query all episodes of a TV series by title, year and season
        :param item_id: ID in Plex
        :param title: title
        :param year: year; may be empty, in which case no year filter is applied
        :param tmdbid: TMDB ID
        :param season: season number, numeric
        :return: list of all episodes
"""
if not self._plex:
return []
if not item_id:
videos = self._plex.library.search(title=title, year=year, libtype="show")
if not videos:
return []
episodes = videos[0].episodes()
else:
episodes = self._plex.fetchItem(item_id).episodes()
ret_tvs = []
for episode in episodes:
if season and episode.seasonNumber != int(season):
continue
ret_tvs.append({
"season_num": episode.seasonNumber,
"episode_num": episode.index
})
return ret_tvs
def get_no_exists_episodes(self, meta_info, season, total_num):
"""
        Query which episodes are missing from Plex, by title, year, season and total episode count
        :param meta_info: recognized media info to query
        :param season: season number, numeric
        :param total_num: total number of episodes in the season
        :return: list of episode numbers missing from the season
"""
if not self._plex:
return None
        # Default to season 1 when no season is given
if not season:
season = 1
episodes = self.get_tv_episodes(title=meta_info.title,
year=meta_info.year,
season=season)
exists_episodes = [episode['episode_num'] for episode in episodes]
total_episodes = [episode for episode in range(1, total_num + 1)]
return list(set(total_episodes).difference(set(exists_episodes)))
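    # Illustrative sketch (not part of the original client): the missing-episode
    # calculation above is plain set arithmetic; e.g. a 10-episode season with
    # episodes 1-3 on disk leaves [4, 5, 6, 7, 8, 9, 10].
    @staticmethod
    def _example_missing_episodes(exists_episodes=(1, 2, 3), total_num=10):
        total_episodes = range(1, total_num + 1)
        return sorted(set(total_episodes).difference(exists_episodes))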
def get_episode_image_by_id(self, item_id, season_id, episode_id):
"""
        Query an episode image URL from Plex by item_id, season_id and episode_id
        :param item_id: ID of the specific episode in Plex
        :param season_id: season, currently unused
        :param episode_id: episode, currently unused
        :return: URL of the poster image
"""
if not self._plex:
return None
try:
images = self._plex.fetchItems('/library/metadata/%s/posters' % item_id, cls=media.Poster)
for image in images:
if hasattr(image, 'key') and image.key.startswith('http'):
return image.key
return None
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】获取剧集封面出错:" + str(e))
return None
def get_remote_image_by_id(self, item_id, image_type):
"""
        Query a remote image URL from Plex by item id
        :param item_id: ID in Plex
        :param image_type: image type, e.g. Poster or Backdrop
        :return: URL of the image
"""
if not self._plex:
return None
try:
if image_type == "Poster":
images = self._plex.fetchItems('/library/metadata/%s/posters' % item_id, cls=media.Poster)
else:
images = self._plex.fetchItems('/library/metadata/%s/arts' % item_id, cls=media.Art)
for image in images:
if hasattr(image, 'key') and image.key.startswith('http'):
return image.key
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】获取封面出错:" + str(e))
return None
def get_local_image_by_id(self, item_id, remote=True):
"""
        Query a local image URL from the media server by item id; not implemented for Plex
        :param item_id: ID in Plex
        :param remote: whether for remote use
"""
return None
def refresh_root_library(self):
"""
        Ask Plex to refresh the entire library
"""
if not self._plex:
return False
return self._plex.library.update()
def refresh_library_by_items(self, items):
"""
        Refresh libraries by path
"""
if not self._plex:
return False
# _libraries可能未初始化,初始化一下
if not self._libraries:
try:
self._libraries = self._plex.library.sections()
except Exception as err:
ExceptionUtils.exception_traceback(err)
result_dict = {}
for item in items:
file_path = item.get("file_path")
lib_key, path = self.__find_librarie(file_path, self._libraries)
# 如果存在同一剧集的多集,key(path)相同会合并
result_dict[path] = lib_key
if "" in result_dict:
# 如果有匹配失败的,刷新整个库
self._plex.library.update()
else:
# 否则一个一个刷新
for path, lib_key in result_dict.items():
log.info(f"【{self.client_name}】刷新媒体库:{lib_key} : {path}")
self._plex.query(f'/library/sections/{lib_key}/refresh?path={quote_plus(path)}')
@staticmethod
def __find_librarie(path, libraries):
"""
        Determine which library a path belongs to.
        Library locations must not duplicate or nest each other, so
        os.path.commonprefix([path, location]) == location is sufficient here.
"""
if path is None:
return "", ""
# 只要路径,不要文件名
dir_path = os.path.dirname(path)
try:
for lib in libraries:
if hasattr(lib, "locations") and lib.locations:
for location in lib.locations:
if os.path.commonprefix([dir_path, location]) == location:
return lib.key, dir_path
except Exception as err:
ExceptionUtils.exception_traceback(err)
return "", ""
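    # Illustrative sketch (not part of the original client): __find_librarie
    # attributes a file to a library when the library location is a prefix of
    # the file's directory. Paths below are hypothetical.
    @staticmethod
    def _example_match_library(path="/media/movies/Heat (1995)/Heat.mkv",
                               location="/media/movies"):
        dir_path = os.path.dirname(path)
        return os.path.commonprefix([dir_path, location]) == location  # True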
def get_libraries(self):
"""
        Get the list of all libraries on the media server
"""
if not self._plex:
return []
try:
self._libraries = self._plex.library.sections()
except Exception as err:
ExceptionUtils.exception_traceback(err)
return []
libraries = []
for library in self._libraries:
match library.type:
case "movie":
library_type = MediaType.MOVIE.value
image_list_str = self.get_libraries_image(library.key, 1)
case "show":
library_type = MediaType.TV.value
image_list_str = self.get_libraries_image(library.key, 2)
case _:
continue
libraries.append({
"id": library.key,
"name": library.title,
"paths": library.locations,
"type": library_type,
"image_list": image_list_str,
"link": f"{self._play_host or self._host}#!/media/{self._plex.machineIdentifier}"
f"/com.plexapp.plugins.library?source={library.key}"
})
return libraries
@lru_cache(maxsize=10)
def get_libraries_image(self, library_key, type):
"""
        Get an image list of recently added media on the media server
        param: library_key
        param: type 1 = movie, 2 = show; see SEARCHTYPES in plexapi/utils.py
"""
if not self._plex:
return ""
        # result container (dict keys double as an ordered, de-duplicated set)
        poster_urls = {}
        # paging offset
        container_start = 0
        # total items wanted / page size
        total_size = 4
        # keep fetching the next page until enough items are collected
while len(poster_urls) < total_size:
            items = self._plex.fetchItems(f"/hubs/home/recentlyAdded?type={type}&sectionID={library_key}",
container_size=total_size,
container_start=container_start)
for item in items:
if item.type == 'episode':
# 如果是剧集的单集,则去找上级的图片
if item.parentThumb is not None:
poster_urls[item.parentThumb] = None
else:
# 否则就用自己的图片
if item.thumb is not None:
poster_urls[item.thumb] = None
if len(poster_urls) == total_size:
break
if len(items) < total_size:
break
container_start += total_size
image_list_str = ", ".join(
[f"{self.get_nt_image_url(self._host.rstrip('/') + url)}?X-Plex-Token={self._token}" for url in
list(poster_urls.keys())[:total_size]])
return image_list_str
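    # Illustrative sketch (not part of the original client): poster_urls above
    # is a dict used as an ordered set, so duplicate thumbs (several episodes
    # of the same series) collapse while insertion order is preserved.
    @staticmethod
    def _example_dedup_thumbs(thumbs=("/t/1", "/t/1", "/t/2", "/t/1", "/t/3")):
        return list({t: None for t in thumbs}.keys())  # ['/t/1', '/t/2', '/t/3']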
def get_iteminfo(self, itemid):
"""
        Get details of a single item
"""
if not self._plex:
return {}
try:
item = self._plex.fetchItem(itemid)
ids = self.__get_ids(item.guids)
return {'ProviderIds': {'Tmdb': ids['tmdb_id'], 'Imdb': ids['imdb_id']}}
except Exception as err:
ExceptionUtils.exception_traceback(err)
return {}
def get_play_url(self, item_id):
"""
        Build the media playback URL
        :param item_id: ID of the media item
"""
return f'{self._play_host or self._host}#!/server/{self._plex.machineIdentifier}/details?key={item_id}'
def get_items(self, parent):
"""
        Iterate over all media items in the given library section
"""
        if not parent:
            yield {}
            return
        if not self._plex:
            yield {}
            return
try:
section = self._plex.library.sectionByID(parent)
if section:
for item in section.all():
if not item:
continue
ids = self.__get_ids(item.guids)
path = None
if item.locations:
path = item.locations[0]
yield {"id": item.key,
"library": item.librarySectionID,
"type": item.type,
"title": item.title,
"originalTitle": item.originalTitle,
"year": item.year,
"tmdbid": ids['tmdb_id'],
"imdbid": ids['imdb_id'],
"tvdbid": ids['tvdb_id'],
"path": path}
except Exception as err:
ExceptionUtils.exception_traceback(err)
yield {}
@staticmethod
def __get_ids(guids):
guid_mapping = {
"imdb://": "imdb_id",
"tmdb://": "tmdb_id",
"tvdb://": "tvdb_id"
}
ids = {}
for prefix, varname in guid_mapping.items():
ids[varname] = None
for guid in guids:
for prefix, varname in guid_mapping.items():
if isinstance(guid, dict):
if guid['id'].startswith(prefix):
# 找到匹配的ID
ids[varname] = guid['id'][len(prefix):]
break
else:
if guid.id.startswith(prefix):
# 找到匹配的ID
ids[varname] = guid.id[len(prefix):]
break
return ids
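    # Illustrative sketch (not part of the original client): how __get_ids maps
    # guid strings to external IDs. plexapi returns Guid objects with an ``id``
    # attribute, but the helper above also accepts plain dicts, used here.
    @staticmethod
    def _example_get_ids():
        guids = [{"id": "tmdb://603"}, {"id": "imdb://tt0133093"}]
        # expected: {'imdb_id': 'tt0133093', 'tmdb_id': '603', 'tvdb_id': None}
        return Plex._Plex__get_ids(guids)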
def get_playing_sessions(self):
"""
        Get sessions that are currently playing
"""
if not self._plex:
return []
sessions = self._plex.sessions()
ret_sessions = []
for session in sessions:
bitrate = sum([m.bitrate or 0 for m in session.media])
ret_sessions.append({
"type": session.TAG,
"bitrate": bitrate,
"address": session.player.address
})
return ret_sessions
def get_webhook_message(self, message):
"""
        Parse a Plex webhook payload
        Meaning of the eventItem fields:
        event       event type
        item_type   media type: TV, MOV
        item_name   TV: 琅琊榜 S1E6 剖心明志 虎口脱险
                    MOV: 猪猪侠大冒险(2001)
        overview    plot description
"""
eventItem = {'event': message.get('event', '')}
if message.get('Metadata'):
if message.get('Metadata', {}).get('type') == 'episode':
eventItem['item_type'] = "TV"
eventItem['item_name'] = "%s %s%s %s" % (
message.get('Metadata', {}).get('grandparentTitle'),
"S" + str(message.get('Metadata', {}).get('parentIndex')),
"E" + str(message.get('Metadata', {}).get('index')),
message.get('Metadata', {}).get('title'))
eventItem['item_id'] = message.get('Metadata', {}).get('ratingKey')
eventItem['season_id'] = message.get('Metadata', {}).get('parentIndex')
eventItem['episode_id'] = message.get('Metadata', {}).get('index')
if message.get('Metadata', {}).get('summary') and len(message.get('Metadata', {}).get('summary')) > 100:
eventItem['overview'] = str(message.get('Metadata', {}).get('summary'))[:100] + "..."
else:
eventItem['overview'] = message.get('Metadata', {}).get('summary')
else:
eventItem['item_type'] = "MOV" if message.get('Metadata', {}).get('type') == 'movie' else "SHOW"
eventItem['item_name'] = "%s %s" % (
message.get('Metadata', {}).get('title'), "(" + str(message.get('Metadata', {}).get('year')) + ")")
eventItem['item_id'] = message.get('Metadata', {}).get('ratingKey')
if len(message.get('Metadata', {}).get('summary')) > 100:
eventItem['overview'] = str(message.get('Metadata', {}).get('summary'))[:100] + "..."
else:
eventItem['overview'] = message.get('Metadata', {}).get('summary')
if eventItem.get('event') == "library.new":
eventItem['play_url'] = f"/open?url=" \
f"{quote(self.get_play_url(message.get('Metadata', {}).get('key')))}&type=plex"
if message.get('Player'):
eventItem['ip'] = message.get('Player').get('publicAddress')
eventItem['client'] = message.get('Player').get('title')
# 这里给个空,防止拼消息的时候出现None
eventItem['device_name'] = ' '
if message.get('Account'):
eventItem['user_name'] = message.get("Account").get('title')
return eventItem
def get_resume(self, num=12):
"""
        Get continue-watching media
"""
if not self._plex:
return []
items = self._plex.fetchItems('/hubs/continueWatching/items', container_start=0, container_size=num)
ret_resume = []
for item in items:
item_type = MediaType.MOVIE.value if item.TYPE == "movie" else MediaType.TV.value
if item_type == MediaType.MOVIE.value:
name = item.title
else:
if item.parentIndex == 1:
name = "%s 第%s集" % (item.grandparentTitle, item.index)
else:
name = "%s 第%s季第%s集" % (item.grandparentTitle, item.parentIndex, item.index)
link = self.get_play_url(item.key)
image = self.get_nt_image_url(item.artUrl)
ret_resume.append({
"id": item.key,
"name": name,
"type": item_type,
"image": image,
"link": link,
"percent": item.viewOffset / item.duration * 100 if item.viewOffset and item.duration else 0
})
return ret_resume
def get_latest(self, num=20):
"""
        Get recently added media
"""
if not self._plex:
return []
items = self._plex.fetchItems('/library/recentlyAdded', container_start=0, container_size=num)
        ret_latest = []
        for item in items:
            item_type = MediaType.MOVIE.value if item.TYPE == "movie" else MediaType.TV.value
            link = self.get_play_url(item.key)
            title = item.title if item_type == MediaType.MOVIE.value else \
                "%s 第%s季" % (item.parentTitle, item.index)
            image = self.get_nt_image_url(item.posterUrl)
            ret_latest.append({
                "id": item.key,
                "name": title,
                "type": item_type,
                "image": image,
                "link": link
            })
        return ret_latest
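# Illustrative usage sketch (not part of the original module); the host and
# token below are hypothetical and nothing here runs at import time.
def _example_plex_client():
    client = Plex({"host": "192.168.1.2:32400", "token": "xxxxxxxx"})
    if client.get_status():
        for library in client.get_libraries():
            print(library.get("name"), library.get("type"))
        print(client.get_medias_count())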
| 24,168 | Python | .py | 574 | 26.440767 | 120 | 0.510678 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,038 | emby.py | demigody_nas-tools/app/mediaserver/client/emby.py | import os
import re
from urllib.parse import quote
import log
from app.mediaserver.client._base import _IMediaClient
from app.utils import RequestUtils, SystemUtils, ExceptionUtils, IpUtils
from app.utils.types import MediaType, MediaServerType
from config import Config
class Emby(_IMediaClient):
    # Media server ID
    client_id = "emby"
    # Media server type
    client_type = MediaServerType.EMBY
    # Media server name
    client_name = MediaServerType.EMBY.value
    # Private attributes
_client_config = {}
_serverid = None
_apikey = None
_host = None
_play_host = None
_user = None
_folders = []
def __init__(self, config=None):
if config:
self._client_config = config
else:
self._client_config = Config().get_config('emby')
self.init_config()
def init_config(self):
if self._client_config:
self._host = self._client_config.get('host')
if self._host:
if not self._host.startswith('http'):
self._host = "http://" + self._host
if not self._host.endswith('/'):
self._host = self._host + "/"
self._play_host = self._client_config.get('play_host')
if not self._play_host:
self._play_host = self._host
else:
if not self._play_host.startswith('http'):
self._play_host = "http://" + self._play_host
if not self._play_host.endswith('/'):
self._play_host = self._play_host + "/"
self._apikey = self._client_config.get('api_key')
if self._host and self._apikey:
self._folders = self.__get_emby_folders()
self._user = self.get_user(Config().current_user)
self._serverid = self.get_server_id()
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def get_status(self):
"""
        Test connectivity
"""
return True if self.get_medias_count() else False
def __get_emby_folders(self):
"""
        Get the list of Emby media folder paths
"""
if not self._host or not self._apikey:
return []
req_url = "%semby/Library/SelectableMediaFolders?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
else:
log.error(f"【{self.client_name}】Library/SelectableMediaFolders 未获取到返回数据")
return []
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Library/SelectableMediaFolders 出错:" + str(e))
return []
def __get_emby_librarys(self):
"""
        Get the list of Emby libraries
"""
if not self._host or not self._apikey:
return []
req_url = f"{self._host}emby/Users/{self._user}/Views?api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json().get("Items")
else:
log.error(f"【{self.client_name}】User/Views 未获取到返回数据")
return []
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接User/Views 出错:" + str(e))
return []
def get_user(self, user_name=None):
"""
        Get a user id, preferring a user matching the given name, otherwise an administrator
"""
if not self._host or not self._apikey:
return None
req_url = "%sUsers?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
users = res.json()
# 先查询是否有与当前用户名称匹配的
if user_name:
for user in users:
if user.get("Name") == user_name:
return user.get("Id")
# 查询管理员
for user in users:
if user.get("Policy", {}).get("IsAdministrator"):
return user.get("Id")
else:
log.error(f"【{self.client_name}】Users 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users出错:" + str(e))
return None
def __get_backdrop_url(self, item_id, image_tag, remote=True, inner=False):
"""
        Get the Backdrop image URL
        :param: item_id: ID in Emby
        :param: image_tag: tag of the image
        :param: remote whether for remote use; should be True for TG/WeChat clients
        :param: inner whether called internally by NT; when True the NT relay is used
"""
if not self._host or not self._apikey:
return ""
if not image_tag or not item_id:
return ""
if not remote:
image_url = f"{self._host}Items/{item_id}/"\
f"Images/Backdrop?tag={image_tag}&fillWidth=666&api_key={self._apikey}"
if inner:
return self.get_nt_image_url(image_url)
return image_url
else:
host = self._play_host or self._host
image_url = f"{host}Items/{item_id}/"\
f"Images/Backdrop?tag={image_tag}&fillWidth=666&api_key={self._apikey}"
if IpUtils.is_internal(host):
return self.get_nt_image_url(url=image_url, remote=True)
return image_url
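    # Illustrative sketch (not part of the original client): shape of the URL
    # built by __get_backdrop_url above. Host, item id, tag and key are
    # hypothetical.
    @staticmethod
    def _example_backdrop_url(host="http://emby:8096/", item_id="42",
                              image_tag="abc123", apikey="key"):
        return f"{host}Items/{item_id}/" \
               f"Images/Backdrop?tag={image_tag}&fillWidth=666&api_key={apikey}"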
def get_server_id(self):
"""
        Get the server information (Id)
"""
if not self._host or not self._apikey:
return None
req_url = "%sSystem/Info?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json().get("Id")
else:
log.error(f"【{self.client_name}】System/Info 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接System/Info出错:" + str(e))
return None
def get_user_count(self):
"""
        Get the user count
"""
if not self._host or not self._apikey:
return 0
req_url = "%semby/Users/Query?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json().get("TotalRecordCount")
else:
log.error(f"【{self.client_name}】Users/Query 未获取到返回数据")
return 0
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Query出错:" + str(e))
return 0
def get_activity_log(self, num):
"""
        Fetch Emby activity records
"""
if not self._host or not self._apikey:
return []
req_url = "%semby/System/ActivityLog/Entries?api_key=%s&" % (self._host, self._apikey)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "AuthenticationSucceeded":
event_type = "LG"
event_date = SystemUtils.get_local_time(item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = SystemUtils.get_local_time(item.get("Date"))
event_str = item.get("Name")
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
else:
log.error(f"【{self.client_name}】System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接System/ActivityLog/Entries出错:" + str(e))
return []
return ret_array[:num]
def get_medias_count(self):
"""
        Get the numbers of movie, TV series and anime media items
        :return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return {}
req_url = "%semby/Items/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
else:
log.error(f"【{self.client_name}】Items/Counts 未获取到返回数据")
return {}
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items/Counts出错:" + str(e))
return {}
def __get_emby_series_id_by_name(self, name, year):
"""
        Query the SeriesId of a series in Emby by name
        :param name: title
        :param year: year
        :return: None means the server is unreachable, "" means not found, otherwise the ID
"""
if not self._host or not self._apikey:
return None
req_url = "%semby/Items?IncludeItemTypes=Series&Fields=ProductionYear&StartIndex=0&Recursive=true&SearchTerm=%s&Limit=10&IncludeSearchTypes=false&api_key=%s" % (
self._host, name, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
res_items = res.json().get("Items")
if res_items:
for res_item in res_items:
if res_item.get('Name') == name and (
not year or str(res_item.get('ProductionYear')) == str(year)):
return res_item.get('Id')
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items出错:" + str(e))
return None
return ""
def get_movies(self, title, year=None):
"""
        Check by title and year whether a movie exists in Emby; return matches as a list
        :param title: title
        :param year: year; may be empty, in which case no year filter is applied
        :return: list of dicts with title and year attributes
"""
if not self._host or not self._apikey:
return None
req_url = "%semby/Items?IncludeItemTypes=Movie&Fields=ProductionYear&StartIndex=0&Recursive=true&SearchTerm=%s&Limit=10&IncludeSearchTypes=false&api_key=%s" % (
self._host, title, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
res_items = res.json().get("Items")
if res_items:
ret_movies = []
for res_item in res_items:
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
return ret_movies
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items出错:" + str(e))
return None
return []
def get_tv_episodes(self,
item_id=None,
title=None,
year=None,
tmdb_id=None,
season=None):
"""
        Return the list of episodes in Emby by title, year and season
        :param item_id: media source ID
        :param title: title
        :param year: year
        :param tmdb_id: TMDB ID
        :param season: season
        :return: list of episode numbers
"""
if not self._host or not self._apikey:
return None
# 电视剧
if not item_id:
item_id = self.__get_emby_series_id_by_name(title, year)
if item_id is None:
return None
if not item_id:
return []
# 验证tmdbid是否相同
item_tmdbid = self.get_iteminfo(item_id).get("ProviderIds", {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return []
# /Shows/Id/Episodes 查集的信息
if not season:
season = ""
req_url = "%semby/Shows/%s/Episodes?Season=%s&IsMissing=false&api_key=%s" % (
self._host, item_id, season, self._apikey)
try:
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
exists_episodes = []
for res_item in res_items:
exists_episodes.append({
"season_num": res_item.get("ParentIndexNumber") or 0,
"episode_num": res_item.get("IndexNumber") or 0
})
return exists_episodes
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Shows/Id/Episodes出错:" + str(e))
return None
return []
def get_no_exists_episodes(self, meta_info, season, total_num):
"""
        Query which episodes are missing from Emby, by title, year, season and total episode count
        :param meta_info: recognized media info to query
        :param season: season number, numeric
        :param total_num: total number of episodes in the season
        :return: list of episode numbers missing from the season
"""
if not self._host or not self._apikey:
return None
        # Default to season 1 when no season is given
if not season:
season = 1
exists_episodes = self.get_tv_episodes(title=meta_info.title,
year=meta_info.year,
tmdb_id=meta_info.tmdb_id,
season=season)
if not isinstance(exists_episodes, list):
return None
exists_episodes = [episode.get("episode_num") for episode in exists_episodes]
total_episodes = [episode for episode in range(1, total_num + 1)]
return list(set(total_episodes).difference(set(exists_episodes)))
def get_episode_image_by_id(self, item_id, season_id, episode_id):
"""
        Query an image URL from Emby by item_id, season_id and episode_id
        :param item_id: ID in Emby
        :param season_id: season
        :param episode_id: episode
        :return: URL of the image on TMDB
"""
if not self._host or not self._apikey:
return None
# 查询所有剧集
req_url = "%semby/Shows/%s/Episodes?Season=%s&IsMissing=false&api_key=%s" % (
self._host, item_id, season_id, self._apikey)
try:
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
for res_item in res_items:
# 查询当前剧集的itemid
if res_item.get("IndexNumber") == episode_id:
# 查询当前剧集的图片
img_url = self.get_remote_image_by_id(res_item.get("Id"), "Primary")
# 没查到tmdb图片则判断播放地址是不是外网,使用emby刮削的图片(直接挂载网盘场景)
if not img_url and not IpUtils.is_internal(self._play_host) \
and res_item.get('ImageTags', {}).get('Primary'):
return "%semby/Items/%s/Images/Primary?maxHeight=225&maxWidth=400&tag=%s&quality=90" % (
self._play_host, res_item.get("Id"), res_item.get('ImageTags', {}).get('Primary'))
return img_url
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Shows/Id/Episodes出错:" + str(e))
return None
def get_remote_image_by_id(self, item_id, image_type):
"""
        Query the TMDB image URL from Emby by item id
        :param item_id: ID in Emby
        :param image_type: image type, e.g. poster or backdrop
        :return: URL of the image on TMDB
"""
if not self._host or not self._apikey:
return None
req_url = "%semby/Items/%s/RemoteImages?api_key=%s" % (self._host, item_id, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
images = res.json().get("Images")
for image in images:
if image.get("ProviderName") == "TheMovieDb" and image.get("Type") == image_type:
return image.get("Url")
else:
log.error(f"【{self.client_name}】Items/RemoteImages 未获取到返回数据")
return None
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items/Id/RemoteImages出错:" + str(e))
return None
return None
def get_local_image_by_id(self, item_id, remote=True, inner=False):
"""
        Query a local image URL from the media server by item id
        :param: item_id: ID in Emby
        :param: remote whether for remote use; should be True for TG/WeChat clients
        :param: inner whether called internally by NT; when True the NT relay is used
"""
if not self._host or not self._apikey:
return None
if not remote:
image_url = "%sItems/%s/Images/Primary" % (self._host, item_id)
if inner:
return self.get_nt_image_url(image_url)
return image_url
else:
host = self._play_host or self._host
image_url = "%sItems/%s/Images/Primary" % (host, item_id)
if IpUtils.is_internal(host):
return self.get_nt_image_url(url=image_url, remote=True)
return image_url
def __refresh_emby_library_by_id(self, item_id):
"""
        Ask Emby to refresh the library of a single item
"""
if not self._host or not self._apikey:
return False
req_url = "%semby/Items/%s/Refresh?Recursive=true&api_key=%s" % (self._host, item_id, self._apikey)
try:
res = RequestUtils().post_res(req_url)
if res:
return True
else:
log.info(f"【{self.client_name}】刷新媒体库对象 {item_id} 失败,无法连接Emby!")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Items/Id/Refresh出错:" + str(e))
return False
return False
def refresh_root_library(self):
"""
        Ask Emby to refresh the entire library
"""
if not self._host or not self._apikey:
return False
req_url = "%semby/Library/Refresh?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().post_res(req_url)
if res:
return True
else:
log.info(f"【{self.client_name}】刷新媒体库失败,无法连接Emby!")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Library/Refresh出错:" + str(e))
return False
return False
def refresh_library_by_items(self, items):
"""
        Refresh libraries by type, title and year
        :param items: list of recognized media info whose libraries need a refresh
"""
if not items:
return
# 收集要刷新的媒体库信息
log.info(f"【{self.client_name}】开始刷新Emby媒体库...")
library_ids = []
for item in items:
if not item:
continue
library_id = self.__get_emby_library_id_by_item(item)
if library_id and library_id not in library_ids:
library_ids.append(library_id)
# 开始刷新媒体库
if "/" in library_ids:
self.refresh_root_library()
return
for library_id in library_ids:
if library_id != "/":
self.__refresh_emby_library_by_id(library_id)
log.info(f"【{self.client_name}】Emby媒体库刷新完成")
def __get_emby_library_id_by_item(self, item):
"""
        Find which library holds the media and return the ID of the node to refresh
        :param item: dict made of title, year and type
"""
if not item.get("title") or not item.get("year") or not item.get("type"):
return None
if item.get("type") != MediaType.MOVIE.value:
item_id = self.__get_emby_series_id_by_name(item.get("title"), item.get("year"))
if item_id:
# 存在电视剧,则直接刷新这个电视剧就行
return item_id
else:
if self.get_movies(item.get("title"), item.get("year")):
# 已存在,不用刷新
return None
# 查找需要刷新的媒体库ID
for folder in self._folders:
# 找同级路径最多的媒体库(要求容器内映射路径与实际一致)
max_equal_path_id = None
max_path_len = 0
equal_path_num = 0
for subfolder in folder.get("SubFolders"):
path_list = re.split(pattern='/+|\\\\+', string=subfolder.get("Path"))
if item.get("category") != path_list[-1]:
continue
try:
path_len = len(os.path.commonpath([item.get("target_path"), subfolder.get("Path")]))
if path_len >= max_path_len:
max_path_len = path_len
max_equal_path_id = subfolder.get("Id")
equal_path_num += 1
except Exception as err:
print(str(err))
continue
if max_equal_path_id:
return max_equal_path_id if equal_path_num == 1 else folder.get("Id")
# 如果找不到,只要路径中有分类目录名就命中
for subfolder in folder.get("SubFolders"):
if subfolder.get("Path") and re.search(r"[/\\]%s" % item.get("category"), subfolder.get("Path")):
return folder.get("Id")
# 刷新根目录
return "/"
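    # Illustrative sketch (not part of the original client): the library lookup
    # above scores sub-folders by the length of their common path with the
    # transfer target, so the deepest matching folder wins. Paths hypothetical.
    @staticmethod
    def _example_commonpath_score(target="/media/tv/Anime/Show/S01",
                                  subfolder="/media/tv/Anime"):
        return len(os.path.commonpath([target, subfolder]))  # 15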
def get_libraries(self):
"""
        Get the list of all libraries on the media server
"""
if not self._host or not self._apikey:
return []
libraries = []
for library in self.__get_emby_librarys() or []:
match library.get("CollectionType"):
case "movies":
library_type = MediaType.MOVIE.value
case "tvshows":
library_type = MediaType.TV.value
case _:
continue
image = self.get_local_image_by_id(library.get("Id"), remote=False, inner=True)
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type,
"image": image,
"link": f'{self._play_host or self._host}web/index.html'
f'#!/videos?serverId={self._serverid}&parentId={library.get("Id")}'
})
return libraries
def get_iteminfo(self, itemid):
"""
        Get details of a single item
"""
if not itemid:
return {}
if not self._host or not self._apikey:
return {}
req_url = "%semby/Users/%s/Items/%s?api_key=%s" % (self._host, self._user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {}
def get_play_url(self, item_id):
"""
        Build the media playback URL
        :param item_id: ID of the media item
"""
return f"{self._play_host or self._host}web/index.html#!/item?id={item_id}&context=home&serverId={self._serverid}"
def get_items(self, parent):
"""
        Iterate over all media items under the given parent node
"""
        if not parent:
            yield {}
            return
        if not self._host or not self._apikey:
            yield {}
            return
req_url = "%semby/Users/%s/Items?ParentId=%s&api_key=%s" % (self._host, self._user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
results = res.json().get("Items") or []
for result in results:
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"originalTitle": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
elif "Folder" in result.get("Type"):
for item in self.get_items(parent=result.get('Id')):
yield item
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items出错:" + str(e))
yield {}
def get_playing_sessions(self):
"""
        Get sessions that are currently playing
"""
if not self._host or not self._apikey:
return []
playing_sessions = []
req_url = "%semby/Sessions?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
sessions = res.json()
for session in sessions:
if session.get("NowPlayingItem") and not session.get("PlayState", {}).get("IsPaused"):
playing_sessions.append(session)
return playing_sessions
except Exception as e:
ExceptionUtils.exception_traceback(e)
return []
def get_webhook_message(self, message):
"""
        Parse an Emby webhook payload
"""
eventItem = {'event': message.get('Event', '')}
if message.get('Item'):
if message.get('Item', {}).get('Type') == 'Episode':
eventItem['item_type'] = "TV"
eventItem['item_name'] = "%s %s%s %s" % (
message.get('Item', {}).get('SeriesName'),
"S" + str(message.get('Item', {}).get('ParentIndexNumber')),
"E" + str(message.get('Item', {}).get('IndexNumber')),
message.get('Item', {}).get('Name'))
eventItem['item_id'] = message.get('Item', {}).get('SeriesId')
eventItem['season_id'] = message.get('Item', {}).get('ParentIndexNumber')
eventItem['episode_id'] = message.get('Item', {}).get('IndexNumber')
elif message.get('Item', {}).get('Type') == 'Audio':
eventItem['item_type'] = "AUD"
album = message.get('Item', {}).get('Album')
file_name = message.get('Item', {}).get('FileName')
eventItem['item_name'] = album
eventItem['overview'] = file_name
eventItem['item_id'] = message.get('Item', {}).get('AlbumId')
else:
eventItem['item_type'] = "MOV"
eventItem['item_name'] = "%s %s" % (
message.get('Item', {}).get('Name'), "(" + str(message.get('Item', {}).get('ProductionYear')) + ")")
eventItem['item_path'] = message.get('Item', {}).get('Path')
eventItem['item_id'] = message.get('Item', {}).get('Id')
eventItem['tmdb_id'] = message.get('Item', {}).get('ProviderIds', {}).get('Tmdb')
if message.get('Item', {}).get('Overview') and len(message.get('Item', {}).get('Overview')) > 100:
eventItem['overview'] = str(message.get('Item', {}).get('Overview'))[:100] + "..."
else:
eventItem['overview'] = message.get('Item', {}).get('Overview')
eventItem['percentage'] = message.get('TranscodingInfo', {}).get('CompletionPercentage')
if not eventItem['percentage']:
if message.get('PlaybackInfo', {}).get('PositionTicks'):
eventItem['percentage'] = message.get('PlaybackInfo', {}).get('PositionTicks') / \
message.get('Item', {}).get('RunTimeTicks') * 100
eventItem['play_url'] = f"/open?url=" \
f"{quote(self.get_play_url(eventItem.get('item_id')))}&type=emby"
if message.get('Session'):
eventItem['ip'] = message.get('Session').get('RemoteEndPoint')
eventItem['device_name'] = message.get('Session').get('DeviceName')
eventItem['client'] = message.get('Session').get('Client')
if message.get("User"):
eventItem['user_name'] = message.get("User").get('Name')
return eventItem
def get_resume(self, num=12):
"""
        Get continue-watching media
"""
if not self._host or not self._apikey:
return None
req_url = f"{self._host}Users/{self._user}/Items/Resume?Limit={num}&MediaTypes=Video&api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
result = res.json().get("Items") or []
ret_resume = []
for item in result:
if item.get("Type") not in ["Movie", "Episode"]:
continue
item_type = MediaType.MOVIE.value if item.get("Type") == "Movie" else MediaType.TV.value
link = self.get_play_url(item.get("Id"))
if item_type == MediaType.MOVIE.value:
title = item.get("Name")
else:
if item.get("ParentIndexNumber") == 1:
title = f'{item.get("SeriesName")} 第{item.get("IndexNumber")}集'
else:
title = f'{item.get("SeriesName")} 第{item.get("ParentIndexNumber")}季第{item.get("IndexNumber")}集'
if item_type == MediaType.MOVIE.value:
if item.get("BackdropImageTags"):
image = self.__get_backdrop_url(item_id=item.get("Id"),
image_tag=item.get("BackdropImageTags")[0],
remote=False,
inner=True)
else:
image = self.get_local_image_by_id(item.get("Id"), remote=False, inner=True)
else:
image = self.__get_backdrop_url(item_id=item.get("SeriesId"),
image_tag=item.get("SeriesPrimaryImageTag"),
remote=False,
inner=True)
if not image:
image = self.get_local_image_by_id(item.get("SeriesId"), remote=False, inner=True)
ret_resume.append({
"id": item.get("Id"),
"name": title,
"type": item_type,
"image": image,
"link": link,
"percent": item.get("UserData", {}).get("PlayedPercentage")
})
return ret_resume
else:
log.error(f"【{self.client_name}】Users/Items/Resume 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items/Resume出错:" + str(e))
return []
def get_latest(self, num=20):
"""
        Get the recently added media
"""
if not self._host or not self._apikey:
return None
req_url = f"{self._host}Users/{self._user}/Items/Latest?Limit={num}&MediaTypes=Video&api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
result = res.json() or []
ret_latest = []
for item in result:
if item.get("Type") not in ["Movie", "Series"]:
continue
item_type = MediaType.MOVIE.value if item.get("Type") == "Movie" else MediaType.TV.value
link = self.get_play_url(item.get("Id"))
image = self.get_local_image_by_id(item_id=item.get("Id"), remote=False, inner=True)
ret_latest.append({
"id": item.get("Id"),
"name": item.get("Name"),
"type": item_type,
"image": image,
"link": link
})
return ret_latest
else:
log.error(f"【{self.client_name}】Users/Items/Latest 未获取到返回数据")
except Exception as e:
ExceptionUtils.exception_traceback(e)
log.error(f"【{self.client_name}】连接Users/Items/Latest出错:" + str(e))
return []
| 35,712 | Python | .py | 776 | 29.118557 | 169 | 0.504236 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,039 | _base.py | demigody_nas-tools/app/mediaserver/client/_base.py | from abc import ABCMeta, abstractmethod
from urllib.parse import quote
from config import Config
class _IMediaClient(metaclass=ABCMeta):
    # Media server ID
    client_id = ""
    # Media server type
    client_type = ""
    # Media server name
    client_name = ""
@abstractmethod
def match(self, ctype):
"""
        Match the client instance type
        """
        pass
    @abstractmethod
    def get_type(self):
        """
        Get the media server type
        """
        pass
    @abstractmethod
    def get_status(self):
        """
        Check connectivity
        """
        pass
    @abstractmethod
    def get_user_count(self):
        """
        Get the user count
        """
        pass
    @abstractmethod
    def get_activity_log(self, num):
        """
        Get media server activity records
        """
        pass
    @abstractmethod
    def get_medias_count(self):
        """
        Get the numbers of movie, TV series and anime media items
        :return: MovieCount SeriesCount SongCount
"""
pass
@abstractmethod
def get_movies(self, title, year):
"""
        Check by title and year whether a movie exists; return matches as a list
        :param title: title
        :param year: year; may be empty, in which case no year filter is applied
        :return: list of dicts with title and year attributes
        """
        pass
    @abstractmethod
    def get_tv_episodes(self, item_id=None, title=None, year=None, tmdbid=None, season=None):
        """
        Query all episodes of a TV series by title, year and season
        :param item_id: ID on the server
        :param title: title
        :param year: year; may be empty, in which case no year filter is applied
        :param tmdbid: TMDB ID
        :param season: season number, numeric
        :return: list of all episodes
        """
        pass
    @abstractmethod
    def get_no_exists_episodes(self, meta_info, season, total_num):
        """
        Query which episodes are missing, by title, year, season and total episode count
        :param meta_info: recognized media info to query
        :param season: season number, numeric
        :param total_num: total number of episodes in the season
        :return: list of episode numbers missing from the season
        """
        pass
    @abstractmethod
    def get_remote_image_by_id(self, item_id, image_type):
        """
        Query a remote image URL by item id
        :param item_id: ID on the server
        :param image_type: image type, e.g. poster or backdrop
        :return: URL of the image on TMDB
"""
pass
@abstractmethod
def get_local_image_by_id(self, item_id):
"""
        Query a local image URL by item id; a publicly reachable address is required
        :param item_id: ID on the server
        :return: URL of the image
        """
        pass
    @abstractmethod
    def refresh_root_library(self):
        """
        Refresh the entire library
        """
        pass
    @abstractmethod
    def refresh_library_by_items(self, items):
        """
        Refresh libraries by type, title and year
        :param items: list of recognized media info whose libraries need a refresh
        """
        pass
    @abstractmethod
    def get_libraries(self):
        """
        Get the list of all libraries on the media server
        """
        pass
    @abstractmethod
    def get_items(self, parent):
        """
        Get all media items in a library
        :param parent: ID of the parent node
        """
        pass
    @abstractmethod
    def get_play_url(self, item_id):
        """
        Build the playback URL for a media item
        :param item_id: ID of the media item
"""
pass
@abstractmethod
def get_playing_sessions(self):
"""
        Get sessions that are currently playing
        """
        pass
    @abstractmethod
    def get_webhook_message(self, message):
        """
        Parse a webhook payload into the message content structure
        """
        pass
    @staticmethod
    def get_nt_image_url(url, remote=False):
        """
        Get the NT relay URL for an intranet image
        :param: url: URL of the image
        :param: remote: whether a full URL is required
"""
if not url:
return ""
if remote:
domain = Config().get_domain()
if domain:
return f"{domain}/img?url={quote(url)}"
else:
return ""
else:
return f"img?url={quote(url)}"
| 4,476 | Python | .py | 157 | 15.121019 | 93 | 0.53904 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,040 | downloader.py | demigody_nas-tools/app/downloader/downloader.py | import os
from threading import Lock
from enum import Enum
import json
from apscheduler.schedulers.background import BackgroundScheduler
import log
from app.conf import ModuleConf
from app.conf import SystemConfig
from app.filetransfer import FileTransfer
from app.helper import DbHelper, ThreadHelper, SubmoduleHelper
from app.media import Media
from app.media.meta import MetaInfo
from app.mediaserver import MediaServer
from app.message import Message
from app.plugins import EventManager
from app.sites import Sites, SiteSubtitle
from app.utils import Torrent, StringUtils, SystemUtils, ExceptionUtils, NumberUtils
from app.utils.commons import singleton
from app.utils.types import MediaType, DownloaderType, SearchType, RmtMode, EventType, SystemConfigKey
from config import Config, PT_TAG, RMT_MEDIAEXT, PT_TRANSFER_INTERVAL
lock = Lock()
client_lock = Lock()
@singleton
class Downloader:
    # Client instances
    clients = {}
    _downloader_schema = []
    _download_order = None
    _download_settings = {}
    _downloader_confs = {}
    _monitor_downloader_ids = []
    # Enum mapping downloader ID to downloader name
    _DownloaderEnum = None
_scheduler = None
message = None
mediaserver = None
filetransfer = None
media = None
sites = None
sitesubtitle = None
dbhelper = None
systemconfig = None
eventmanager = None
def __init__(self):
self._downloader_schema = SubmoduleHelper.import_submodules(
'app.downloader.client',
filter_func=lambda _, obj: hasattr(obj, 'client_id')
)
log.debug(f"【Downloader】加载下载器类型:{self._downloader_schema}")
self.init_config()
def init_config(self):
self.dbhelper = DbHelper()
self.message = Message()
self.mediaserver = MediaServer()
self.filetransfer = FileTransfer()
self.media = Media()
self.sites = Sites()
self.systemconfig = SystemConfig()
self.eventmanager = EventManager()
self.sitesubtitle = SiteSubtitle()
# 清空已存在下载器实例
self.clients = {}
# 下载器配置,生成实例
self._downloader_confs = {}
self._monitor_downloader_ids = []
for downloader_conf in self.dbhelper.get_downloaders():
if not downloader_conf:
continue
did = downloader_conf.ID
name = downloader_conf.NAME
enabled = downloader_conf.ENABLED
# 下载器监控配置
transfer = downloader_conf.TRANSFER
only_nastool = downloader_conf.ONLY_NASTOOL
match_path = downloader_conf.MATCH_PATH
rmt_mode = downloader_conf.RMT_MODE
rmt_mode_name = ModuleConf.RMT_MODES.get(rmt_mode).value if rmt_mode else ""
# 输出日志
if transfer:
log_content = ""
if only_nastool:
log_content += "启用标签隔离,"
if match_path:
log_content += "启用目录隔离,"
log.info(f"【Downloader】读取到监控下载器:{name}{log_content}转移方式:{rmt_mode_name}")
if enabled:
self._monitor_downloader_ids.append(did)
else:
log.info(f"【Downloader】下载器:{name} 不进行监控:下载器未启用")
# 下载器登录配置
config = json.loads(downloader_conf.CONFIG)
dtype = downloader_conf.TYPE
self._downloader_confs[str(did)] = {
"id": did,
"name": name,
"type": dtype,
"enabled": enabled,
"transfer": transfer,
"only_nastool": only_nastool,
"match_path": match_path,
"rmt_mode": rmt_mode,
"rmt_mode_name": rmt_mode_name,
"config": config,
"download_dir": json.loads(downloader_conf.DOWNLOAD_DIR)
}
# 下载器ID-名称枚举类生成
self._DownloaderEnum = Enum('DownloaderIdName',
{did: conf.get("name") for did, conf in self._downloader_confs.items()})
pt = Config().get_config('pt')
if pt:
self._download_order = pt.get("download_order")
# 下载设置
self._download_settings = {
"-1": {
"id": -1,
"name": "预设",
"category": '',
"tags": PT_TAG,
"is_paused": 0,
"upload_limit": 0,
"download_limit": 0,
"ratio_limit": 0,
"seeding_time_limit": 0,
"downloader": "",
"downloader_name": "",
"downloader_type": ""
}
}
download_settings = self.dbhelper.get_download_setting()
for download_setting in download_settings:
downloader_id = download_setting.DOWNLOADER
download_conf = self._downloader_confs.get(str(downloader_id))
if download_conf:
downloader_name = download_conf.get("name")
downloader_type = download_conf.get("type")
else:
downloader_name = ""
downloader_type = ""
downloader_id = ""
self._download_settings[str(download_setting.ID)] = {
"id": download_setting.ID,
"name": download_setting.NAME,
"category": download_setting.CATEGORY,
"tags": download_setting.TAGS,
"is_paused": download_setting.IS_PAUSED,
"upload_limit": download_setting.UPLOAD_LIMIT,
"download_limit": download_setting.DOWNLOAD_LIMIT,
"ratio_limit": download_setting.RATIO_LIMIT / 100,
"seeding_time_limit": download_setting.SEEDING_TIME_LIMIT,
"downloader": downloader_id,
"downloader_name": downloader_name,
"downloader_type": downloader_type
}
# 启动下载器监控服务
self.start_service()
def __build_class(self, ctype, conf=None):
for downloader_schema in self._downloader_schema:
try:
if downloader_schema.match(ctype):
return downloader_schema(conf)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
@property
def default_downloader_id(self):
"""
        Get the default downloader id
"""
default_downloader_id = SystemConfig().get(SystemConfigKey.DefaultDownloader)
if not default_downloader_id or not self.get_downloader_conf(default_downloader_id):
default_downloader_id = ""
return default_downloader_id
@property
def default_download_setting_id(self):
"""
        Get the default download setting
        :return: id of the default download setting
"""
default_download_setting_id = SystemConfig().get(SystemConfigKey.DefaultDownloadSetting) or "-1"
if not self._download_settings.get(default_download_setting_id):
default_download_setting_id = "-1"
return default_download_setting_id
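    # Illustrative sketch (not part of the original class): how the two
    # properties above degrade when a stored id is stale; the downloader id
    # falls back to "" and the download-setting id to the built-in "-1" preset.
    @staticmethod
    def _example_setting_fallback(stored_id="99", known_settings=("-1", "1")):
        return stored_id if stored_id in known_settings else "-1"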
def get_downloader_type(self, downloader_id=None):
"""
        Get the downloader type
"""
if not downloader_id:
return self.default_client.get_type()
return self.__get_client(downloader_id).get_type()
@property
def default_client(self):
"""
        Get the default downloader client instance
"""
return self.__get_client(self.default_downloader_id)
@property
def monitor_downloader_ids(self):
"""
        Get the list of monitored downloader IDs
"""
return self._monitor_downloader_ids
def start_service(self):
"""
        Schedule the transfer jobs
"""
# 移出现有任务
self.stop_service()
# 启动转移任务
if not self._monitor_downloader_ids:
return
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
for downloader_id in self._monitor_downloader_ids:
self._scheduler.add_job(func=self.transfer,
args=[downloader_id],
trigger='interval',
seconds=PT_TRANSFER_INTERVAL)
self._scheduler.print_jobs()
self._scheduler.start()
log.info("下载文件转移服务启动,目的目录:媒体库")
def __get_client(self, did=None):
if not did:
return None
downloader_conf = self.get_downloader_conf(did)
if not downloader_conf:
log.info("【Downloader】下载器配置不存在")
return None
if not downloader_conf.get("enabled"):
log.info(f"【Downloader】下载器 {downloader_conf.get('name')} 未启用")
return None
ctype = downloader_conf.get("type")
config = downloader_conf.get("config")
config["download_dir"] = downloader_conf.get("download_dir")
config["name"] = downloader_conf.get("name")
with client_lock:
if not self.clients.get(str(did)):
self.clients[str(did)] = self.__build_class(ctype, config)
return self.clients.get(str(did))
def download(self,
media_info,
is_paused=None,
tag=None,
download_dir=None,
download_setting=None,
downloader_id=None,
upload_limit=None,
download_limit=None,
torrent_file=None,
in_from=None,
user_name=None,
proxy=None):
"""
        Add a download task, dispatching to the client of the downloader in use
        :param media_info: media info to download, including the URL
        :param is_paused: whether to add the download paused
        :param tag: torrent tag
        :param download_dir: download directory to use
        :param download_setting: download setting id; None uses the -1 default, "-2" uses no setting
        :param downloader_id: ID of the downloader to use
        :param upload_limit: upload speed limit
        :param download_limit: download speed limit
        :param torrent_file: path of the torrent file
        :param in_from: source of the request
        :param user_name: user name
        :param proxy: whether to use a proxy; an explicit True/False overrides the site_info setting
        :return: downloader ID, torrent ID, error message
"""
def __download_fail(msg):
"""
            Fire the download-failure event and send a message
"""
self.eventmanager.send_event(EventType.DownloadFail, {
"media_info": media_info.to_dict(),
"reason": msg
})
if in_from:
self.message.send_download_fail_message(media_info, f"添加下载任务失败:{msg}")
# 触发下载事件
self.eventmanager.send_event(EventType.DownloadAdd, {
"media_info": media_info.to_dict(),
"is_paused": is_paused,
"tag": tag,
"download_dir": download_dir,
"download_setting": download_setting,
"downloader_id": downloader_id,
"torrent_file": torrent_file
})
# 标题
title = media_info.org_string
# 详情页面
page_url = media_info.page_url
# 默认值
site_info, dl_files_folder, dl_files, retmsg = {}, "", [], ""
if torrent_file:
# 有种子文件时解析种子信息
url = os.path.basename(torrent_file)
content, dl_files_folder, dl_files, retmsg = Torrent().read_torrent_content(torrent_file)
else:
# 没有种子文件解析链接
dl_enclosure = media_info.enclosure
if dl_enclosure.startswith("["):
# 需要解码获取下载地址
url = Torrent.get_download_url(url=dl_enclosure)
else:
url = dl_enclosure if Sites().get_sites_by_url_domain(
dl_enclosure) else Torrent.format_enclosure(dl_enclosure)
if not url:
__download_fail("下载链接为空")
return None, None, "下载链接为空"
# 获取种子内容,磁力链不解析
if url.startswith("magnet:"):
content = url
else:
# 获取Cookie和ua等
site_info = self.sites.get_sites(siteurl=url)
# 下载种子文件,并读取信息
_, content, dl_files_folder, dl_files, retmsg = Torrent().get_torrent_info(
url=url,
cookie=site_info.get("cookie"),
apikey=site_info.get("apikey"),
ua=site_info.get("ua"),
referer=page_url if site_info.get("referer") else None,
proxy=proxy if proxy is not None else site_info.get("proxy")
)
# 解析完成
if retmsg:
log.warn("【Downloader】%s" % retmsg)
if not content:
__download_fail(retmsg)
return None, None, retmsg
# 下载设置
if not download_setting and media_info.site:
# 站点的下载设置
download_setting = self.sites.get_site_download_setting(media_info.site)
if download_setting == "-2":
# 不使用下载设置
download_attr = {}
elif download_setting:
# 传入的下载设置
download_attr = self.get_download_setting(download_setting) \
or self.get_download_setting(self.default_download_setting_id)
else:
# 默认下载设置
download_attr = self.get_download_setting(self.default_download_setting_id)
# 下载设置名称
download_setting_name = download_attr.get('name')
# 下载器实例
if not downloader_id:
downloader_id = download_attr.get("downloader")
downloader_conf = self.get_downloader_conf(downloader_id)
downloader = self.__get_client(downloader_id)
if not downloader or not downloader_conf:
__download_fail("请检查下载设置所选下载器是否有效且启用")
return None, None, f"下载设置 {download_setting_name} 所选下载器失效"
downloader_name = downloader_conf.get("name")
# 开始添加下载
try:
# 下载设置中的分类
category = download_attr.get("category")
# 合并TAG
tags = download_attr.get("tags")
if tags:
tags = str(tags).split(";")
if tag:
if isinstance(tag, list):
tags.extend(tag)
else:
tags.append(tag)
else:
# 字符串是空串或者None
tags = []
if tag:
if isinstance(tag, list):
tags = tag
else:
tags = [tag]
# 添加站点tag
site_tags = self.sites.get_site_download_tags(media_info.site)
if site_tags:
tags.extend(str(site_tags).split(";"))
# 暂停
if is_paused is None:
is_paused = StringUtils.to_bool(download_attr.get("is_paused"))
else:
is_paused = True if is_paused else False
# 上传限速
if not upload_limit:
upload_limit = download_attr.get("upload_limit")
# 下载限速
if not download_limit:
download_limit = download_attr.get("download_limit")
# 分享率
ratio_limit = download_attr.get("ratio_limit")
# 做种时间
seeding_time_limit = download_attr.get("seeding_time_limit")
# 下载目录设置
if not download_dir:
download_info = self.__get_download_dir_info(media_info, downloader_conf.get("download_dir"))
download_dir = download_info.get('path')
# 从下载目录中获取分类标签
if not category:
category = download_info.get('category')
# 添加下载
print_url = content if isinstance(content, str) else url
if is_paused:
log.info(f"【Downloader】下载器 {downloader_name} 添加任务并暂停:%s,目录:%s,Url:%s" % (
title, download_dir, print_url))
else:
log.info(f"【Downloader】下载器 {downloader_name} 添加任务:%s,目录:%s,Url:%s" % (
title, download_dir, print_url))
# 下载ID
download_id = None
downloader_type = downloader.get_type()
if downloader_type == DownloaderType.TR:
ret = downloader.add_torrent(content,
is_paused=is_paused,
download_dir=download_dir,
cookie=site_info.get("cookie"))
if ret:
download_id = ret.hashString
downloader.change_torrent(tid=download_id,
tag=tags,
upload_limit=upload_limit,
download_limit=download_limit,
ratio_limit=ratio_limit,
seeding_time_limit=seeding_time_limit)
elif downloader_type == DownloaderType.QB:
# 加标签以获取添加下载后的编号
torrent_tag = "NT" + StringUtils.generate_random_str(5)
if tags:
tags += [torrent_tag]
else:
tags = [torrent_tag]
# 布局默认原始
ret = downloader.add_torrent(content,
is_paused=is_paused,
download_dir=download_dir,
tag=tags,
category=category,
content_layout="Original",
upload_limit=upload_limit,
download_limit=download_limit,
ratio_limit=ratio_limit,
seeding_time_limit=seeding_time_limit,
cookie=site_info.get("cookie"))
if ret:
download_id = downloader.get_torrent_id_by_tag(torrent_tag)
else:
# 其它下载器,添加下载后需返回下载ID或添加状态
ret = downloader.add_torrent(content,
is_paused=is_paused,
tag=tags,
download_dir=download_dir,
category=category)
download_id = ret
# 添加下载成功
if ret:
# 计算数据文件保存的路径
save_dir = subtitle_dir = None
visit_dir = self.get_download_visit_dir(download_dir)
if visit_dir:
if dl_files_folder:
# 种子文件带目录
save_dir = os.path.join(visit_dir, dl_files_folder)
subtitle_dir = save_dir
elif dl_files:
# 种子文件为单独文件
save_dir = os.path.join(visit_dir, dl_files[0])
subtitle_dir = visit_dir
else:
save_dir = None
subtitle_dir = visit_dir
# 登记下载历史,记录下载目录
self.dbhelper.insert_download_history(media_info=media_info,
downloader=downloader_id,
download_id=download_id,
save_dir=save_dir)
# 下载站点字幕文件
if page_url \
and subtitle_dir \
and site_info \
and site_info.get("subtitle"):
ThreadHelper().start_thread(
self.sitesubtitle.download,
(
media_info,
site_info.get("id"),
site_info.get("cookie"),
site_info.get("ua"),
subtitle_dir
)
)
# 发送下载消息
if in_from:
media_info.user_name = user_name
self.message.send_download_message(in_from=in_from,
can_item=media_info,
download_setting_name=download_setting_name,
downloader_name=downloader_name)
return downloader_id, download_id, ""
else:
__download_fail("请检查下载任务是否已存在")
return downloader_id, None, f"下载器 {downloader_name} 添加下载任务失败,请检查下载任务是否已存在"
except Exception as e:
ExceptionUtils.exception_traceback(e)
__download_fail(str(e))
log.error(f"【Downloader】下载器 {downloader_name} 添加任务出错:%s" % str(e))
return None, None, str(e)
def transfer(self, downloader_id=None):
"""
        Transfer completed downloads: identify and rename files into the media library
"""
downloader_ids = [downloader_id] if downloader_id \
else self._monitor_downloader_ids
for downloader_id in downloader_ids:
with lock:
# 获取下载器配置
downloader_conf = self.get_downloader_conf(downloader_id)
name = downloader_conf.get("name")
only_nastool = downloader_conf.get("only_nastool")
match_path = downloader_conf.get("match_path")
rmt_mode = ModuleConf.RMT_MODES.get(downloader_conf.get("rmt_mode"))
# 获取下载器实例
_client = self.__get_client(downloader_id)
if not _client:
continue
trans_tasks = _client.get_transfer_task(tag=PT_TAG if only_nastool else None, match_path=match_path)
if trans_tasks:
log.info(f"【Downloader】下载器 {name} 开始转移下载文件...")
else:
continue
for task in trans_tasks:
done_flag, done_msg = self.filetransfer.transfer_media(
in_from=self._DownloaderEnum[str(downloader_id)],
in_path=task.get("path"),
rmt_mode=rmt_mode)
if not done_flag:
log.warn(f"【Downloader】下载器 {name} 任务%s 转移失败:%s" % (task.get("path"), done_msg))
_client.set_torrents_status(ids=task.get("id"),
tags=task.get("tags"))
else:
if rmt_mode in [RmtMode.MOVE, RmtMode.RCLONE, RmtMode.MINIO]:
log.warn(f"【Downloader】下载器 {name} 移动模式下删除种子文件:%s" % task.get("id"))
_client.delete_torrents(delete_file=True, ids=task.get("id"))
else:
_client.set_torrents_status(ids=task.get("id"),
tags=task.get("tags"))
log.info(f"【Downloader】下载器 {name} 下载文件转移结束")
def get_torrents(self, downloader_id=None, ids=None, tag=None):
"""
        Get torrent information
        :param downloader_id: downloader ID
        :param ids: torrent IDs
        :param tag: torrent tag
        :return: list of torrent information
"""
if not downloader_id:
downloader_id = self.default_downloader_id
_client = self.__get_client(downloader_id)
if not _client:
return None
try:
torrents, error_flag = _client.get_torrents(tag=tag, ids=ids)
if error_flag:
return None
return torrents
except Exception as err:
ExceptionUtils.exception_traceback(err)
return None
def get_remove_torrents(self, downloader_id=None, config=None):
"""
查询符合删种策略的种子信息
:return: 符合删种策略的种子信息列表
"""
if not config or not downloader_id:
return []
_client = self.__get_client(downloader_id)
if not _client:
return []
config["filter_tags"] = []
if config.get("onlynastool"):
config["filter_tags"] = config["tags"] + [PT_TAG]
else:
config["filter_tags"] = config["tags"]
torrents = _client.get_remove_torrents(config=config)
torrents.sort(key=lambda x: x.get("name"))
return torrents
def get_downloading_torrents(self, downloader_id=None, ids=None, tag=None):
"""
查询正在下载中的种子信息
        :return: 种子信息列表,发生错误时返回None
"""
if not downloader_id:
downloader_id = self.default_downloader_id
_client = self.__get_client(downloader_id)
if not _client:
return None
try:
return _client.get_downloading_torrents(tag=tag, ids=ids)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return None
    def get_downloading_progress(self, downloader_id=None, ids=None, force_list=False):
"""
查询正在下载中的进度信息
"""
if not downloader_id:
downloader_id = self.default_downloader_id
downloader_conf = self.get_downloader_conf(downloader_id)
only_nastool = downloader_conf.get("only_nastool") if not force_list else False
_client = self.__get_client(downloader_id)
if not _client:
return []
if only_nastool:
tag = [PT_TAG]
else:
tag = None
return _client.get_downloading_progress(tag=tag, ids=ids)
def get_completed_torrents(self, downloader_id=None, ids=None, tag=None):
"""
查询下载完成的种子列表
:param downloader_id: 下载器ID
:param ids: 种子ID列表
:param tag: 种子标签
:return: 种子信息列表,发生错误时返回None
"""
if not downloader_id:
downloader_id = self.default_downloader_id
_client = self.__get_client(downloader_id)
if not _client:
return None
return _client.get_completed_torrents(ids=ids, tag=tag)
def start_torrents(self, downloader_id=None, ids=None):
"""
下载控制:开始
:param downloader_id: 下载器ID
:param ids: 种子ID列表
:return: 处理状态
"""
if not ids:
return False
_client = self.__get_client(downloader_id) if downloader_id else self.default_client
if not _client:
return False
return _client.start_torrents(ids)
def stop_torrents(self, downloader_id=None, ids=None):
"""
下载控制:停止
:param downloader_id: 下载器ID
:param ids: 种子ID列表
:return: 处理状态
"""
if not ids:
return False
_client = self.__get_client(downloader_id) if downloader_id else self.default_client
if not _client:
return False
return _client.stop_torrents(ids)
def delete_torrents(self, downloader_id=None, ids=None, delete_file=False):
"""
删除种子
:param downloader_id: 下载器ID
:param ids: 种子ID列表
:param delete_file: 是否删除文件
:return: 处理状态
"""
if not ids:
return False
_client = self.__get_client(downloader_id) if downloader_id else self.default_client
if not _client:
return False
return _client.delete_torrents(delete_file=delete_file, ids=ids)
def batch_download(self,
in_from: SearchType,
media_list: list,
need_tvs: dict = None,
user_name=None):
"""
根据命中的种子媒体信息,添加下载,由RSS或Searcher调用
:param in_from: 来源
:param media_list: 命中并已经识别好的媒体信息列表,包括名称、年份、季、集等信息
:param need_tvs: 缺失的剧集清单,对于剧集只有在该清单中的季和集才会下载,对于电影无需输入该参数
:param user_name: 用户名称
        :return: 已经添加了下载的媒体信息列表、剩余未下载到的媒体信息
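
        用法示意 Usage sketch (illustrative only; `matched_medias` is hypothetical,
        and SearchType.RSS is just an example member):
            done, missing = downloader.batch_download(
                in_from=SearchType.RSS,
                media_list=matched_medias,
                need_tvs={123456: [{"season": 1, "episodes": [3, 4], "total_episodes": 12}]})
        done holds the media actually handed to a downloader; missing keeps the need_tvs shape.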
"""
# 已下载的项目
return_items = []
# 返回按季、集数倒序排序的列表
download_list = Torrent().get_download_list(media_list, self._download_order)
def __download(download_item, torrent_file=None, tag=None, is_paused=None):
"""
下载及发送通知
"""
_downloader_id, did, msg = self.download(
media_info=download_item,
download_dir=download_item.save_path,
download_setting=download_item.download_setting,
torrent_file=torrent_file,
tag=tag,
is_paused=is_paused,
in_from=in_from,
user_name=user_name)
if did:
if download_item not in return_items:
return_items.append(download_item)
return _downloader_id, did
def __update_seasons(tmdbid, need, current):
"""
更新need_tvs季数
"""
need = list(set(need).difference(set(current)))
            for cur in current:
                # 遍历副本,避免在迭代过程中修改原列表导致漏删
                for nt in list(need_tvs.get(tmdbid) or []):
                    if cur == nt.get("season") or (cur == 1 and not nt.get("season")):
                        need_tvs[tmdbid].remove(nt)
if not need_tvs.get(tmdbid):
need_tvs.pop(tmdbid)
return need
def __update_episodes(tmdbid, seq, need, current):
"""
更新need_tvs集数
"""
need = list(set(need).difference(set(current)))
if need:
need_tvs[tmdbid][seq]["episodes"] = need
else:
need_tvs[tmdbid].pop(seq)
if not need_tvs.get(tmdbid):
need_tvs.pop(tmdbid)
return need
def __get_season_episodes(tmdbid, season):
"""
获取需要的季的集数
"""
if not need_tvs.get(tmdbid):
return 0
for nt in need_tvs.get(tmdbid):
if season == nt.get("season"):
return nt.get("total_episodes")
return 0
# 下载掉所有的电影
for item in download_list:
if item.type == MediaType.MOVIE:
__download(item)
# 电视剧整季匹配
if need_tvs:
# 先把整季缺失的拿出来,看是否刚好有所有季都满足的种子
need_seasons = {}
for need_tmdbid, need_tv in need_tvs.items():
for tv in need_tv:
if not tv:
continue
if not tv.get("episodes"):
if not need_seasons.get(need_tmdbid):
need_seasons[need_tmdbid] = []
need_seasons[need_tmdbid].append(tv.get("season") or 1)
# 查找整季包含的种子,只处理整季没集的种子或者是集数超过季的种子
for need_tmdbid, need_season in need_seasons.items():
for item in download_list:
if item.type == MediaType.MOVIE:
continue
item_season = item.get_season_list()
if item.get_episode_list():
continue
if need_tmdbid == item.tmdb_id:
if set(item_season).issubset(set(need_season)):
if len(item_season) == 1:
# 只有一季的可能是命名错误,需要打开种子鉴别,只有实际集数大于等于总集数才下载
torrent_episodes, torrent_path = self.get_torrent_episodes(
url=item.enclosure,
page_url=item.page_url)
if not torrent_episodes \
or len(torrent_episodes) >= __get_season_episodes(need_tmdbid, item_season[0]):
_, download_id = __download(download_item=item, torrent_file=torrent_path)
else:
log.info(
f"【Downloader】种子 {item.org_string} 未含集数信息,解析文件数为 {len(torrent_episodes)}")
continue
else:
_, download_id = __download(item)
if download_id:
# 更新仍需季集
need_season = __update_seasons(tmdbid=need_tmdbid,
need=need_season,
current=item_season)
# 电视剧季内的集匹配
if need_tvs:
need_tv_list = list(need_tvs)
for need_tmdbid in need_tv_list:
need_tv = need_tvs.get(need_tmdbid)
if not need_tv:
continue
index = 0
for tv in need_tv:
need_season = tv.get("season") or 1
need_episodes = tv.get("episodes")
total_episodes = tv.get("total_episodes")
# 缺失整季的转化为缺失集进行比较
if not need_episodes:
need_episodes = list(range(1, total_episodes + 1))
for item in download_list:
if item.type == MediaType.MOVIE:
continue
if item.tmdb_id == need_tmdbid:
if item in return_items:
continue
# 只处理单季含集的种子
item_season = item.get_season_list()
if len(item_season) != 1 or item_season[0] != need_season:
continue
item_episodes = item.get_episode_list()
if not item_episodes:
continue
# 为需要集的子集则下载
if set(item_episodes).issubset(set(need_episodes)):
_, download_id = __download(item)
if download_id:
# 更新仍需集数
need_episodes = __update_episodes(tmdbid=need_tmdbid,
need=need_episodes,
seq=index,
current=item_episodes)
index += 1
# 仍然缺失的剧集,从整季中选择需要的集数文件下载,仅支持QB和TR
if need_tvs:
need_tv_list = list(need_tvs)
for need_tmdbid in need_tv_list:
need_tv = need_tvs.get(need_tmdbid)
if not need_tv:
continue
index = 0
for tv in need_tv:
need_season = tv.get("season") or 1
need_episodes = tv.get("episodes")
if not need_episodes:
continue
for item in download_list:
if item.type == MediaType.MOVIE:
continue
if item in return_items:
continue
if not need_episodes:
break
# 选中一个单季整季的或单季包括需要的所有集的
if item.tmdb_id == need_tmdbid \
and (not item.get_episode_list()
or set(item.get_episode_list()).intersection(set(need_episodes))) \
and len(item.get_season_list()) == 1 \
and item.get_season_list()[0] == need_season:
# 检查种子看是否有需要的集
torrent_episodes, torrent_path = self.get_torrent_episodes(
url=item.enclosure,
page_url=item.page_url)
selected_episodes = set(torrent_episodes).intersection(set(need_episodes))
if not selected_episodes:
log.info("【Downloader】%s 没有需要的集,跳过..." % item.org_string)
continue
# 添加下载并暂停
downloader_id, download_id = __download(download_item=item,
torrent_file=torrent_path,
is_paused=True)
if not download_id:
continue
# 更新仍需集数
need_episodes = __update_episodes(tmdbid=need_tmdbid,
need=need_episodes,
seq=index,
current=selected_episodes)
# 设置任务只下载想要的文件
log.info("【Downloader】从 %s 中选取集:%s" % (item.org_string, selected_episodes))
self.set_files_status(tid=download_id,
need_episodes=selected_episodes,
downloader_id=downloader_id)
# 重新开始任务
log.info("【Downloader】%s 开始下载 " % item.org_string)
self.start_torrents(ids=download_id,
downloader_id=downloader_id)
# 记录下载项
return_items.append(item)
index += 1
# 返回下载的资源,剩下没下完的
return return_items, need_tvs
def check_exists_medias(self, meta_info, no_exists=None, total_ep=None):
"""
检查媒体库,查询是否存在,对于剧集同时返回不存在的季集信息
:param meta_info: 已识别的媒体信息,包括标题、年份、季、集信息
:param no_exists: 在调用该方法前已经存储的不存在的季集信息,有传入时该函数搜索的内容将会叠加后输出
:param total_ep: 各季的总集数
:return: 当前媒体是否缺失,各标题总的季集和缺失的季集,需要发送的消息
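
        Shape sketch of the no_exists dict built below (values illustrative):
            {123456: [{"season": 1, "episodes": [2, 5], "total_episodes": 10}]}
        an empty "episodes" list marks the whole season as missing.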
"""
if not no_exists:
no_exists = {}
if not total_ep:
total_ep = {}
# 查找的季
if not meta_info.begin_season:
search_season = None
else:
search_season = meta_info.get_season_list()
# 查找的集
search_episode = meta_info.get_episode_list()
if search_episode and not search_season:
search_season = [1]
# 返回的消息列表
message_list = []
if meta_info.type != MediaType.MOVIE:
# 是否存在的标志
return_flag = False
# 搜索电视剧的信息
tv_info = self.media.get_tmdb_info(mtype=MediaType.TV, tmdbid=meta_info.tmdb_id)
if tv_info:
# 传入检查季
total_seasons = []
if search_season:
for season in search_season:
if total_ep.get(season):
episode_num = total_ep.get(season)
else:
episode_num = self.media.get_tmdb_season_episodes_num(tv_info=tv_info, season=season)
if not episode_num:
log.info("【Downloader】%s 第%s季 不存在" % (meta_info.get_title_string(), season))
message_list.append("%s 第%s季 不存在" % (meta_info.get_title_string(), season))
continue
total_seasons.append({"season_number": season, "episode_count": episode_num})
log.info(
"【Downloader】%s 第%s季 共有 %s 集" % (meta_info.get_title_string(), season, episode_num))
else:
                    # 共有多少季,每季有多少集
total_seasons = self.media.get_tmdb_tv_seasons(tv_info=tv_info)
log.info(
"【Downloader】%s %s 共有 %s 季" % (
meta_info.type.value, meta_info.get_title_string(), len(total_seasons)))
message_list.append(
"%s %s 共有 %s 季" % (meta_info.type.value, meta_info.get_title_string(), len(total_seasons)))
# 没有得到总季数时,返回None
if not total_seasons:
return_flag = None
else:
# 查询缺少多少集
for season in total_seasons:
season_number = season.get("season_number")
episode_count = season.get("episode_count")
if not season_number or not episode_count:
continue
# 检查Emby
no_exists_episodes = self.mediaserver.get_no_exists_episodes(meta_info,
season_number,
episode_count)
# 没有配置Emby
if no_exists_episodes is None:
no_exists_episodes = self.filetransfer.get_no_exists_medias(meta_info,
season_number,
episode_count)
if no_exists_episodes:
# 排序
no_exists_episodes.sort()
# 缺失集初始化
if not no_exists.get(meta_info.tmdb_id):
no_exists[meta_info.tmdb_id] = []
# 缺失集提示文本
                            no_exists_tvs_str = "、".join(["%s" % tv for tv in no_exists_episodes])
# 存入总缺失集
if len(no_exists_episodes) >= episode_count:
no_item = {"season": season_number, "episodes": [], "total_episodes": episode_count}
log.info(
"【Downloader】%s 第%s季 缺失 %s 集" % (
meta_info.get_title_string(), season_number, episode_count))
if search_season:
message_list.append(
"%s 第%s季 缺失 %s 集" % (meta_info.title, season_number, episode_count))
else:
message_list.append("第%s季 缺失 %s 集" % (season_number, episode_count))
else:
no_item = {"season": season_number, "episodes": no_exists_episodes,
"total_episodes": episode_count}
                                log.info(
                                    "【Downloader】%s 第%s季 缺失集:%s" % (
                                        meta_info.get_title_string(), season_number, no_exists_tvs_str))
if search_season:
                                    message_list.append(
                                        "%s 第%s季 缺失集:%s" % (meta_info.title, season_number, no_exists_tvs_str))
else:
                                    message_list.append("第%s季 缺失集:%s" % (season_number, no_exists_tvs_str))
if no_item not in no_exists.get(meta_info.tmdb_id):
no_exists[meta_info.tmdb_id].append(no_item)
# 输入检查集
if search_episode:
# 有集数,肯定只有一季
if not set(search_episode).intersection(set(no_exists_episodes)):
# 搜索的跟不存在的没有交集,说明都存在了
msg = f"媒体库中已存在剧集:\n" \
f" • {meta_info.get_title_string()} {meta_info.get_season_episode_string()}"
log.info(f"【Downloader】{msg}")
message_list.append(msg)
return_flag = True
break
else:
log.info("【Downloader】%s 第%s季 共%s集 已全部存在" % (
meta_info.get_title_string(), season_number, episode_count))
if search_season:
message_list.append(
"%s 第%s季 共%s集 已全部存在" % (meta_info.title, season_number, episode_count))
else:
message_list.append(
"第%s季 共%s集 已全部存在" % (season_number, episode_count))
else:
log.info("【Downloader】%s 无法查询到媒体详细信息" % meta_info.get_title_string())
message_list.append("%s 无法查询到媒体详细信息" % meta_info.get_title_string())
return_flag = None
# 全部存在
if return_flag is False and not no_exists.get(meta_info.tmdb_id):
return_flag = True
# 返回
return return_flag, no_exists, message_list
# 检查电影
else:
exists_movies = self.mediaserver.get_movies(meta_info.title, meta_info.year)
if exists_movies is None:
exists_movies = self.filetransfer.get_no_exists_medias(meta_info)
if exists_movies:
movies_str = "\n • ".join(["%s (%s)" % (m.get('title'), m.get('year')) for m in exists_movies])
msg = f"媒体库中已存在电影:\n • {movies_str}"
log.info(f"【Downloader】{msg}")
message_list.append(msg)
return True, {}, message_list
return False, {}, message_list
def get_files(self, tid, downloader_id=None):
"""
获取种子文件列表
"""
# 客户端
_client = self.__get_client(downloader_id) if downloader_id else self.default_client
if not _client:
return []
# 种子文件
torrent_files = _client.get_files(tid)
if not torrent_files:
return []
ret_files = []
if _client.get_type() == DownloaderType.TR:
for file_id, torrent_file in enumerate(torrent_files):
ret_files.append({
"id": file_id,
"name": torrent_file.name
})
elif _client.get_type() == DownloaderType.QB:
for torrent_file in torrent_files:
ret_files.append({
"id": torrent_file.get("index"),
"name": torrent_file.get("name")
})
return ret_files
def set_files_status(self, tid, need_episodes, downloader_id=None):
"""
设置文件下载状态,选中需要下载的季集对应的文件下载,其余不下载
:param tid: 种子的hash或id
:param need_episodes: 需要下载的文件的集信息
:param downloader_id: 下载器ID
:return: 返回选中的集的列表
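
        Usage sketch (hypothetical torrent hash):
            selected = downloader.set_files_status(tid="abcd1234", need_episodes=[1, 2, 3])
        selected contains the episode numbers whose files were marked for download.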
"""
        success_episodes = []
# 客户端
if not downloader_id:
downloader_id = self.default_downloader_id
_client = self.__get_client(downloader_id)
downloader_conf = self.get_downloader_conf(downloader_id)
if not _client:
return []
# 种子文件
torrent_files = self.get_files(tid=tid, downloader_id=downloader_id)
if not torrent_files:
return []
if downloader_conf.get("type") == "transmission":
files_info = {}
for torrent_file in torrent_files:
file_id = torrent_file.get("id")
file_name = torrent_file.get("name")
meta_info = MetaInfo(file_name)
if not meta_info.get_episode_list():
selected = False
else:
selected = set(meta_info.get_episode_list()).issubset(set(need_episodes))
if selected:
                success_episodes = list(set(success_episodes).union(set(meta_info.get_episode_list())))
if not files_info.get(tid):
files_info[tid] = {file_id: {'priority': 'normal', 'selected': selected}}
else:
files_info[tid][file_id] = {'priority': 'normal', 'selected': selected}
            if success_episodes and files_info:
_client.set_files(file_info=files_info)
elif downloader_conf.get("type") == "qbittorrent":
file_ids = []
for torrent_file in torrent_files:
file_id = torrent_file.get("id")
file_name = torrent_file.get("name")
meta_info = MetaInfo(file_name)
if not meta_info.get_episode_list() or not set(meta_info.get_episode_list()).issubset(
set(need_episodes)):
file_ids.append(file_id)
else:
                    success_episodes = list(set(success_episodes).union(set(meta_info.get_episode_list())))
            if success_episodes and file_ids:
_client.set_files(torrent_hash=tid, file_ids=file_ids, priority=0)
        return success_episodes
def get_download_dirs(self, setting=None):
"""
返回下载器中设置的保存目录
"""
if not setting:
setting = self.default_download_setting_id
# 查询下载设置
download_setting = self.get_download_setting(sid=setting)
downloader_conf = self.get_downloader_conf(download_setting.get("downloader"))
if not downloader_conf:
return []
downloaddir = downloader_conf.get("download_dir")
# 查询目录
save_path_list = [attr.get("save_path") for attr in downloaddir if attr.get("save_path")]
        return sorted(set(save_path_list))
def get_download_visit_dirs(self):
"""
返回所有下载器中设置的访问目录
"""
download_dirs = []
for downloader_conf in self.get_downloader_conf().values():
download_dirs += downloader_conf.get("download_dir")
visit_path_list = [attr.get("container_path") or attr.get("save_path") for attr in download_dirs if
attr.get("save_path")]
        return sorted(set(visit_path_list))
def get_download_visit_dir(self, download_dir, downloader_id=None):
"""
返回下载器中设置的访问目录
"""
if not downloader_id:
downloader_id = self.default_downloader_id
downloader_conf = self.get_downloader_conf(downloader_id)
_client = self.__get_client(downloader_id)
if not _client:
return ""
true_path, _ = _client.get_replace_path(download_dir, downloader_conf.get("download_dir"))
return true_path
@staticmethod
def __get_download_dir_info(media, downloaddir):
"""
根据媒体信息读取一个下载目录的信息
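
        downloaddir entry sketch (values illustrative):
            {"type": "电影", "category": "华语电影", "save_path": "/downloads/movies",
             "container_path": "/media/movies", "label": "movies"}
        entries are skipped on type/category mismatch or insufficient free space.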
"""
if media:
for attr in downloaddir or []:
if not attr:
continue
if attr.get("type") and attr.get("type") != media.type.value:
continue
if attr.get("category") and attr.get("category") != media.category:
continue
if not attr.get("save_path") and not attr.get("label"):
continue
if (attr.get("container_path") or attr.get("save_path")) \
and os.path.exists(attr.get("container_path") or attr.get("save_path")) \
and media.size \
and SystemUtils.get_free_space(
attr.get("container_path") or attr.get("save_path")
) < NumberUtils.get_size_gb(
StringUtils.num_filesize(media.size)
):
continue
return {
"path": attr.get("save_path"),
"category": attr.get("label")
}
return {"path": None, "category": None}
@staticmethod
def __get_client_type(type_name):
"""
根据名称返回下载器类型
"""
if not type_name:
return None
for dict_type in DownloaderType:
if dict_type.name == type_name or dict_type.value == type_name:
return dict_type
def get_torrent_episodes(self, url, page_url=None):
"""
解析种子文件,获取集数
:return: 集数列表、种子路径
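
        Usage sketch (URL and local path are illustrative):
            episodes, torrent_path = self.get_torrent_episodes(
                url="https://example-site/download/1.torrent")
        e.g. ([1, 2, 3], "/tmp/1.torrent") for a torrent holding E01-E03 media files.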
"""
site_info = self.sites.get_sites(siteurl=url)
# 保存种子文件
file_path, _, _, files, retmsg = Torrent().get_torrent_info(
url=url,
cookie=site_info.get("cookie"),
ua=site_info.get("ua"),
referer=page_url if site_info.get("referer") else None,
proxy=site_info.get("proxy")
)
if not files:
log.error("【Downloader】读取种子文件集数出错:%s" % retmsg)
return [], None
episodes = []
for file in files:
if os.path.splitext(file)[-1] not in RMT_MEDIAEXT:
continue
meta = MetaInfo(file)
if not meta.begin_episode:
continue
episodes = list(set(episodes).union(set(meta.get_episode_list())))
return episodes, file_path
def get_download_setting(self, sid=None):
"""
获取下载设置
:return: 下载设置
"""
# 更新预设
preset_downloader_conf = self.get_downloader_conf(self.default_downloader_id)
if preset_downloader_conf:
self._download_settings["-1"]["downloader"] = self.default_downloader_id
self._download_settings["-1"]["downloader_name"] = preset_downloader_conf.get("name")
self._download_settings["-1"]["downloader_type"] = preset_downloader_conf.get("type")
if not sid:
return self._download_settings
return self._download_settings.get(str(sid)) or {}
def set_speed_limit(self, downloader_id=None, download_limit=None, upload_limit=None):
"""
设置速度限制
:param downloader_id: 下载器ID
:param download_limit: 下载速度限制,单位KB/s
        :param upload_limit: 上传速度限制,单位KB/s
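
        Usage sketch: set_speed_limit(downloader_id=1, download_limit=10240, upload_limit=2048)
        caps the client at 10 MB/s down and 2 MB/s up; invalid or empty values fall back to 0.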
"""
if not downloader_id:
return
_client = self.__get_client(downloader_id)
if not _client:
return
try:
download_limit = int(download_limit) if download_limit else 0
except Exception as err:
ExceptionUtils.exception_traceback(err)
download_limit = 0
try:
upload_limit = int(upload_limit) if upload_limit else 0
except Exception as err:
ExceptionUtils.exception_traceback(err)
upload_limit = 0
_client.set_speed_limit(download_limit=download_limit, upload_limit=upload_limit)
def get_downloader_conf(self, did=None):
"""
获取下载器配置
"""
if not did:
return self._downloader_confs
return self._downloader_confs.get(str(did))
def get_downloader_conf_simple(self):
"""
获取简化下载器配置
"""
ret_dict = {}
for downloader_conf in self.get_downloader_conf().values():
ret_dict[str(downloader_conf.get("id"))] = {
"id": downloader_conf.get("id"),
"name": downloader_conf.get("name"),
"type": downloader_conf.get("type"),
"enabled": downloader_conf.get("enabled"),
}
return ret_dict
def get_downloader(self, downloader_id=None):
"""
获取下载器实例
"""
if not downloader_id:
return self.default_client
return self.__get_client(downloader_id)
def get_status(self, dtype=None, config=None):
"""
测试下载器状态
"""
if not config or not dtype:
return False
# 测试状态
download_client = self.__build_class(ctype=dtype, conf=config)
state = download_client.get_status() if download_client else False
if not state:
log.error(f"【Downloader】下载器连接测试失败")
return state
def recheck_torrents(self, downloader_id=None, ids=None):
"""
下载控制:重新校验种子
:param downloader_id: 下载器ID
:param ids: 种子ID列表
:return: 处理状态
"""
if not ids:
return False
_client = self.__get_client(downloader_id) if downloader_id else self.default_client
if not _client:
return False
return _client.recheck_torrents(ids)
def stop_service(self):
"""
停止服务
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._scheduler.shutdown()
self._scheduler = None
except Exception as e:
print(str(e))
def get_download_history(self, date=None, hid=None, num=30, page=1):
"""
获取下载历史记录
"""
return self.dbhelper.get_download_history(date=date, hid=hid, num=num, page=page)
def get_download_history_by_title(self, title):
"""
根据标题查询下载历史记录
:return:
"""
return self.dbhelper.get_download_history_by_title(title=title) or []
def get_download_history_by_downloader(self, downloader, download_id):
"""
根据下载器和下载ID查询下载历史记录
:return:
"""
return self.dbhelper.get_download_history_by_downloader(downloader=downloader,
download_id=download_id)
def update_downloader(self,
did,
name,
enabled,
dtype,
transfer,
only_nastool,
match_path,
rmt_mode,
config,
download_dir):
"""
更新下载器
"""
ret = self.dbhelper.update_downloader(
did=did,
name=name,
enabled=enabled,
dtype=dtype,
transfer=transfer,
only_nastool=only_nastool,
match_path=match_path,
rmt_mode=rmt_mode,
config=config,
download_dir=download_dir
)
self.init_config()
return ret
def delete_downloader(self, did):
"""
删除下载器
"""
ret = self.dbhelper.delete_downloader(did=did)
self.init_config()
return ret
def check_downloader(self, did=None, transfer=None, only_nastool=None, enabled=None, match_path=None):
"""
检查下载器
"""
ret = self.dbhelper.check_downloader(did=did,
transfer=transfer,
only_nastool=only_nastool,
enabled=enabled,
match_path=match_path)
self.init_config()
return ret
def delete_download_setting(self, sid):
"""
删除下载设置
"""
ret = self.dbhelper.delete_download_setting(sid=sid)
self.init_config()
return ret
def update_download_setting(self,
sid,
name,
category,
tags,
is_paused,
upload_limit,
download_limit,
ratio_limit,
seeding_time_limit,
downloader):
"""
更新下载设置
"""
ret = self.dbhelper.update_download_setting(
sid=sid,
name=name,
category=category,
tags=tags,
is_paused=is_paused,
upload_limit=upload_limit,
download_limit=download_limit,
ratio_limit=ratio_limit,
seeding_time_limit=seeding_time_limit,
downloader=downloader
)
self.init_config()
return ret
| 65,338 | Python | .py | 1,414 | 26.399576 | 119 | 0.487483 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,041 | aria2.py | demigody_nas-tools/app/downloader/client/aria2.py | import os
import re
import log
from app.utils import RequestUtils, ExceptionUtils, StringUtils
from app.utils.types import DownloaderType
from config import Config
from app.downloader.client._base import _IDownloadClient
from app.downloader.client._pyaria2 import PyAria2
class Aria2(_IDownloadClient):
schema = "aria2"
# 下载器ID
client_id = "aria2"
client_type = DownloaderType.ARIA2
client_name = DownloaderType.ARIA2.value
_client_config = {}
_client = None
host = None
port = None
secret = None
    download_dir = []
    name = "测试"
def __init__(self, config=None):
if config:
self._client_config = config
self.init_config()
self.connect()
def init_config(self):
if self._client_config:
self.host = self._client_config.get("host")
if self.host:
if not self.host.startswith('http'):
self.host = "http://" + self.host
if self.host.endswith('/'):
self.host = self.host[:-1]
self.port = self._client_config.get("port")
self.secret = self._client_config.get("secret")
            self.download_dir = self._client_config.get('download_dir') or []
            self.name = self._client_config.get('name') or ""
if self.host and self.port:
self._client = PyAria2(secret=self.secret, host=self.host, port=self.port)
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def connect(self):
pass
def get_status(self):
if not self._client:
return False
ver = self._client.getVersion()
return True if ver else False
def get_torrents(self, ids=None, status=None, **kwargs):
if not self._client:
return []
ret_torrents = []
if ids:
if isinstance(ids, list):
for gid in ids:
ret_torrents.append(self._client.tellStatus(gid=gid))
else:
ret_torrents = [self._client.tellStatus(gid=ids)]
elif status:
if status == "downloading":
                ret_torrents = (self._client.tellActive() or []) + (self._client.tellWaiting(offset=-1, num=100) or [])
else:
ret_torrents = self._client.tellStopped(offset=-1, num=1000)
return ret_torrents
def get_downloading_torrents(self, **kwargs):
return self.get_torrents(status="downloading")
def get_completed_torrents(self, **kwargs):
return self.get_torrents(status="completed")
def set_torrents_status(self, ids, **kwargs):
return self.delete_torrents(ids=ids, delete_file=False)
def get_transfer_task(self, tag=None, match_path=False):
if not self._client:
return []
torrents = self.get_completed_torrents()
trans_tasks = []
for torrent in torrents:
name = torrent.get('bittorrent', {}).get('info', {}).get("name")
if not name:
continue
path = torrent.get("dir")
if not path:
continue
true_path, replace_flag = self.get_replace_path(path, self.download_dir)
# 开启目录隔离,未进行目录替换的不处理
if match_path and not replace_flag:
                log.debug(f"【{self.client_name}】{self.name} 开启目录隔离,但 {name} 未匹配下载目录范围")
continue
trans_tasks.append({'path': os.path.join(true_path, name).replace("\\", "/"), 'id': torrent.get("gid")})
return trans_tasks
def get_remove_torrents(self, **kwargs):
return []
def add_torrent(self, content, download_dir=None, **kwargs):
if not self._client:
return None
if isinstance(content, str):
# 转换为磁力链
if re.match("^https*://", content):
try:
p = RequestUtils().get_res(url=content, allow_redirects=False)
if p and p.headers.get("Location"):
content = p.headers.get("Location")
except Exception as result:
ExceptionUtils.exception_traceback(result)
return self._client.addUri(uris=[content], options=dict(dir=download_dir))
else:
return self._client.addTorrent(torrent=content, uris=[], options=dict(dir=download_dir))
def start_torrents(self, ids):
if not self._client:
return False
return self._client.unpause(gid=ids)
def stop_torrents(self, ids):
if not self._client:
return False
return self._client.pause(gid=ids)
def delete_torrents(self, delete_file, ids):
if not self._client:
return False
return self._client.remove(gid=ids)
def get_download_dirs(self):
return []
def change_torrent(self, **kwargs):
pass
def get_downloading_progress(self, **kwargs):
"""
获取正在下载的种子进度
"""
Torrents = self.get_downloading_torrents()
DispTorrents = []
for torrent in Torrents:
# 进度
try:
                progress = round(int(torrent.get('completedLength')) / int(torrent.get("totalLength")) * 100, 1)
except ZeroDivisionError:
progress = 0.0
if torrent.get('status') in ['paused']:
state = "Stoped"
speed = "已暂停"
else:
state = "Downloading"
_dlspeed = StringUtils.str_filesize(torrent.get('downloadSpeed'))
_upspeed = StringUtils.str_filesize(torrent.get('uploadSpeed'))
speed = "%s%sB/s %s%sB/s" % (chr(8595), _dlspeed, chr(8593), _upspeed)
DispTorrents.append({
'id': torrent.get('gid'),
'name': torrent.get('bittorrent', {}).get('info', {}).get("name"),
'speed': speed,
'state': state,
'progress': progress
})
return DispTorrents
def set_speed_limit(self, download_limit=None, upload_limit=None):
"""
设置速度限制
:param download_limit: 下载速度限制,单位KB/s
        :param upload_limit: 上传速度限制,单位KB/s
"""
if not self._client:
return
        download_limit = int(download_limit or 0) * 1024
        upload_limit = int(upload_limit or 0) * 1024
try:
speed_opt = self._client.getGlobalOption()
if speed_opt['max-overall-upload-limit'] != upload_limit:
speed_opt['max-overall-upload-limit'] = upload_limit
if speed_opt['max-overall-download-limit'] != download_limit:
speed_opt['max-overall-download-limit'] = download_limit
return self._client.changeGlobalOption(speed_opt)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return False
def get_type(self):
return self.client_type
def get_files(self, tid):
try:
return self._client.getFiles(gid=tid)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return None
def recheck_torrents(self, ids):
pass
def set_torrents_tag(self, ids, tags):
pass
| 7,482 | Python | .py | 182 | 29.357143 | 116 | 0.57669 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,042 | transmission.py | demigody_nas-tools/app/downloader/client/transmission.py | import os.path
import re
import time
from datetime import datetime
import transmission_rpc
import log
from app.utils import ExceptionUtils, StringUtils
from app.utils.types import DownloaderType
from app.downloader.client._base import _IDownloadClient
class Transmission(_IDownloadClient):
# 下载器ID
client_id = "transmission"
# 下载器类型
client_type = DownloaderType.TR
# 下载器名称
client_name = DownloaderType.TR.value
# 参考transmission web,仅查询需要的参数,加速种子搜索
    _trarg = ["id", "name", "status", "labels", "hashString", "totalSize", "percentDone", "addedDate", "trackerStats",
              "leftUntilDone", "rateDownload", "rateUpload", "recheckProgress",
              "peersGettingFromUs", "peersSendingToUs", "uploadRatio", "uploadedEver", "downloadedEver", "downloadDir",
              "error", "errorString", "doneDate", "queuePosition", "activityDate", "trackers"]
# 私有属性
_client_config = {}
trc = None
host = None
port = None
username = None
password = None
download_dir = []
name = "测试"
def __init__(self, config):
self._client_config = config
self.init_config()
self.connect()
        # 设置未完成种子添加.part后缀
        if self.trc:
            self.trc.set_session(rename_partial_files=True)
def init_config(self):
if self._client_config:
self.host = self._client_config.get('host')
self.port = int(self._client_config.get('port')) if str(self._client_config.get('port')).isdigit() else 0
self.username = self._client_config.get('username')
self.password = self._client_config.get('password')
self.download_dir = self._client_config.get('download_dir') or []
self.name = self._client_config.get('name') or ""
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def connect(self):
if self.host and self.port:
self.trc = self.__login_transmission()
def __login_transmission(self):
"""
连接transmission
:return: transmission对象
"""
try:
# 登录
trt = transmission_rpc.Client(host=self.host,
port=self.port,
username=self.username,
password=self.password,
timeout=60)
return trt
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 连接出错:{str(err)}")
return None
def get_status(self):
return True if self.trc else False
@staticmethod
def __parse_ids(ids):
"""
统一处理种子ID
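
        e.g. ["1", "2"] -> [1, 2]; "3" -> 3; hash strings pass through unchanged.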
"""
if isinstance(ids, list) and any([str(x).isdigit() for x in ids]):
ids = [int(x) for x in ids if str(x).isdigit()]
elif not isinstance(ids, list) and str(ids).isdigit():
ids = int(ids)
return ids
def get_torrents(self, ids=None, status=None, tag=None):
"""
获取种子列表
返回结果 种子列表, 是否有错误
"""
if not self.trc:
return [], True
ids = self.__parse_ids(ids)
try:
torrents = self.trc.get_torrents(ids=ids, arguments=self._trarg)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return [], True
if status and not isinstance(status, list):
status = [status]
if tag and not isinstance(tag, list):
tag = [tag]
ret_torrents = []
for torrent in torrents:
if status and torrent.status not in status:
continue
labels = torrent.labels if hasattr(torrent, "labels") else []
include_flag = True
if tag:
for t in tag:
if t and t not in labels:
include_flag = False
break
if include_flag:
ret_torrents.append(torrent)
return ret_torrents, False
def get_completed_torrents(self, ids=None, tag=None):
"""
获取已完成的种子列表
return 种子列表, 发生错误时返回None
"""
if not self.trc:
return None
try:
torrents, error = self.get_torrents(status=["seeding", "seed_pending"], ids=ids, tag=tag)
return None if error else torrents or []
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取已完成的种子列表出错:{str(err)}")
return None
def get_downloading_torrents(self, ids=None, tag=None):
"""
获取正在下载的种子列表
return 种子列表, 发生错误时返回None
"""
if not self.trc:
return None
try:
torrents, error = self.get_torrents(ids=ids,
status=["downloading", "download_pending"],
tag=tag)
return None if error else torrents or []
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取正在下载的种子列表出错:{str(err)}")
return None
def set_torrents_status(self, ids, tags=None):
"""
设置种子为已整理状态
"""
if not self.trc:
return
ids = self.__parse_ids(ids)
# 合成标签
if tags:
if not isinstance(tags, list):
tags = [tags, "已整理"]
else:
tags.append("已整理")
else:
tags = ["已整理"]
# 打标签
try:
self.trc.change_torrent(labels=tags, ids=ids)
log.info(f"【{self.client_name}】{self.name} 设置种子标签成功")
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置种子为已整理状态出错:{str(err)}")
def set_torrent_tag(self, tid, tag):
"""
设置种子标签
"""
if not tid or not tag:
return
ids = self.__parse_ids(tid)
try:
self.trc.change_torrent(labels=tag, ids=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置种子标签出错:{str(err)}")
def change_torrent(self,
tid,
tag=None,
upload_limit=None,
download_limit=None,
ratio_limit=None,
seeding_time_limit=None):
"""
设置种子
:param tid: ID
:param tag: 标签
:param upload_limit: 上传限速 Kb/s
:param download_limit: 下载限速 Kb/s
:param ratio_limit: 分享率限制
:param seeding_time_limit: 做种时间限制
:return: bool
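
        Usage sketch (hypothetical id): change_torrent(tid=42, upload_limit=1024, ratio_limit=2.0)
        Transmission mode values used below: 1 = apply the per-torrent limit, 2 = unlimited.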
"""
if not tid:
return
else:
ids = self.__parse_ids(tid)
if tag:
if isinstance(tag, list):
labels = tag
else:
labels = [tag]
else:
labels = []
if upload_limit:
uploadLimited = True
uploadLimit = int(upload_limit)
else:
uploadLimited = False
uploadLimit = 0
if download_limit:
downloadLimited = True
downloadLimit = int(download_limit)
else:
downloadLimited = False
downloadLimit = 0
if ratio_limit:
seedRatioMode = 1
seedRatioLimit = round(float(ratio_limit), 2)
else:
seedRatioMode = 2
seedRatioLimit = 0
if seeding_time_limit:
seedIdleMode = 1
seedIdleLimit = int(seeding_time_limit)
else:
seedIdleMode = 2
seedIdleLimit = 0
try:
self.trc.change_torrent(ids=ids,
labels=labels,
uploadLimited=uploadLimited,
uploadLimit=uploadLimit,
downloadLimited=downloadLimited,
downloadLimit=downloadLimit,
seedRatioMode=seedRatioMode,
seedRatioLimit=seedRatioLimit,
seedIdleMode=seedIdleMode,
seedIdleLimit=seedIdleLimit)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置种子出错:{str(err)}")
def get_transfer_task(self, tag=None, match_path=None):
"""
获取下载文件转移任务种子
"""
# 处理下载完成的任务
torrents = self.get_completed_torrents() or []
trans_tasks = []
for torrent in torrents:
# 3.0版本以下的Transmission没有labels
if not hasattr(torrent, "labels"):
log.error(f"【{self.client_name}】{self.name} 版本可能过低,无labels属性,请安装3.0以上版本!")
break
torrent_tags = torrent.labels or ""
# 含"已整理"tag的不处理
if "已整理" in torrent_tags:
continue
# 开启标签隔离,未包含指定标签的不处理
if tag and tag not in torrent_tags:
log.debug(f"【{self.client_name}】{self.name} 开启标签隔离, {torrent.name} 未包含指定标签:{tag}")
continue
path = torrent.download_dir
# 无法获取下载路径的不处理
if not path:
log.debug(f"【{self.client_name}】{self.name} 未获取到 {torrent.name} 下载保存路径")
continue
true_path, replace_flag = self.get_replace_path(path, self.download_dir)
# 开启目录隔离,未进行目录替换的不处理
if match_path and not replace_flag:
log.debug(f"【{self.client_name}】{self.name} 开启目录隔离,但 {torrent.name} 未匹配下载目录范围")
continue
trans_tasks.append({
'path': os.path.join(true_path, torrent.name).replace("\\", "/"),
'id': torrent.hashString,
'tags': torrent.labels
})
return trans_tasks
def get_remove_torrents(self, config=None):
"""
获取自动删种任务
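
        config sketch (keys as consumed below, values illustrative):
            {"ratio": 2, "seeding_time": 48, "size": [1, 500], "upload_avs": 100,
             "savepath_key": "movie", "tracker_key": "example", "tr_error_key": "unregistered",
             "filter_tags": [], "tr_state": ["seeding"], "samedata": True}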
"""
if not config:
return []
remove_torrents = []
remove_torrents_ids = []
torrents, error_flag = self.get_torrents(tag=config.get("filter_tags"),
status=config.get("tr_state"))
if error_flag:
return []
ratio = config.get("ratio")
# 做种时间 单位:小时
seeding_time = config.get("seeding_time")
# 大小 单位:GB
size = config.get("size")
minsize = size[0] * 1024 * 1024 * 1024 if size else 0
maxsize = size[-1] * 1024 * 1024 * 1024 if size else 0
# 平均上传速度 单位 KB/s
upload_avs = config.get("upload_avs")
savepath_key = config.get("savepath_key")
tracker_key = config.get("tracker_key")
tr_error_key = config.get("tr_error_key")
for torrent in torrents:
date_done = torrent.date_done or torrent.date_added
date_now = int(time.mktime(datetime.now().timetuple()))
torrent_seeding_time = date_now - int(time.mktime(date_done.timetuple())) if date_done else 0
torrent_uploaded = torrent.ratio * torrent.total_size
torrent_upload_avs = torrent_uploaded / torrent_seeding_time if torrent_seeding_time else 0
if ratio and torrent.ratio <= ratio:
continue
if seeding_time and torrent_seeding_time <= seeding_time * 3600:
continue
if size and (torrent.total_size >= maxsize or torrent.total_size <= minsize):
continue
if upload_avs and torrent_upload_avs >= upload_avs * 1024:
continue
if savepath_key and not re.findall(savepath_key, torrent.download_dir, re.I):
continue
if tracker_key:
if not torrent.trackers:
continue
else:
                    tracker_key_flag = False
                    for tracker in torrent.trackers:
                        if re.findall(tracker_key, tracker.get("announce", ""), re.I):
                            tracker_key_flag = True
                            break
                    if not tracker_key_flag:
continue
if tr_error_key:
announce_results = [x.last_announce_result for x in torrent.tracker_stats]
announce_results.append(torrent.error_string)
# 如果announce_results中均不匹配tr_error_key,则跳过
if not any([re.findall(tr_error_key, x, re.I) for x in announce_results]):
continue
remove_torrents.append({
"id": torrent.hashString,
"name": torrent.name,
"site": torrent.trackers[0].get("sitename") if torrent.trackers else "",
"size": torrent.total_size
})
remove_torrents_ids.append(torrent.hashString)
if config.get("samedata") and remove_torrents:
remove_torrents_plus = []
for remove_torrent in remove_torrents:
name = remove_torrent.get("name")
size = remove_torrent.get("size")
for torrent in torrents:
if torrent.name == name and torrent.total_size == size and torrent.hashString not in remove_torrents_ids:
remove_torrents_plus.append({
"id": torrent.hashString,
"name": torrent.name,
"site": torrent.trackers[0].get("sitename") if torrent.trackers else "",
"size": torrent.total_size
})
remove_torrents_plus += remove_torrents
return remove_torrents_plus
return remove_torrents
def add_torrent(self, content,
is_paused=False,
download_dir=None,
upload_limit=None,
download_limit=None,
cookie=None,
**kwargs):
try:
ret = self.trc.add_torrent(torrent=content,
download_dir=download_dir,
paused=is_paused,
cookies=cookie)
if ret and ret.hashString:
if upload_limit:
self.set_uploadspeed_limit(ret.hashString, int(upload_limit))
if download_limit:
self.set_downloadspeed_limit(ret.hashString, int(download_limit))
return ret
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 添加种子出错:{str(err)}")
return False
def start_torrents(self, ids):
if not self.trc:
return False
ids = self.__parse_ids(ids)
try:
return self.trc.start_torrent(ids=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 开始下载出错:{str(err)}")
return False
def stop_torrents(self, ids):
if not self.trc:
return False
ids = self.__parse_ids(ids)
try:
return self.trc.stop_torrent(ids=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 停止下载出错:{str(err)}")
return False
def delete_torrents(self, delete_file, ids):
if not self.trc:
return False
if not ids:
return False
ids = self.__parse_ids(ids)
try:
return self.trc.remove_torrent(delete_data=delete_file, ids=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 删除下载出错:{str(err)}")
return False
def get_files(self, tid):
"""
获取种子文件列表
"""
if not tid:
return None
try:
torrent = self.trc.get_torrent(tid)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取种子文件列表出错:{str(err)}")
return None
if torrent:
return torrent.files()
else:
return None
def set_files(self, **kwargs):
"""
设置下载文件的状态
{
<torrent id>: {
<file id>: {
'priority': <priority ('high'|'normal'|'low')>,
'selected': <selected for download (True|False)>
},
...
},
...
}
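
        e.g. (hypothetical torrent id 42, keeping only file 0):
            set_files(file_info={42: {0: {'priority': 'normal', 'selected': True},
                                      1: {'priority': 'normal', 'selected': False}}})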
"""
if not kwargs.get("file_info"):
return False
try:
self.trc.set_files(kwargs.get("file_info"))
return True
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置下载文件的状态出错:{str(err)}")
return False
def get_download_dirs(self):
if not self.trc:
return []
try:
return [self.trc.get_session(timeout=30).download_dir]
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取下载文件夹出错:{str(err)}")
return []
def set_uploadspeed_limit(self, ids, limit):
"""
设置上传限速,单位 KB/sec
"""
if not self.trc:
return
if not ids or not limit:
return
ids = self.__parse_ids(ids)
self.trc.change_torrent(ids, uploadLimit=int(limit))
def set_downloadspeed_limit(self, ids, limit):
"""
设置下载限速,单位 KB/sec
"""
if not self.trc:
return
if not ids or not limit:
return
ids = self.__parse_ids(ids)
self.trc.change_torrent(ids, downloadLimit=int(limit))
def get_downloading_progress(self, tag=None, ids=None):
"""
获取正在下载的种子进度
"""
Torrents = self.get_downloading_torrents(tag=tag, ids=ids) or []
DispTorrents = []
for torrent in Torrents:
if torrent.status in ['stopped']:
state = "Stoped"
speed = "已暂停"
else:
state = "Downloading"
if hasattr(torrent, "rate_download"):
_dlspeed = StringUtils.str_filesize(torrent.rate_download)
else:
_dlspeed = StringUtils.str_filesize(torrent.rateDownload)
if hasattr(torrent, "rate_upload"):
_upspeed = StringUtils.str_filesize(torrent.rate_upload)
else:
_upspeed = StringUtils.str_filesize(torrent.rateUpload)
speed = "%s%sB/s %s%sB/s" % (chr(8595), _dlspeed, chr(8593), _upspeed)
# 进度
progress = round(torrent.progress)
DispTorrents.append({
'id': torrent.hashString,
'name': torrent.name,
'speed': speed,
'state': state,
'progress': progress
})
return DispTorrents
def set_speed_limit(self, download_limit=None, upload_limit=None):
"""
设置速度限制
:param download_limit: 下载速度限制,单位KB/s
        :param upload_limit: 上传速度限制,单位KB/s
"""
if not self.trc:
return
try:
session = self.trc.get_session()
download_limit_enabled = True if download_limit else False
upload_limit_enabled = True if upload_limit else False
if download_limit_enabled == session.speed_limit_down_enabled and \
upload_limit_enabled == session.speed_limit_up_enabled and \
download_limit == session.speed_limit_down and \
upload_limit == session.speed_limit_up:
return
self.trc.set_session(
speed_limit_down=download_limit if download_limit != session.speed_limit_down
else session.speed_limit_down,
speed_limit_up=upload_limit if upload_limit != session.speed_limit_up
else session.speed_limit_up,
speed_limit_down_enabled=download_limit_enabled,
speed_limit_up_enabled=upload_limit_enabled
)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置速度限制出错:{str(err)}")
return False
def recheck_torrents(self, ids):
if not self.trc:
return False
ids = self.__parse_ids(ids)
try:
return self.trc.verify_torrent(ids=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 校验种子出错:{str(err)}")
return False
def get_client_speed(self):
if not self.trc:
return False
try:
session_stats = self.trc.session_stats(timeout=30)
if session_stats:
return {
"up_speed": session_stats.upload_speed,
"dl_speed": session_stats.download_speed
}
return False
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取客户端速度出错:{str(err)}")
return False
| 22,593 | Python | .py | 549 | 25.825137 | 125 | 0.526057 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,043 | pikpak.py | demigody_nas-tools/app/downloader/client/pikpak.py | import asyncio
from pikpakapi import PikPakApi, DownloadStatus
import log
from app.downloader.client._base import _IDownloadClient
from app.utils.types import DownloaderType
from config import Config
class PikPak(_IDownloadClient):
schema = "pikpak"
# 下载器ID
client_id = "pikpak"
client_type = DownloaderType.PIKPAK
client_name = DownloaderType.PIKPAK.value
_client_config = {}
_client = None
username = None
password = None
proxy = None
def __init__(self, config=None):
if config:
self._client_config = config
self.init_config()
self.connect()
def init_config(self):
if self._client_config:
self.username = self._client_config.get("username")
self.password = self._client_config.get("password")
self.proxy = self._client_config.get("proxy")
if self.username and self.password:
self._client = PikPakApi(
username=self.username,
password=self.password,
proxy=self.proxy,
)
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def connect(self):
try:
asyncio.run(self._client.login())
except Exception as err:
print(str(err))
return
def get_status(self):
if not self._client:
return False
try:
asyncio.run(self._client.login())
if self._client.user_id is None:
log.info("PikPak 登录失败")
return False
except Exception as err:
log.error("PikPak 登录出错:%s" % str(err))
return False
return True
def get_torrents(self, ids=None, status=None, **kwargs):
rv = []
        if self._client.user_id is None:
            if not self.get_status():
                return [], True
if ids is not None:
for id in ids:
status = asyncio.run(self._client.get_task_status(id, ''))
if status == DownloadStatus.downloading:
rv.append({"id": id, "finish": False})
if status == DownloadStatus.done:
rv.append({"id": id, "finish": True})
        return rv, False
def get_completed_torrents(self, **kwargs):
return []
def get_downloading_torrents(self, **kwargs):
        if self._client.user_id is None:
            if not self.get_status():
                return []
try:
offline_list = asyncio.run(self._client.offline_list())
return offline_list['tasks']
except Exception as err:
print(str(err))
return []
def get_transfer_task(self, **kwargs):
pass
def get_remove_torrents(self, **kwargs):
return []
def add_torrent(self, content, download_dir=None, **kwargs):
try:
task = asyncio.run(self._client.offline_download(content, download_dir))
taskId = task.get('task', {}).get('id')
return taskId is not None and bool(taskId)
except Exception as e:
log.error("PikPak 添加离线下载任务失败: %s" % str(e))
return None
# 需要完成
def delete_torrents(self, delete_file, ids):
pass
def start_torrents(self, ids):
pass
def stop_torrents(self, ids):
pass
# 需要完成
def set_torrents_status(self, ids, **kwargs):
pass
def get_download_dirs(self):
return []
def change_torrent(self, **kwargs):
pass
# 需要完成
def get_downloading_progress(self, **kwargs):
"""
获取正在下载的种子进度
"""
Torrents = self.get_downloading_torrents()
DispTorrents = []
for torrent in Torrents:
DispTorrents.append({
'id': torrent.get('id'),
'file_id': torrent.get('file_id'),
'name': torrent.get('file_name'),
'nomenu': True,
'noprogress': True
})
return DispTorrents
def set_speed_limit(self, **kwargs):
"""
设置速度限制
"""
pass
def get_type(self):
return self.client_type
def get_files(self, tid):
pass
def recheck_torrents(self, ids):
pass
def set_torrents_tag(self, ids, tags):
pass | 4,535 | Python | .py | 134 | 23.373134 | 92 | 0.561679 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,044 | qbittorrent.py | demigody_nas-tools/app/downloader/client/qbittorrent.py | import os
import re
import time
from datetime import datetime
import log
import qbittorrentapi
from app.downloader.client._base import _IDownloadClient
from app.utils import ExceptionUtils, StringUtils
from app.utils.types import DownloaderType
class Qbittorrent(_IDownloadClient):
# 下载器ID
client_id = "qbittorrent"
# 下载器类型
client_type = DownloaderType.QB
# 下载器名称
client_name = DownloaderType.QB.value
# 私有属性
_client_config = {}
_torrent_management = False
qbc = None
ver = None
host = None
port = None
username = None
password = None
download_dir = []
name = "测试"
def __init__(self, config):
self._client_config = config
self.init_config()
self.connect()
# 种子自动管理模式,根据下载路径设置为下载器设置分类
self.init_torrent_management()
if self.qbc:
# 设置未完成种子添加!qb后缀
self.qbc.app_set_preferences({"incomplete_files_ext": True})
def init_config(self):
if self._client_config:
self.host = self._client_config.get('host')
self.port = int(self._client_config.get('port')) if str(self._client_config.get('port')).isdigit() else 0
self.username = self._client_config.get('username')
self.password = self._client_config.get('password')
self.download_dir = self._client_config.get('download_dir') or []
self.name = self._client_config.get('name') or ""
# 种子管理模式
self._torrent_management = self._client_config.get('torrent_management')
if self._torrent_management not in ["default", "manual", "auto"]:
self._torrent_management = "default"
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def connect(self):
if self.host and self.port:
self.qbc = self.__login_qbittorrent()
def __login_qbittorrent(self):
"""
连接qbittorrent
:return: qbittorrent对象
"""
try:
# 登录
qbt = qbittorrentapi.Client(host=self.host,
port=self.port,
username=self.username,
password=self.password,
VERIFY_WEBUI_CERTIFICATE=False,
REQUESTS_ARGS={'timeout': (15, 60)})
try:
qbt.auth_log_in()
self.ver = qbt.app_version()
except qbittorrentapi.LoginFailed as e:
log.error(f"【{self.client_name}】{self.name} 登录出错:{str(e)}")
return qbt
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 连接出错:{str(err)}")
return None
def get_status(self):
if not self.qbc:
return False
try:
return True if self.qbc.transfer_info() else False
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取状态出错:{str(err)}")
return False
def init_torrent_management(self):
"""
根据设置的标签,自动管理模式下自动创建QB分类
"""
# 手动
if self._torrent_management == "manual":
return
# 默认则查询当前下载器管理模式
if self._torrent_management == "default":
if not self.__get_qb_auto():
return
# 获取下载器目前的分类信息
categories = self.__get_qb_category()
# 更新下载器中分类设置
for dir_item in self.download_dir:
label = dir_item.get("label")
save_path = dir_item.get("save_path")
if not label or not save_path:
continue
# 查询分类是否存在
category_item = categories.get(label)
if not category_item:
# 分类不存在,则创建
self.__update_category(name=label, save_path=save_path)
else:
# 如果分类存在,但是路径不一致,则更新
if os.path.normpath(category_item.get("savePath")) != os.path.normpath(save_path):
self.__update_category(name=label, save_path=save_path, is_edit=True)
def __get_qb_category(self):
"""
查询下载器中已设置的分类
"""
if not self.qbc:
return {}
return self.qbc.torrent_categories.categories or {}
def __get_qb_auto(self):
"""
查询下载器是否开启自动管理
:return:
"""
if not self.qbc:
return {}
preferences = self.qbc.app_preferences() or {}
return preferences.get("auto_tmm_enabled")
def __update_category(self, name, save_path, is_edit=False):
"""
更新分类
"""
try:
if is_edit:
self.qbc.torrent_categories.edit_category(name=name, save_path=save_path)
log.info(f"【{self.client_name}】{self.name} 更新分类:{name},路径:{save_path}")
else:
self.qbc.torrent_categories.create_category(name=name, save_path=save_path)
log.info(f"【{self.client_name}】{self.name} 创建分类:{name},路径:{save_path}")
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置分类:{name},路径:{save_path} 错误:{str(err)}")
def __check_category(self, save_path=""):
"""
自动种子管理模式下检查和设置分类
"""
# 没有保存目录分类为None,不改变现状
if not save_path:
return None
# 获取下载器中的分类信息,查询是否有匹配该目录的分类
categories = self.__get_qb_category()
for category_name, category_item in categories.items():
            category_path = category_item.get("savePath")
            if not category_path:
                continue
            if os.path.normpath(category_path) == os.path.normpath(save_path):
return category_name
return None
def get_torrents(self, ids=None, status=None, tag=None):
"""
获取种子列表
return: 种子列表, 是否发生异常
"""
if not self.qbc:
return [], True
try:
torrents = self.qbc.torrents_info(torrent_hashes=ids,
status_filter=status)
if tag:
results = []
if not isinstance(tag, list):
tag = [tag]
for torrent in torrents:
include_flag = True
for t in tag:
if t and t not in torrent.get("tags"):
include_flag = False
break
if include_flag:
results.append(torrent)
return results or [], False
return torrents or [], False
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取种子列表出错:{str(err)}")
return [], True
def get_completed_torrents(self, ids=None, tag=None):
"""
获取已完成的种子
return: 种子列表, 如发生异常则返回None
"""
if not self.qbc:
return None
torrents, error = self.get_torrents(status=["completed"], ids=ids, tag=tag)
return None if error else torrents or []
def get_downloading_torrents(self, ids=None, tag=None):
"""
获取正在下载的种子
return: 种子列表, 如发生异常则返回None
"""
if not self.qbc:
return None
torrents, error = self.get_torrents(ids=ids,
status=["downloading"],
tag=tag)
return None if error else torrents or []
def remove_torrents_tag(self, ids, tag):
"""
移除种子Tag
:param ids: 种子Hash列表
:param tag: 标签内容
"""
try:
return self.qbc.torrents_delete_tags(torrent_hashes=ids, tags=tag)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 移除种子tag出错:{str(err)}")
return False
def set_torrents_status(self, ids, tags=None):
"""
设置种子状态为已整理,以及是否强制做种
"""
if not self.qbc:
return
try:
# 打标签
self.qbc.torrents_add_tags(tags="已整理", torrent_hashes=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置种子状态为已整理出错:{str(err)}")
def torrents_set_force_start(self, ids):
"""
设置强制作种
"""
try:
self.qbc.torrents_set_force_start(enable=True, torrent_hashes=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置强制做种出错:{str(err)}")
def get_transfer_task(self, tag=None, match_path=False):
"""
获取下载文件转移任务种子
"""
# 处理下载完成的任务
torrents = self.get_completed_torrents() or []
trans_tasks = []
for torrent in torrents:
torrent_tags = torrent.get("tags") or ""
# 含"已整理"tag的不处理
if "已整理" in torrent_tags:
continue
# 开启标签隔离,未包含指定标签的不处理
if tag and tag not in torrent_tags:
log.debug(f"【{self.client_name}】{self.name} 开启标签隔离, {torrent.get('name')} 未包含指定标签:{tag}")
continue
path = torrent.get("save_path")
# 无法获取下载路径的不处理
if not path:
log.debug(f"【{self.client_name}】{self.name} 未获取到 {torrent.get('name')} 下载保存路径")
continue
true_path, replace_flag = self.get_replace_path(path, self.download_dir)
# 开启目录隔离,未进行目录替换的不处理
if match_path and not replace_flag:
log.debug(f"【{self.client_name}】{self.name} 开启目录隔离, {torrent.get('name')} 未匹配下载目录范围")
continue
content_path = torrent.get("content_path")
if content_path:
trans_name = content_path.replace(path, "").replace("\\", "/")
if trans_name.startswith('/'):
trans_name = trans_name[1:]
else:
trans_name = torrent.get('name')
trans_tasks.append({
'path': os.path.join(true_path, trans_name).replace("\\", "/"),
'id': torrent.get('hash')
})
return trans_tasks
def get_remove_torrents(self, config=None):
"""
获取自动删种任务种子
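
        config sketch (keys as consumed below, values illustrative):
            {"ratio": 1.5, "seeding_time": 72, "size": [1, 500], "upload_avs": 100,
             "savepath_key": "movie", "tracker_key": "example",
             "qb_state": ["stalledUP"], "qb_category": ["movie"],
             "filter_tags": [], "samedata": False}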
"""
if not config:
return []
remove_torrents = []
remove_torrents_ids = []
torrents, error_flag = self.get_torrents(tag=config.get("filter_tags"))
if error_flag:
return []
ratio = config.get("ratio")
# 做种时间 单位:小时
seeding_time = config.get("seeding_time")
# 大小 单位:GB
size = config.get("size")
minsize = size[0] * 1024 * 1024 * 1024 if size else 0
maxsize = size[-1] * 1024 * 1024 * 1024 if size else 0
# 平均上传速度 单位 KB/s
upload_avs = config.get("upload_avs")
savepath_key = config.get("savepath_key")
tracker_key = config.get("tracker_key")
qb_state = config.get("qb_state")
qb_category = config.get("qb_category")
for torrent in torrents:
date_done = torrent.completion_on if torrent.completion_on > 0 else torrent.added_on
date_now = int(time.mktime(datetime.now().timetuple()))
torrent_seeding_time = date_now - date_done if date_done else 0
torrent_upload_avs = torrent.uploaded / torrent_seeding_time if torrent_seeding_time else 0
if ratio and torrent.ratio <= ratio:
continue
if seeding_time and torrent_seeding_time <= seeding_time * 3600:
continue
if size and (torrent.size >= maxsize or torrent.size <= minsize):
continue
if upload_avs and torrent_upload_avs >= upload_avs * 1024:
continue
if savepath_key and not re.findall(savepath_key, torrent.save_path, re.I):
continue
if tracker_key and not re.findall(tracker_key, torrent.tracker, re.I):
continue
if qb_state and torrent.state not in qb_state:
continue
if qb_category and torrent.category not in qb_category:
continue
remove_torrents.append({
"id": torrent.hash,
"name": torrent.name,
"site": StringUtils.get_url_sld(torrent.tracker),
"size": torrent.size
})
remove_torrents_ids.append(torrent.hash)
if config.get("samedata") and remove_torrents:
remove_torrents_plus = []
for remove_torrent in remove_torrents:
name = remove_torrent.get("name")
size = remove_torrent.get("size")
for torrent in torrents:
if torrent.name == name and torrent.size == size and torrent.hash not in remove_torrents_ids:
remove_torrents_plus.append({
"id": torrent.hash,
"name": torrent.name,
"site": StringUtils.get_url_sld(torrent.tracker),
"size": torrent.size
})
remove_torrents_plus += remove_torrents
return remove_torrents_plus
return remove_torrents
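    # A minimal sketch of a removal-policy `config` consumed above (all keys are
    # optional; the values here are illustrative assumptions, not defaults):
    # {"filter_tags": "NASTOOL", "ratio": 2.0, "seeding_time": 72, "size": [1, 50],
    #  "upload_avs": 100, "savepath_key": "movies", "tracker_key": "tracker\\.example",
    #  "qb_state": ["pausedUP"], "qb_category": ["movie"], "samedata": True}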
def __get_last_add_torrentid_by_tag(self, tag, status=None):
"""
        Get the ID of the most recently added torrent with the given tag (downloading or paused)
        :return: torrent ID (hash)
"""
try:
torrents, _ = self.get_torrents(status=status, tag=tag)
except Exception as err:
ExceptionUtils.exception_traceback(err)
return None
if torrents:
return torrents[0].get("hash")
else:
return None
def get_torrent_id_by_tag(self, tag, status=None):
"""
        Retry fetching the just-added torrent's ID by tag, then remove the tag
"""
torrent_id = None
        # qBittorrent needs time to register a new download; retry 5 times, waiting 5 seconds each
for i in range(1, 6):
time.sleep(5)
torrent_id = self.__get_last_add_torrentid_by_tag(tag=tag,
status=status)
if torrent_id is None:
continue
else:
self.remove_torrents_tag(torrent_id, tag)
break
return torrent_id
def add_torrent(self,
content,
is_paused=False,
download_dir=None,
tag=None,
category=None,
content_layout=None,
upload_limit=None,
download_limit=None,
ratio_limit=None,
seeding_time_limit=None,
cookie=None
):
"""
        Add a torrent
        :param content: torrent URL(s) or torrent file content
        :param is_paused: add in paused state
        :param tag: tag(s)
        :param download_dir: download directory
        :param category: category
        :param content_layout: content layout
        :param upload_limit: upload speed limit, KB/s
        :param download_limit: download speed limit, KB/s
        :param ratio_limit: share ratio limit
        :param seeding_time_limit: seeding time limit
        :param cookie: site cookie used to help fetch the torrent
        :return: bool
"""
if not self.qbc or not content:
return False
if isinstance(content, str):
urls = content
torrent_files = None
else:
urls = None
torrent_files = content
if download_dir:
save_path = download_dir
is_auto = False
else:
save_path = None
is_auto = None
if not category:
category = None
if tag:
tags = tag
else:
tags = None
if not content_layout:
content_layout = None
if upload_limit:
upload_limit = int(upload_limit) * 1024
else:
upload_limit = None
if download_limit:
download_limit = int(download_limit) * 1024
else:
download_limit = None
if ratio_limit:
ratio_limit = round(float(ratio_limit), 2)
else:
ratio_limit = None
if seeding_time_limit:
seeding_time_limit = int(seeding_time_limit)
else:
seeding_time_limit = None
try:
            # Apply the configured torrent-management mode
if is_auto is None:
match self._torrent_management:
case "default":
if self.__get_qb_auto():
is_auto = True
case "auto":
is_auto = True
case "manual":
is_auto = False
            # In auto-management mode without a category, derive one from the save path
if is_auto and not category:
category = self.__check_category(save_path)
            # Submit the download
qbc_ret = self.qbc.torrents_add(urls=urls,
torrent_files=torrent_files,
save_path=save_path,
category=category,
is_paused=is_paused,
tags=tags,
content_layout=content_layout,
upload_limit=upload_limit,
download_limit=download_limit,
ratio_limit=ratio_limit,
seeding_time_limit=seeding_time_limit,
use_auto_torrent_management=is_auto,
cookie=cookie)
return True if qbc_ret and str(qbc_ret).find("Ok") != -1 else False
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 添加种子出错:{str(err)}")
return False
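    # Usage sketch (magnet URI, directory and tag are placeholder assumptions;
    # `client` is an initialized instance of this downloader class):
    # client.add_torrent(content="magnet:?xt=urn:btih:...",
    #                    download_dir="/downloads/movies",
    #                    tag="NASTOOL",
    #                    upload_limit=1024)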
def start_torrents(self, ids):
if not self.qbc:
return False
try:
return self.qbc.torrents_resume(torrent_hashes=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 开始下载出错:{str(err)}")
return False
def stop_torrents(self, ids):
if not self.qbc:
return False
try:
return self.qbc.torrents_pause(torrent_hashes=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 停止下载出错:{str(err)}")
return False
def delete_torrents(self, delete_file, ids):
if not self.qbc:
return False
if not ids:
return False
try:
self.qbc.torrents_delete(delete_files=delete_file, torrent_hashes=ids)
return True
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 删除种子出错:{str(err)}")
return False
def get_files(self, tid):
try:
return self.qbc.torrents_files(torrent_hash=tid)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取文件列表出错:{str(err)}")
return None
def set_files(self, **kwargs):
"""
        Set per-file download state: priority 0 = do not download, priority 1 = download
"""
if not kwargs.get("torrent_hash") or not kwargs.get("file_ids"):
return False
try:
self.qbc.torrents_file_priority(torrent_hash=kwargs.get("torrent_hash"),
file_ids=kwargs.get("file_ids"),
priority=kwargs.get("priority"))
return True
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置下载文件状态出错:{str(err)}")
return False
def set_torrent_tag(self, **kwargs):
pass
def get_download_dirs(self):
if not self.qbc:
return []
ret_dirs = []
try:
categories = self.qbc.torrents_categories(requests_args={'timeout': (10, 30)}) or {}
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取下载文件夹出错:{str(err)}")
return []
for category in categories.values():
if category and category.get("savePath") and category.get("savePath") not in ret_dirs:
ret_dirs.append(category.get("savePath"))
return ret_dirs
def set_uploadspeed_limit(self, ids, limit):
"""
        Set upload speed limit, in bytes/sec
"""
if not self.qbc:
return
if not ids or not limit:
return
self.qbc.torrents_set_upload_limit(limit=int(limit),
torrent_hashes=ids)
def set_downloadspeed_limit(self, ids, limit):
"""
        Set download speed limit, in bytes/sec
"""
if not self.qbc:
return
if not ids or not limit:
return
self.qbc.torrents_set_download_limit(limit=int(limit),
torrent_hashes=ids)
def change_torrent(self, **kwargs):
"""
        Change torrent state
"""
pass
def get_downloading_progress(self, tag=None, ids=None):
"""
        Get progress of torrents currently downloading
"""
Torrents = self.get_downloading_torrents(tag=tag, ids=ids) or []
DispTorrents = []
for torrent in Torrents:
            # Progress percentage
progress = round(torrent.get('progress') * 100, 1)
if torrent.get('state') in ['pausedDL']:
state = "Stoped"
speed = "已暂停"
else:
state = "Downloading"
_dlspeed = StringUtils.str_filesize(torrent.get('dlspeed'))
_upspeed = StringUtils.str_filesize(torrent.get('upspeed'))
if progress >= 100:
speed = "%s%sB/s %s%sB/s" % (chr(8595), _dlspeed, chr(8593), _upspeed)
else:
eta = StringUtils.str_timelong(torrent.get('eta'))
speed = "%s%sB/s %s%sB/s %s" % (chr(8595), _dlspeed, chr(8593), _upspeed, eta)
            # Assemble the display entry
DispTorrents.append({
'id': torrent.get('hash'),
'name': torrent.get('name'),
'speed': speed,
'state': state,
'progress': progress
})
return DispTorrents
def set_speed_limit(self, download_limit=None, upload_limit=None):
"""
        Set global speed limits
        :param download_limit: download speed limit, KB/s
        :param upload_limit: upload speed limit, KB/s
"""
if not self.qbc:
return
        # Guard against None; 0 means "unlimited" to qBittorrent
        download_limit = (download_limit or 0) * 1024
        upload_limit = (upload_limit or 0) * 1024
try:
if self.qbc.transfer.upload_limit != upload_limit:
self.qbc.transfer.upload_limit = upload_limit
if self.qbc.transfer.download_limit != download_limit:
self.qbc.transfer.download_limit = download_limit
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 设置速度限制出错:{str(err)}")
return False
def recheck_torrents(self, ids):
if not self.qbc:
return False
try:
return self.qbc.torrents_recheck(torrent_hashes=ids)
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 检验种子出错:{str(err)}")
return False
def get_client_speed(self):
if not self.qbc:
return False
try:
transfer_info = self.qbc.transfer.info
if transfer_info:
return {
"up_speed": transfer_info.get('up_info_speed'),
"dl_speed": transfer_info.get('dl_info_speed')
}
return False
except Exception as err:
log.error(f"【{self.client_name}】{self.name} 获取客户端速度出错:{str(err)}")
return False
| 26,016 | Python | .py | 627 | 25.443381 | 117 | 0.52043 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,045 | _pypan115.py | demigody_nas-tools/app/downloader/client/_pypan115.py | # -*- coding: utf-8 -*-
import re
import time
from urllib import parse
import requests
import log
from app.utils import RequestUtils, ExceptionUtils
class PyPan115:
cookie = None
user_agent = None
req = None
space_info = None
err = None
def __init__(self, cookie):
self.cookie = cookie
self.req = RequestUtils(cookies=self.cookie, session=requests.Session())
    # Log in
def login(self):
if not self.getSpaceInfo():
return False
return True
    # Fetch storage space info
def getSpaceInfo(self):
try:
self.space_info = {}
url = "https://webapi.115.com/files/index_info"
p = self.req.get_res(url=url)
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取 SpaceInfo 错误:{}".format(rootobject.get("error"))
return False
self.space_info = rootobject.get('data', {}).get('space_info', {})
all_total = self.space_info.get('all_total', {}).get('size_format', '未知')
all_remain = self.space_info.get('all_remain', {}).get('size_format', '未知')
all_use = self.space_info.get('all_use', {}).get('size_format', '未知')
log.info(f"115空间统计: [总计可用]: {all_total} | [当前剩余]: {all_remain} | [已使用]: {all_use}")
return True
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False
    # Resolve a directory path to its ID
def getdirid(self, tdir):
try:
url = "https://webapi.115.com/files/getid?path=" + parse.quote(tdir or '/')
p = self.req.get_res(url=url)
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取目录 [{}]ID 错误:{}".format(tdir, rootobject["error"])
return False, ''
return True, rootobject.get("id")
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, ''
    # Fetch the offline-download task list
def gettasklist(self, page=1):
try:
tasks = []
url = "https://115.com/web/lixian/?ct=lixian&ac=task_lists"
while True:
postdata = "page={}".format(page)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取任务列表错误:{}".format(rootobject["error"])
return False, tasks
if rootobject.get("count") == 0:
break
tasks += rootobject.get("tasks") or []
if page >= rootobject.get("page_count"):
break
return True, tasks
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, []
    # Add an offline-download task
def addtask(self, tdir, content):
try:
ret, dirid = self.getdirid(tdir)
if not ret:
return False, ''
            # Resolve HTTP(S) links to a magnet link via the redirect Location header
if re.match("^https*://", content):
try:
p = self.req.get_res(url=content)
if p and p.headers.get("Location"):
content = p.headers.get("Location")
                except Exception as result:
                    ExceptionUtils.exception_traceback(result)
                    # When the redirect target is a magnet URI, requests raises
                    # "No connection adapters were found for '<uri>'"; recover the URI from the message
                    content = str(result).replace("No connection adapters were found for '", "").replace("'", "")
url = "https://115.com/web/lixian/?ct=lixian&ac=add_task_urls"
postdata = "url[0]={}&savepath=&wp_path_id={}".format(parse.quote(content), dirid)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = rootobject.get("error")
return False, ''
return True, rootobject.get('result', [{}])[0].get('info_hash', '未知')
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, ''
    # Delete an offline-download task
def deltask(self, thash):
try:
url = "https://115.com/web/lixian/?ct=lixian&ac=task_del"
postdata = "hash[0]={}".format(thash)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = rootobject.get("error_msg")
return False
return True
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False
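    # Usage sketch (cookie value and paths are placeholder assumptions):
    # client = PyPan115(cookie="UID=...; CID=...; SEID=...")
    # if client.login():
    #     ok, info_hash = client.addtask(tdir="/downloads", content="magnet:?xt=urn:btih:...")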
    # Resolve a directory ID back to its path
def getiddir(self, tid):
try:
path = '/'
url = "https://aps.115.com/natsort/files.php?aid=1&cid={}&o=file_name&asc=1&offset=0&show_dir=1&limit=40&code=&scid=&snap=0&natsort=1&record_open_time=1&source=&format=json&fc_mix=0&type=&star=&is_share=&suffix=&custom_order=0".format(
tid)
p = self.req.get_res(url=url)
if p:
rootobject = p.json()
if not rootobject.get("state"):
                    # format the tid parameter (previously the builtin `id` was formatted by mistake)
                    self.err = "获取 ID[{}]路径 错误:{}".format(tid, rootobject["error"])
return False, path
patharray = rootobject["path"]
for pathobject in patharray:
if pathobject.get("cid") == 0:
continue
path += pathobject.get("name") + '/'
if path == "/":
self.err = "文件路径不存在"
return False, path
return True, path
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, '/' | 6,562 | Python | .py | 149 | 29.067114 | 247 | 0.512762 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,046 | _pyaria2.py | demigody_nas-tools/app/downloader/client/_pyaria2.py | # -*- coding: utf-8 -*-
import json
import requests
from app.utils import Torrent
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 6800
SERVER_URI_FORMAT = '%s:%s/jsonrpc'
class PyAria2(object):
_secret = None
_server_uri = None
_headers = {}
_id = None
def __init__(self, secret=None, host=DEFAULT_HOST, port=DEFAULT_PORT):
"""
PyAria2 constructor.
secret: aria2 secret token
host: string, aria2 rpc host, default is 'localhost'
port: integer, aria2 rpc port, default is 6800
"""
self._server_uri = SERVER_URI_FORMAT % (host, port)
self._secret = "token:%s" % (secret or "")
self._headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._secret}" if self._secret else None
}
self._id = 0
def _send_request(self, method, params=None):
self._id = (self._id + 1) % 9999
if params is None:
params = []
        # aria2 expects the secret token as the first positional parameter
        if self._secret and self._secret not in params:
            params.insert(0, self._secret)
payload = {
"jsonrpc": "2.0",
"id": self._id,
"method": method,
"params": params or [],
}
response = requests.post(
self._server_uri,
data=json.dumps(payload),
headers=self._headers,
auth=(self._secret, ""),
)
response_data = response.json()
if "error" in response_data:
raise Exception(response_data["error"]["message"])
return response_data.get("result")
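    # For reference, a request built by _send_request looks roughly like this
    # (token and GID values are placeholders):
    #   {"jsonrpc": "2.0", "id": 1, "method": "aria2.tellStatus",
    #    "params": ["token:mysecret", "2089b05ecca3d829"]}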
def addUri(self, uris, options=None, position=None):
"""
This method adds new HTTP(S)/FTP/BitTorrent Magnet URI.
uris: list, list of URIs
options: dict, additional options
position: integer, position in download queue
return: This method returns GID of registered download.
"""
        params = [uris, options or {}]
        if position is not None:
            params.append(position)
        return self._send_request("aria2.addUri", params)
def addTorrent(self, torrent, uris=None, options=None, position=None):
"""
This method adds BitTorrent download by uploading ".torrent" file.
torrent: bin, torrent file bin
uris: list, list of webseed URIs
options: dict, additional options
position: integer, position in download queue
return: This method returns GID of registered download.
"""
        # aria2.addTorrent is not called here; the torrent body is converted to a
        # magnet link and submitted via addUri (webseed uris and position are ignored)
        magnet_link = Torrent.binary_data_to_magnet_link(torrent)
        return self._send_request("aria2.addUri", [[magnet_link], options or {}])
def addMetalink(self, metalink, options=None, position=None):
"""
This method adds Metalink download by uploading ".metalink" file.
metalink: string, metalink file path
options: dict, additional options
position: integer, position in download queue
return: This method returns list of GID of registered download.
"""
        params = [metalink, options or {}]
        if position is not None:
            params.append(position)
        return self._send_request("aria2.addMetalink", params)
def remove(self, gid):
"""
This method removes the download denoted by gid.
gid: string, GID.
return: This method returns GID of removed download.
"""
return self._send_request("aria2.remove", [gid])
def forceRemove(self, gid):
"""
This method removes the download denoted by gid.
gid: string, GID.
return: This method returns GID of removed download.
"""
return self._send_request("aria2.forceRemove", [gid])
def pause(self, gid):
"""
This method pauses the download denoted by gid.
gid: string, GID.
return: This method returns GID of paused download.
"""
return self._send_request("aria2.pause", [gid])
def pauseAll(self):
"""
This method is equal to calling aria2.pause() for every active/waiting download.
return: This method returns OK for success.
"""
return self._send_request("aria2.pauseAll")
def forcePause(self, gid):
"""
This method pauses the download denoted by gid.
gid: string, GID.
return: This method returns GID of paused download.
"""
return self._send_request("aria2.forcePause", [gid])
def forcePauseAll(self):
"""
This method is equal to calling aria2.forcePause() for every active/waiting download.
return: This method returns OK for success.
"""
return self._send_request("aria2.forcePauseAll")
def unpause(self, gid):
"""
This method changes the status of the download denoted by gid from paused to waiting.
gid: string, GID.
return: This method returns GID of unpaused download.
"""
return self._send_request("aria2.unpause", [gid])
def unpauseAll(self):
"""
This method is equal to calling aria2.unpause() for every active/waiting download.
return: This method returns OK for success.
"""
return self._send_request("aria2.unpauseAll")
def tellStatus(self, gid, keys=None):
"""
This method returns download progress of the download denoted by gid.
gid: string, GID.
keys: list, keys for method response.
return: The method response is of type dict and it contains following keys.
"""
params = [gid]
if keys:
params.append(keys)
return self._send_request("aria2.tellStatus", params)
def getUris(self, gid):
"""
This method returns URIs used in the download denoted by gid.
gid: string, GID.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [gid]
return self._send_request("aria2.getUris", params)
def getFiles(self, gid):
"""
This method returns file list of the download denoted by gid.
gid: string, GID.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [gid]
return self._send_request("aria2.getFiles", params)
def getPeers(self, gid):
"""
This method returns peer list of the download denoted by gid.
gid: string, GID.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [gid]
return self._send_request("aria2.getPeers", params)
def getServers(self, gid):
"""
This method returns currently connected HTTP(S)/FTP servers of the download denoted by gid.
gid: string, GID.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [gid]
return self._send_request("aria2.getServers", params)
def tellActive(self, keys=None):
"""
This method returns the list of active downloads.
keys: keys for method response.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = []
if keys:
params.append(keys)
return self._send_request("aria2.tellActive", params)
def tellWaiting(self, offset, num, keys=None):
"""
This method returns the list of waiting download, including paused downloads.
offset: integer, the offset from the download waiting at the front.
num: integer, the number of downloads to be returned.
keys: keys for method response.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [offset, num]
if keys:
params.append(keys)
return self._send_request("aria2.tellWaiting", params)
def tellStopped(self, offset, num, keys=None):
"""
This method returns the list of stopped download.
offset: integer, the offset from the download waiting at the front.
num: integer, the number of downloads to be returned.
keys: keys for method response.
return: The method response is of type list and its element is of type dict and it contains following keys.
"""
params = [offset, num]
if keys:
params.append(keys)
return self._send_request("aria2.tellStopped", params)
def changePosition(self, gid, pos, how):
"""
This method changes the position of the download denoted by gid.
gid: string, GID.
pos: integer, the position relative which to be changed.
how: string.
POS_SET, it moves the download to a position relative to the beginning of the queue.
POS_CUR, it moves the download to a position relative to the current position.
POS_END, it moves the download to a position relative to the end of the queue.
return: The response is of type integer, and it is the destination position.
"""
return self._send_request("aria2.changePosition", [gid, pos, how])
def changeUri(self, gid, fileIndex, delUris, addUris, position=None):
"""
This method removes URIs in delUris from and appends URIs in addUris to download denoted by gid.
gid: string, GID.
fileIndex: integer, file to affect (1-based)
delUris: list, URIs to be removed
addUris: list, URIs to be added
position: integer, where URIs are inserted, after URIs have been removed
return: This method returns a list which contains 2 integers. The first integer is the number of URIs deleted. The second integer is the number of URIs added.
"""
params = [gid, fileIndex, delUris, addUris]
if position is not None:
params.append(position)
return self._send_request("aria2.changeUri", params)
def getOption(self, gid):
"""
This method returns options of the download denoted by gid.
gid: string, GID.
return: The response is of type dict.
"""
return self._send_request("aria2.getOption", [gid])
def changeOption(self, gid, options):
"""
This method changes options of the download denoted by gid dynamically.
gid: string, GID.
options: dict, the options.
return: This method returns OK for success.
"""
return self._send_request("aria2.changeOption", [gid, options])
def getGlobalOption(self):
"""
This method returns global options.
return: The method response is of type dict.
"""
return self._send_request("aria2.getGlobalOption")
def changeGlobalOption(self, options):
"""
This method changes global options dynamically.
options: dict, the options.
return: This method returns OK for success.
"""
return self._send_request("aria2.changeGlobalOption", [options])
def getGlobalStat(self):
"""
This method returns global statistics such as overall download and upload speed.
return: The method response is of type struct and contains following keys.
"""
return self._send_request("aria2.getGlobalStat")
def purgeDownloadResult(self):
"""
This method purges completed/error/removed downloads to free memory.
return: This method returns OK for success.
"""
return self._send_request("aria2.purgeDownloadResult")
def removeDownloadResult(self, gid):
"""
This method removes completed/error/removed download denoted by gid from memory.
return: This method returns OK for success.
"""
return self._send_request("aria2.removeDownloadResult", [gid])
def getVersion(self):
"""
This method returns version of the program and the list of enabled features.
return: The method response is of type dict and contains following keys.
"""
return self._send_request("aria2.getVersion")
def getSessionInfo(self):
"""
This method returns session information.
return: The response is of type dict.
"""
return self._send_request("aria2.getSessionInfo")
def shutdown(self):
"""
        This method shuts down aria2.
return: This method returns OK for success.
"""
return self._send_request("aria2.shutdown")
def forceShutdown(self):
"""
        This method shuts down aria2 without waiting for in-progress actions to finish.
return: This method returns OK for success.
"""
return self._send_request("aria2.forceShutdown") | 12,981 | Python | .py | 307 | 33.478827 | 166 | 0.634051 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,047 | pan115.py | demigody_nas-tools/app/downloader/client/pan115.py | import log
from app.utils import StringUtils
from app.utils.types import DownloaderType
from config import Config
from app.downloader.client._base import _IDownloadClient
from app.downloader.client._pypan115 import PyPan115
class Pan115(_IDownloadClient):
schema = "pan115"
    # Downloader ID
client_id = "pan115"
client_type = DownloaderType.PAN115
client_name = DownloaderType.PAN115.value
_client_config = {}
_client = None
cookie = None
lasthash = None
download_dir = []
def __init__(self, config=None):
if config:
self._client_config = config
self.init_config()
self.connect()
def init_config(self):
if self._client_config:
self.cookie = self._client_config.get("cookie")
self.download_dir = self._client_config.get('download_dir') or []
if self.cookie:
self._client = PyPan115(cookie=self.cookie)
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def connect(self):
self._client.login()
def get_status(self):
if not self._client:
return False
ret = self._client.login()
if not ret:
log.info(self._client.err)
return False
return True
def get_torrents(self, ids=None, status=None, **kwargs):
tlist = []
if not self._client:
return tlist
ret, tasks = self._client.gettasklist(page=1)
if not ret:
log.info(f"【{self.client_type}】获取任务列表错误:{self._client.err}")
return tlist
if tasks:
for task in tasks:
if ids:
if task.get("info_hash") not in ids:
continue
if status:
if task.get("status") not in status:
continue
ret, tdir = self._client.getiddir(task.get("file_id"))
task["path"] = tdir
tlist.append(task)
return tlist or []
def get_completed_torrents(self, **kwargs):
return self.get_torrents(status=[2])
def get_downloading_torrents(self, **kwargs):
return self.get_torrents(status=[0, 1])
def remove_torrents_tag(self, **kwargs):
pass
def get_transfer_task(self, **kwargs):
pass
def get_remove_torrents(self, **kwargs):
return []
def add_torrent(self, content, download_dir=None, **kwargs):
if not self._client:
return False
if isinstance(content, str):
ret, self.lasthash = self._client.addtask(tdir=download_dir, content=content)
if not ret:
log.error(f"【{self.client_type}】添加下载任务失败:{self._client.err}")
return None
return self.lasthash
else:
log.info(f"【{self.client_type}】暂时不支持非链接下载")
return None
def delete_torrents(self, delete_file, ids):
if not self._client:
return False
return self._client.deltask(thash=ids)
def start_torrents(self, ids):
pass
def stop_torrents(self, ids):
pass
def set_torrents_status(self, ids, **kwargs):
return self.delete_torrents(ids=ids, delete_file=False)
def get_download_dirs(self):
return []
def change_torrent(self, **kwargs):
pass
def get_downloading_progress(self, **kwargs):
"""
        Get progress of tasks currently downloading
"""
Torrents = self.get_downloading_torrents()
DispTorrents = []
for torrent in Torrents:
            # Progress percentage
            progress = round(torrent.get('percentDone'), 1)
            state = "Downloading"
            # NOTE: these field names mirror Transmission's naming and may not
            # match what the 115 task list actually returns
            _dlspeed = StringUtils.str_filesize(torrent.get('peers'))
            _upspeed = StringUtils.str_filesize(torrent.get('rateDownload'))
speed = "%s%sB/s %s%sB/s" % (chr(8595), _dlspeed, chr(8593), _upspeed)
DispTorrents.append({
'id': torrent.get('info_hash'),
'name': torrent.get('name'),
'speed': speed,
'state': state,
'progress': progress
})
return DispTorrents
def set_speed_limit(self, **kwargs):
"""
        Set speed limits
"""
pass
def get_type(self):
return self.client_type
def get_files(self, tid):
pass
def recheck_torrents(self, ids):
pass
def set_torrents_tag(self, ids, tags):
pass | 4,686 | Python | .py | 131 | 25.19084 | 92 | 0.573822 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,048 | _base.py | demigody_nas-tools/app/downloader/client/_base.py | import os.path
from abc import ABCMeta, abstractmethod
from app.utils import PathUtils
class _IDownloadClient(metaclass=ABCMeta):
    # Downloader ID
    client_id = ""
    # Downloader type
    client_type = ""
    # Downloader name
    client_name = ""
@abstractmethod
def match(self, ctype):
"""
        Match this implementation against a client type identifier
"""
pass
@abstractmethod
def get_type(self):
"""
        Get the downloader type
"""
pass
@abstractmethod
def connect(self):
"""
        Connect to the downloader
"""
pass
@abstractmethod
def get_status(self):
"""
        Check connectivity
"""
pass
@abstractmethod
def get_torrents(self, ids, status, tag):
"""
        Read torrent info filtered by the given criteria
        :param ids: torrent ID or list of IDs
        :param status: torrent status filter
        :param tag: torrent tag filter
        :return: list of torrent info, whether an error occurred
"""
pass
@abstractmethod
def get_downloading_torrents(self, ids, tag):
"""
        Read torrents currently downloading; must return None on error
"""
pass
@abstractmethod
def get_completed_torrents(self, ids, tag):
"""
        Read completed torrents; must return None on error
"""
pass
@abstractmethod
def get_files(self, tid):
"""
        Read a torrent's file list
"""
pass
@abstractmethod
def set_torrents_status(self, ids, tags=None):
"""
        After transfer completes, tag the torrents as organized ("已整理")
        :param ids: list of torrent IDs
        :param tags: list of torrent tags
"""
pass
@abstractmethod
def get_transfer_task(self, tag, match_path=None):
"""
        Get the list of torrents whose files need transferring
"""
pass
@abstractmethod
def get_remove_torrents(self, config):
"""
        Get the list of torrents to clean up
        :param config: removal policy
        :return: list of torrent IDs
"""
pass
@abstractmethod
def add_torrent(self, **kwargs):
"""
        Add a download task
"""
pass
@abstractmethod
def start_torrents(self, ids):
"""
        Download control: start
"""
pass
@abstractmethod
def stop_torrents(self, ids):
"""
        Download control: stop
"""
pass
@abstractmethod
def delete_torrents(self, delete_file, ids):
"""
        Delete torrents
"""
pass
@abstractmethod
def get_download_dirs(self):
"""
        Get the list of download directories
"""
pass
@staticmethod
def get_replace_path(path, downloaddir) -> (str, bool):
"""
        Remap a directory path between the downloader's view and the local view
        :param path: path to remap
        :param downloaddir: list of download directory mappings
        :return: remapped path, whether a remap was applied
"""
if not path or not downloaddir:
return "", False
path = os.path.normpath(path)
for attr in downloaddir:
save_path = attr.get("save_path")
if not save_path:
continue
save_path = os.path.normpath(save_path)
container_path = attr.get("container_path")
            # No container path configured; treat it as the same as the save path
if not container_path:
container_path = save_path
else:
container_path = os.path.normpath(container_path)
if PathUtils.is_path_in_path(save_path, path):
return path.replace(save_path, container_path), True
return path, False
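    # Worked example (hypothetical mapping): with downloaddir containing
    # {"save_path": "/downloads", "container_path": "/volume1/downloads"},
    # get_replace_path("/downloads/tv/a.mkv", downloaddir) returns
    # ("/volume1/downloads/tv/a.mkv", True); unmatched paths come back unchanged with False.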
@abstractmethod
def change_torrent(self, **kwargs):
"""
        Change torrent state
"""
pass
@abstractmethod
def get_downloading_progress(self):
"""
        Get download progress
"""
pass
@abstractmethod
def set_speed_limit(self, **kwargs):
"""
        Set speed limits
"""
pass
@abstractmethod
def recheck_torrents(self, ids):
"""
        Download control: recheck
"""
pass
| 4,241 | Python | .py | 163 | 14.300613 | 68 | 0.521702 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,049 | meta_helper.py | demigody_nas-tools/app/helper/meta_helper.py | import os
import pickle
import random
import time
from enum import Enum
from threading import RLock
from app.utils import ExceptionUtils
from app.utils.commons import singleton
from config import Config
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = 7 * 24 * 3600
@singleton
class MetaHelper(object):
"""
{
"id": '',
"title": '',
"year": '',
"type": MediaType
}
"""
_meta_data = {}
_meta_path = None
_tmdb_cache_expire = False
def __init__(self):
self.init_config()
def init_config(self):
laboratory = Config().get_config('laboratory')
if laboratory:
self._tmdb_cache_expire = laboratory.get("tmdb_cache_expire")
self._meta_path = os.path.join(Config().get_config_path(), 'tmdb.dat')
self._meta_data = self.__load_meta_data(self._meta_path)
def clear_meta_data(self):
"""
        Clear the entire TMDB cache
"""
with lock:
self._meta_data = {}
def get_meta_data_path(self):
"""
        Return the TMDB cache file path
"""
return self._meta_path
def get_meta_data_by_key(self, key):
"""
        Get a cached entry by key
"""
with lock:
info: dict = self._meta_data.get(key)
if info:
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire or int(time.time()) < expire:
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
self.update_meta_data({key: info})
elif expire and self._tmdb_cache_expire:
self.delete_meta_data(key)
return info or {}
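    # Cache keys combine the recognized type with title and year, e.g. (the title
    # and TMDB id below are illustrative placeholders):
    # "[电影]Some Movie-2019" -> {"id": 12345, "title": "Some Movie", "year": "2019",
    #                             "type": MediaType.MOVIE, ...}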
def dump_meta_data(self, search, page, num):
"""
        Get the current cache list, paginated
        @param search: cache-key search term
        @param page: page number
        @param num: page size
        @return: total count, cache list
"""
if page == 1:
begin_pos = 0
else:
begin_pos = (page - 1) * num
with lock:
search_metas = [(k, {
"id": v.get("id"),
"title": v.get("title"),
"year": v.get("year"),
"media_type": v.get("type").value if isinstance(v.get("type"), Enum) else v.get("type"),
"poster_path": v.get("poster_path"),
"backdrop_path": v.get("backdrop_path")
}, str(k).replace("[电影]", "").replace("[电视剧]", "").replace("[未知]", "").replace("-None", ""))
for k, v in self._meta_data.items() if search.lower() in k.lower() and v.get("id") != 0]
return len(search_metas), search_metas[begin_pos: begin_pos + num]
def delete_meta_data(self, key):
"""
        Delete a cached entry
        @param key: cache key
        @return: the deleted cache entry
"""
with lock:
return self._meta_data.pop(key, None)
def delete_meta_data_by_tmdbid(self, tmdbid):
"""
        Clear all cache entries for the given TMDB ID, forcing a refresh from TMDB
"""
for key in list(self._meta_data):
if str(self._meta_data.get(key, {}).get("id")) == str(tmdbid):
with lock:
self._meta_data.pop(key)
def delete_unknown_meta(self):
"""
        Clear unrecognized cache entries so TMDB can be searched again
"""
for key in list(self._meta_data):
if str(self._meta_data.get(key, {}).get("id")) == '0':
with lock:
self._meta_data.pop(key)
def modify_meta_data(self, key, title):
"""
        Modify a cached entry's title
        @param key: cache key
        @param title: title
        @return: the modified cache entry
"""
with lock:
if self._meta_data.get(key):
self._meta_data[key]['title'] = title
self._meta_data[key][CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
return self._meta_data.get(key)
@staticmethod
def __load_meta_data(path):
"""
        Load the cache from file
"""
try:
if os.path.exists(path):
with open(path, 'rb') as f:
data = pickle.load(f)
return data
return {}
except Exception as e:
ExceptionUtils.exception_traceback(e)
return {}
def update_meta_data(self, meta_data):
"""
        Insert or update cache entries
"""
if not meta_data:
return
with lock:
for key, item in meta_data.items():
if not self._meta_data.get(key):
item[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
self._meta_data[key] = item
def save_meta_data(self, force=False):
"""
        Persist the cache to file
"""
meta_data = self.__load_meta_data(self._meta_path)
new_meta_data = {k: v for k, v in self._meta_data.items() if str(v.get("id")) != '0'}
if not force \
and not self._random_sample(new_meta_data) \
and meta_data.keys() == new_meta_data.keys():
return
with open(self._meta_path, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL)
def _random_sample(self, new_meta_data):
"""
        Sample entries to decide whether a save is needed
"""
ret = False
if len(new_meta_data) < 25:
keys = list(new_meta_data.keys())
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
else:
count = 0
            # random.sample() requires a sequence (dict views are rejected on Python 3.11+)
            keys = random.sample(list(new_meta_data.keys()), 25)
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
count += 1
if count >= 5:
ret |= self._random_sample(new_meta_data)
return ret
def get_cache_title(self, key):
"""
        Get a cached title
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info or not cache_media_info.get("id"):
return None
return cache_media_info.get("title")
def set_cache_title(self, key, cn_title):
"""
        Reset a cached title
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info:
return
self._meta_data[key]['title'] = cn_title
| 7,397 | Python | .py | 204 | 23.5 | 105 | 0.507053 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,050 | redis_helper.py | demigody_nas-tools/app/helper/redis_helper.py | from app.utils import SystemUtils
class RedisHelper:
@staticmethod
def is_valid():
"""
        Check whether redis is available
"""
if SystemUtils.is_docker():
return True if SystemUtils.execute("which redis-server") else False
else:
return False
| 305 | Python | .py | 11 | 18.818182 | 79 | 0.605735 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,051 | rss_helper.py | demigody_nas-tools/app/helper/rss_helper.py | import re
import xml.dom.minidom
from app.db import MainDb, DbPersist
from app.db.models import RSSTORRENTS
from app.utils import RssTitleUtils, StringUtils, RequestUtils, ExceptionUtils, DomUtils
from config import Config
from third_party.feapder.feapder.utils.log import log
class RssHelper:
_db = MainDb()
@staticmethod
def parse_rssxml(site_info, url, proxy=False):
"""
        Parse an RSS subscription URL and extract the torrent entries
        :param site_info: site info
        :param url: RSS address
        :param proxy: whether to use a proxy
        :return: list of torrent entries; None means the RSS link has expired
"""
_special_title_sites = {
'pt.keepfrds.com': RssTitleUtils.keepfriends_title
}
_rss_expired_msg = [
"RSS 链接已过期, 您需要获得一个新的!",
"RSS Link has expired, You need to get a new one!"
]
        # Start processing
ret_array = []
if not url:
return []
site_domain = StringUtils.get_url_domain(url)
try:
ret = RequestUtils(proxies=Config().get_proxies() if proxy else None).get_res(url)
if not ret:
return []
ret.encoding = ret.apparent_encoding
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
return []
if ret:
ret_xml = ret.text
try:
                # Parse the XML
dom_tree = xml.dom.minidom.parseString(ret_xml)
rootNode = dom_tree.documentElement
items = rootNode.getElementsByTagName("item")
err_msg = None
for item in items:
try:
                        # Title
title = DomUtils.tag_value(item, "title", default="")
if not title:
continue
                        # Site-specific title handling
if site_domain and site_domain in _special_title_sites:
title = _special_title_sites.get(site_domain)(title)
                        # Description
description = DomUtils.tag_value(item, "description", default="")
                        # Torrent detail page
link = DomUtils.tag_value(item, "link", default="")
                        # Torrent download link
enclosure = DomUtils.tag_value(item, "enclosure", "url", default="")
if not enclosure and not link:
continue
                        # Size
size = 0
                        # Special handling for M-Team enclosures
if 'm-team' in enclosure or 'm-team' in link:
res = re.findall(r'\d+', link)
torrent_id = res[0]
from app.sites.mt import MtFunc
enclosure = MtFunc(site_info).get_download_url(torrent_id)
match = re.search(r'\[([^]]+)]$', title)
if match:
content = match.group(1)
size = StringUtils.num_filesize(content)
else:
                                err_msg = "RSS标题中未包含种子大小,无法获取该数据"
else:
size = DomUtils.tag_value(item, "enclosure", "length", default=0)
if size and str(size).isdigit():
size = int(size)
                        # Some feeds only provide link, not enclosure
if not enclosure and link:
enclosure = link
link = None
                        # Publish date
pubdate = DomUtils.tag_value(item, "pubDate", default="")
if pubdate:
                            # Convert to a timestamp
pubdate = StringUtils.get_time_stamp(pubdate)
                        # Build the result entry
tmp_dict = {'title': title,
'enclosure': enclosure,
'size': size,
'description': description,
'link': link,
'pubdate': pubdate}
ret_array.append(tmp_dict)
except Exception as e1:
ExceptionUtils.exception_traceback(e1)
continue
if err_msg:
log.warning(err_msg)
except Exception as e2:
                # Expired RSS: some sites return a plain-text expiry message instead of XML
if ret_xml in _rss_expired_msg:
return None
ExceptionUtils.exception_traceback(e2)
return ret_array
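    # Each entry returned by parse_rssxml looks like this (values are illustrative):
    # {'title': 'Some.Show.S01E01.1080p', 'enclosure': 'https://site/download?id=1',
    #  'size': 1073741824, 'description': '...', 'link': 'https://site/detail?id=1',
    #  'pubdate': <timestamp or "">}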
@DbPersist(_db)
def insert_rss_torrents(self, media_info):
"""
        Insert an RSS record into the database
"""
self._db.insert(
RSSTORRENTS(
TORRENT_NAME=media_info.org_string,
ENCLOSURE=media_info.enclosure,
TYPE=media_info.type.value,
TITLE=media_info.title,
YEAR=media_info.year,
SEASON=media_info.get_season_string(),
EPISODE=media_info.get_episode_string()
))
def is_rssd_by_enclosure(self, enclosure):
"""
        Check whether an RSS entry has been processed, by download link
"""
if not enclosure:
return True
if self._db.query(RSSTORRENTS).filter(RSSTORRENTS.ENCLOSURE == enclosure).count() > 0:
return True
else:
return False
def is_rssd_by_simple(self, torrent_name, enclosure):
"""
        Check whether an RSS entry has been processed, by name
"""
if not torrent_name and not enclosure:
return True
if enclosure:
ret = self._db.query(RSSTORRENTS).filter(RSSTORRENTS.ENCLOSURE == enclosure).count()
else:
ret = self._db.query(RSSTORRENTS).filter(RSSTORRENTS.TORRENT_NAME == torrent_name).count()
return True if ret > 0 else False
@DbPersist(_db)
def simple_insert_rss_torrents(self, title, enclosure):
"""
        Insert an RSS record into the database
"""
self._db.insert(
RSSTORRENTS(
TORRENT_NAME=title,
ENCLOSURE=enclosure
))
@DbPersist(_db)
def simple_delete_rss_torrents(self, title, enclosure=None):
"""
        Delete RSS records
"""
if enclosure:
self._db.query(RSSTORRENTS).filter(RSSTORRENTS.TORRENT_NAME == title,
RSSTORRENTS.ENCLOSURE == enclosure).delete()
else:
self._db.query(RSSTORRENTS).filter(RSSTORRENTS.TORRENT_NAME == title).delete()
@DbPersist(_db)
def truncate_rss_history(self):
"""
        Clear RSS history
"""
self._db.query(RSSTORRENTS).delete()
| 7,308 | Python | .py | 171 | 24.245614 | 103 | 0.477094 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,052 | ffmpeg_helper.py | demigody_nas-tools/app/helper/ffmpeg_helper.py | import json
import subprocess
from app.utils import SystemUtils
class FfmpegHelper:
@staticmethod
def get_thumb_image_from_video(video_path, image_path, frames="00:03:01"):
"""
        Grab a thumbnail image from a video file with ffmpeg
"""
if not video_path or not image_path:
return False
cmd = 'ffmpeg -i "{video_path}" -ss {frames} -vframes 1 -f image2 "{image_path}"'.format(video_path=video_path,
frames=frames,
image_path=image_path)
result = SystemUtils.execute(cmd)
if result:
return True
return False
@staticmethod
def extract_wav_from_video(video_path, audio_path, audio_index=None):
"""
        Extract 16000 Hz, 16-bit mono WAV audio from a video file with ffmpeg
"""
if not video_path or not audio_path:
return False
        # Extract the specified audio stream
if audio_index:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-map', f'0:a:{audio_index}',
'-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]
else:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]
ret = subprocess.run(command).returncode
if ret == 0:
return True
return False
@staticmethod
def get_video_metadata(video_path):
"""
        Get video metadata via ffprobe
"""
if not video_path:
return False
try:
command = ['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', video_path]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode == 0:
return json.loads(result.stdout.decode("utf-8"))
except Exception as e:
print(e)
return None
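    # ffprobe's JSON output, abridged to the commonly used parts (illustrative):
    # {"streams": [{"codec_type": "video", "codec_name": "h264", ...},
    #              {"codec_type": "audio", "index": 1, ...}],
    #  "format": {"duration": "5400.000000", "size": "1073741824", ...}}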
@staticmethod
def extract_subtitle_from_video(video_path, subtitle_path, subtitle_index=None):
"""
        Extract subtitles from a video
"""
if not video_path or not subtitle_path:
return False
if subtitle_index:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-map', f'0:s:{subtitle_index}',
subtitle_path]
else:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path, subtitle_path]
ret = subprocess.run(command).returncode
if ret == 0:
return True
return False
| 2,925 | Python | .py | 69 | 28.057971 | 119 | 0.51184 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,053 | submodule_helper.py | demigody_nas-tools/app/helper/submodule_helper.py | # -*- coding: utf-8 -*-
import importlib
import pkgutil
class SubmoduleHelper:
@classmethod
def import_submodules(cls, package, filter_func=lambda name, obj: True):
"""
        Import submodules of a package
        :param package: parent package name
        :param filter_func: filter taking (attribute name, object); return True to import, False to skip
        :return: list of matching objects
"""
submodules = []
packages = importlib.import_module(package).__path__
for importer, package_name, _ in pkgutil.iter_modules(packages):
full_package_name = f'{package}.{package_name}'
            # skip private modules (checking full_package_name here would never match,
            # since it always starts with the parent package name)
            if package_name.startswith('_'):
continue
module = importlib.import_module(full_package_name)
for name, obj in module.__dict__.items():
if name.startswith('_'):
continue
if isinstance(obj, type) and filter_func(name, obj):
submodules.append(obj)
return submodules
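    # Usage sketch (package path and attribute are illustrative assumptions):
    # classes = SubmoduleHelper.import_submodules(
    #     'app.downloader.client',
    #     filter_func=lambda name, obj: hasattr(obj, 'client_id'))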
| 1,021 | Python | .py | 25 | 27.6 | 76 | 0.584245 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,054 | security_helper.py | demigody_nas-tools/app/helper/security_helper.py | import ipaddress
from app.utils import ExceptionUtils
from config import Config
class SecurityHelper:
media_server_webhook_allow_ip = {}
telegram_webhook_allow_ip = {}
synology_webhook_allow_ip = {}
def __init__(self):
security = Config().get_config('security')
if security:
self.media_server_webhook_allow_ip = security.get('media_server_webhook_allow_ip') or {}
self.telegram_webhook_allow_ip = security.get('telegram_webhook_allow_ip') or {}
self.synology_webhook_allow_ip = security.get('synology_webhook_allow_ip') or {}
def check_mediaserver_ip(self, ip):
return self.allow_access(self.media_server_webhook_allow_ip, ip)
def check_telegram_ip(self, ip):
return self.allow_access(self.telegram_webhook_allow_ip, ip)
def check_synology_ip(self, ip):
return self.allow_access(self.synology_webhook_allow_ip, ip)
def check_slack_ip(self, ip):
        # "ipv4" (previously mistyped as "ipve", which made the check match nothing)
        return self.allow_access({"ipv4": "127.0.0.1"}, ip)
@staticmethod
def allow_access(allow_ips, ip):
"""
        Check whether an IP is allowed
        :param allow_ips: allowed IP ranges, e.g. {"ipv4": ..., "ipv6": ...}
        :param ip: the IP to check
"""
if not allow_ips:
return True
try:
ipaddr = ipaddress.ip_address(ip)
if ipaddr.version == 4:
if not allow_ips.get('ipv4'):
return True
allow_ipv4s = allow_ips.get('ipv4').split(",")
for allow_ipv4 in allow_ipv4s:
if ipaddr in ipaddress.ip_network(allow_ipv4):
return True
elif ipaddr.ipv4_mapped:
if not allow_ips.get('ipv4'):
return True
allow_ipv4s = allow_ips.get('ipv4').split(",")
for allow_ipv4 in allow_ipv4s:
if ipaddr.ipv4_mapped in ipaddress.ip_network(allow_ipv4):
return True
else:
if not allow_ips.get('ipv6'):
return True
allow_ipv6s = allow_ips.get('ipv6').split(",")
for allow_ipv6 in allow_ipv6s:
if ipaddr in ipaddress.ip_network(allow_ipv6):
return True
except Exception as e:
ExceptionUtils.exception_traceback(e)
return False
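    # Shape of the expected `allow_ips` setting (addresses are placeholders):
    # {"ipv4": "192.168.0.0/16,127.0.0.1", "ipv6": "::1/128"}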
| 2,417 | Python | .py | 56 | 30.642857 | 100 | 0.568966 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,055 | site_helper.py | demigody_nas-tools/app/helper/site_helper.py | # -*- coding: utf-8 -*-
from datetime import datetime
import os
import re
from lxml import etree
from app.utils import SystemUtils
from config import RMT_SUBEXT
class SiteHelper:
@classmethod
def is_logged_in(cls, html_text):
"""
        Determine whether the site session is logged in
        :param html_text: page HTML
        :return: bool
"""
html = etree.HTML(html_text)
if not html:
return False
        # A visible password input means not logged in
if html.xpath("//input[@type='password']"):
return False
        # Look for logout / user-panel links
xpaths = ['//a[contains(@href, "logout")'
' or contains(@data-url, "logout")'
' or contains(@href, "mybonus") '
' or contains(@onclick, "logout")'
' or contains(@href, "usercp")]',
'//form[contains(@action, "logout")]']
for xpath in xpaths:
if html.xpath(xpath):
return True
user_info_div = html.xpath('//div[@class="user-info-side"]')
if user_info_div:
return True
return False
@staticmethod
def get_url_subtitle_name(disposition, url):
"""
        Extract the subtitle file name from a site download response
"""
fname = re.findall(r"filename=\"?(.+)\"?", disposition or "")
if fname:
fname = str(fname[0].encode('ISO-8859-1').decode()).split(";")[0].strip()
if fname.endswith('"'):
fname = fname[:-1]
elif url and os.path.splitext(url)[-1] in (RMT_SUBEXT + ['.zip']):
fname = url.split("/")[-1]
else:
fname = str(datetime.now())
return fname
@staticmethod
def transfer_subtitle(source_sub_file, media_file):
"""
        Copy a site subtitle next to the media file
"""
new_sub_file = "%s%s" % (os.path.splitext(media_file)[0], os.path.splitext(source_sub_file)[-1])
if os.path.exists(new_sub_file):
return 1
else:
return SystemUtils.copy(source_sub_file, new_sub_file)
| 2,121 | Python | .py | 60 | 23.883333 | 104 | 0.534956 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,056 | thread_helper.py | demigody_nas-tools/app/helper/thread_helper.py | from concurrent.futures import ThreadPoolExecutor
from app.utils.commons import singleton
@singleton
class ThreadHelper:
_thread_num = 100
executor = None
def __init__(self):
self.executor = ThreadPoolExecutor(max_workers=self._thread_num)
def init_config(self):
pass
    def start_thread(self, func, kwargs):
        # NOTE: despite the name, `kwargs` is a tuple of positional arguments
        self.executor.submit(func, *kwargs)
| 392 | Python | .py | 12 | 27.5 | 72 | 0.721925 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,057 | __init__.py | demigody_nas-tools/app/helper/__init__.py | from .chrome_helper import ChromeHelper, init_chrome
from .meta_helper import MetaHelper
from .progress_helper import ProgressHelper
from .security_helper import SecurityHelper
from .thread_helper import ThreadHelper
from .db_helper import DbHelper
from .dict_helper import DictHelper
from .display_helper import DisplayHelper
from .site_helper import SiteHelper
from .ocr_helper import OcrHelper
from .words_helper import WordsHelper
from .submodule_helper import SubmoduleHelper
from .ffmpeg_helper import FfmpegHelper
from .redis_helper import RedisHelper
from .rss_helper import RssHelper
from .plugin_helper import PluginHelper
| 633 | Python | .py | 16 | 38.5625 | 52 | 0.865478 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,058 | db_helper.py | demigody_nas-tools/app/helper/db_helper.py | import datetime
import os.path
import time
import json
from enum import Enum
from sqlalchemy import cast, func, and_, case
from app.db import MainDb, DbPersist
from app.db.models import *
from app.utils import StringUtils
from app.utils.types import MediaType, RmtMode
class DbHelper:
_db = MainDb()
@DbPersist(_db)
def insert_search_results(self, media_items: list, title=None, ident_flag=True):
"""
        Insert search results into the database
"""
if not media_items:
return
data_list = []
for media_item in media_items:
if media_item.type == MediaType.TV:
mtype = "TV"
elif media_item.type == MediaType.MOVIE:
mtype = "MOV"
else:
mtype = "ANI"
data_list.append(
SEARCHRESULTINFO(
TORRENT_NAME=media_item.org_string,
ENCLOSURE=media_item.enclosure,
DESCRIPTION=media_item.description,
TYPE=mtype if ident_flag else '',
TITLE=media_item.title if ident_flag else title,
YEAR=media_item.year if ident_flag else '',
SEASON=media_item.get_season_string() if ident_flag else '',
EPISODE=media_item.get_episode_string() if ident_flag else '',
ES_STRING=media_item.get_season_episode_string() if ident_flag else '',
VOTE=media_item.vote_average or "0",
IMAGE=media_item.get_backdrop_image(default=False, original=True),
POSTER=media_item.get_poster_image(),
TMDBID=media_item.tmdb_id,
OVERVIEW=media_item.overview,
RES_TYPE=json.dumps({
"respix": media_item.resource_pix,
"restype": media_item.resource_type,
"reseffect": media_item.resource_effect,
"video_encode": media_item.video_encode
}),
RES_ORDER=media_item.res_order,
SIZE=StringUtils.str_filesize(int(media_item.size)),
SEEDERS=media_item.seeders,
PEERS=media_item.peers,
SITE=media_item.site,
SITE_ORDER=media_item.site_order,
PAGEURL=media_item.page_url,
OTHERINFO=media_item.resource_team,
UPLOAD_VOLUME_FACTOR=media_item.upload_volume_factor,
DOWNLOAD_VOLUME_FACTOR=media_item.download_volume_factor,
NOTE=media_item.labels
))
self._db.insert(data_list)
def get_search_result_by_id(self, dl_id):
"""
        Query one search-result record by ID
"""
return self._db.query(SEARCHRESULTINFO).filter(SEARCHRESULTINFO.ID == dl_id).all()
def get_search_results(self):
"""
        Query all search-result records
"""
return self._db.query(SEARCHRESULTINFO).all()
@DbPersist(_db)
def delete_all_search_torrents(self):
"""
        Delete all search records
"""
self._db.query(SEARCHRESULTINFO).delete()
def is_transfer_history_exists(self, source_path, source_filename, dest_path, dest_filename):
"""
        Check whether a transfer-history record exists
"""
if not source_path or not source_filename or not dest_path or not dest_filename:
return False
ret = self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.SOURCE_PATH == source_path,
TRANSFERHISTORY.SOURCE_FILENAME == source_filename,
TRANSFERHISTORY.DEST_PATH == dest_path,
TRANSFERHISTORY.DEST_FILENAME == dest_filename).count()
return True if ret > 0 else False
    @DbPersist(_db)
    def update_transfer_history_date(self, source_path, source_filename, dest_path, dest_filename, date):
"""
        Update the timestamp of a transfer-history record
"""
self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.SOURCE_PATH == source_path,
TRANSFERHISTORY.SOURCE_FILENAME == source_filename,
TRANSFERHISTORY.DEST_PATH == dest_path,
TRANSFERHISTORY.DEST_FILENAME == dest_filename).update(
{
"DATE": date
}
)
@DbPersist(_db)
def insert_transfer_history(self, in_from: Enum, rmt_mode: RmtMode, in_path, out_path, dest, media_info):
"""
        Insert a transfer-history record
"""
if not media_info or not media_info.tmdb_info:
return
if in_path:
in_path = os.path.normpath(in_path)
source_path = os.path.dirname(in_path)
source_filename = os.path.basename(in_path)
else:
return
if out_path:
outpath = os.path.normpath(out_path)
dest_path = os.path.dirname(outpath)
dest_filename = os.path.basename(outpath)
season_episode = media_info.get_season_episode_string()
else:
dest_path = ""
dest_filename = ""
season_episode = media_info.get_season_string()
title = media_info.title
timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
if self.is_transfer_history_exists(source_path, source_filename, dest_path, dest_filename):
            # Refresh the timestamp on the existing record
self.update_transfer_history_date(source_path, source_filename, dest_path, dest_filename, timestr)
return
dest = dest or ""
self._db.insert(
TRANSFERHISTORY(
MODE=str(rmt_mode.value),
TYPE=media_info.type.value,
CATEGORY=media_info.category,
TMDBID=int(media_info.tmdb_id),
TITLE=title,
YEAR=media_info.year,
SEASON_EPISODE=season_episode,
SOURCE=str(in_from.value),
SOURCE_PATH=source_path,
SOURCE_FILENAME=source_filename,
DEST=dest,
DEST_PATH=dest_path,
DEST_FILENAME=dest_filename,
DATE=timestr
)
)
def get_transfer_history(self, search, page, rownum):
"""
        Query transfer-history records
"""
if int(page) == 1:
begin_pos = 0
else:
begin_pos = (int(page) - 1) * int(rownum)
if search:
search = f"%{search}%"
count = self._db.query(TRANSFERHISTORY).filter((TRANSFERHISTORY.SOURCE_FILENAME.like(search))
| (TRANSFERHISTORY.TITLE.like(search))).count()
data = self._db.query(TRANSFERHISTORY).filter((TRANSFERHISTORY.SOURCE_FILENAME.like(search))
| (TRANSFERHISTORY.TITLE.like(search))).order_by(
TRANSFERHISTORY.DATE.desc()).limit(int(rownum)).offset(begin_pos).all()
return count, data
else:
return self._db.query(TRANSFERHISTORY).count(), self._db.query(TRANSFERHISTORY).order_by(
TRANSFERHISTORY.DATE.desc()).limit(int(rownum)).offset(begin_pos).all()
def get_transfer_info_by_id(self, logid):
"""
        Query a transfer record's path info by log ID
"""
return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.ID == int(logid)).first()
def get_transfer_info_by(self, tmdbid, season=None, season_episode=None):
"""
        Query transfer records by tmdbid, season, or season_episode
"""
        # All seasons/episodes of a TV show, or a movie
if tmdbid and not season and not season_episode:
return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid)).all()
        # A specific season of a TV show
if tmdbid and season:
season = f"%{season}%"
return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid),
TRANSFERHISTORY.SEASON_EPISODE.like(season)).all()
        # A specific episode of a TV show
if tmdbid and season_episode:
return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid),
TRANSFERHISTORY.SEASON_EPISODE == season_episode).all()
def is_transfer_history_exists_by_source_full_path(self, source_full_path):
"""
        Query transfer records by the source file's full path
"""
path = os.path.dirname(source_full_path)
filename = os.path.basename(source_full_path)
ret = self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.SOURCE_PATH == path,
TRANSFERHISTORY.SOURCE_FILENAME == filename).count()
if ret > 0:
return True
else:
return False
@DbPersist(_db)
def delete_transfer_log_by_id(self, logid):
"""
        Delete a record by log ID
"""
self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.ID == int(logid)).delete()
@DbPersist(_db)
def get_transfer_history_count(self):
"""
        Total number of transfer-history records
"""
return self._db.query(TRANSFERHISTORY).count()
@DbPersist(_db)
def truncate_transfer_history_list(self):
"""
        Clear all transfer history
"""
return self._db.query(TRANSFERHISTORY).delete() and \
self._db.query(TRANSFERBLACKLIST).delete()
def get_transfer_unknown_paths(self):
"""
        Query the list of unrecognized records
"""
return self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.STATE == 'N').all()
def get_transfer_unknown_paths_by_page(self, search, page, rownum):
"""
        Query unrecognized records by page
"""
if int(page) == 1:
begin_pos = 0
else:
begin_pos = (int(page) - 1) * int(rownum)
if search:
search = f"%{search}%"
count = self._db.query(TRANSFERUNKNOWN).filter((TRANSFERUNKNOWN.STATE == 'N')
& (TRANSFERUNKNOWN.PATH.like(search))).count()
data = self._db.query(TRANSFERUNKNOWN).filter((TRANSFERUNKNOWN.STATE == 'N')
& (TRANSFERUNKNOWN.PATH.like(search))).order_by(
TRANSFERUNKNOWN.ID.desc()).limit(int(rownum)).offset(begin_pos).all()
return count, data
else:
return self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.STATE == 'N').count(), self._db.query(
TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.STATE == 'N').order_by(
TRANSFERUNKNOWN.ID.desc()).limit(int(rownum)).offset(begin_pos).all()
@DbPersist(_db)
def update_transfer_unknown_state(self, path):
"""
        Mark an unrecognized record as recognized
"""
if not path:
return
self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.PATH == os.path.normpath(path)).update(
{
"STATE": "Y"
}
)
@DbPersist(_db)
def delete_transfer_unknown(self, tid):
"""
        Delete an unrecognized record
"""
if not tid:
return []
self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.ID == int(tid)).delete()
def get_unknown_info_by_id(self, tid):
"""
        Query an unrecognized record
"""
if not tid:
return []
return self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.ID == int(tid)).first()
def get_transfer_unknown_by_path(self, path):
"""
        Query unrecognized records by path
"""
if not path:
return []
return self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.PATH == path).all()
@DbPersist(_db)
def get_transfer_unknown_count(self):
"""
        Total number of unrecognized records
"""
return self._db.query(TRANSFERUNKNOWN).count()
def truncate_transfer_unknown_list(self):
"""
        Clear all unrecognized records
"""
unknown_paths = self.get_transfer_unknown_paths()
if not unknown_paths:
return True
results = [self.delete_transfer_unknown(item.ID) for item in unknown_paths if item.ID]
return all(results)
def is_transfer_unknown_exists(self, path):
"""
        Check whether an unrecognized record exists
"""
if not path:
return False
ret = self._db.query(TRANSFERUNKNOWN).filter(TRANSFERUNKNOWN.PATH == os.path.normpath(path)).count()
if ret > 0:
return True
else:
return False
def is_need_insert_transfer_unknown(self, path):
"""
        Check whether an unrecognized record needs to be inserted
"""
if not path:
return False
"""
1) 如果不存在未识别,则插入
2) 如果存在未处理的未识别,则插入(并不会真正的插入,insert_transfer_unknown里会挡住,主要是标记进行消息推送)
3) 如果未识别已经全部处理完并且存在转移记录,则不插入
4) 如果未识别已经全部处理完并且不存在转移记录,则删除并重新插入
"""
unknowns = self.get_transfer_unknown_by_path(path)
if unknowns:
is_all_proceed = True
for unknown in unknowns:
if unknown.STATE == 'N':
is_all_proceed = False
break
if is_all_proceed:
is_transfer_history_exists = self.is_transfer_history_exists_by_source_full_path(path)
if is_transfer_history_exists:
# 对应 3)
return False
else:
# 对应 4)
for unknown in unknowns:
self.delete_transfer_unknown(unknown.ID)
return True
else:
# 对应 2)
return True
else:
# 对应 1)
return True
@DbPersist(_db)
def insert_transfer_unknown(self, path, dest, rmt_mode):
"""
插入未识别记录
"""
if not path:
return
if self.is_transfer_unknown_exists(path):
return
else:
path = os.path.normpath(path)
if dest:
dest = os.path.normpath(dest)
else:
dest = ""
self._db.insert(TRANSFERUNKNOWN(
PATH=path,
DEST=dest,
STATE='N',
MODE=str(rmt_mode.value)
))
def is_transfer_in_blacklist(self, path):
"""
查询是否为黑名单
"""
if not path:
return False
ret = self._db.query(TRANSFERBLACKLIST).filter(TRANSFERBLACKLIST.PATH == os.path.normpath(path)).count()
if ret > 0:
return True
else:
return False
def is_transfer_notin_blacklist(self, path):
"""
        查询是否不在黑名单中
"""
return not self.is_transfer_in_blacklist(path)
@DbPersist(_db)
def insert_transfer_blacklist(self, path):
"""
插入黑名单记录
"""
if not path:
return
if self.is_transfer_in_blacklist(path):
return
else:
self._db.insert(TRANSFERBLACKLIST(
PATH=os.path.normpath(path)
))
@DbPersist(_db)
def delete_transfer_blacklist(self, path):
"""
删除黑名单记录
"""
self._db.query(TRANSFERBLACKLIST).filter(TRANSFERBLACKLIST.PATH == str(path)).delete()
self._db.query(SYNCHISTORY).filter(SYNCHISTORY.PATH == str(path)).delete()
@DbPersist(_db)
def truncate_transfer_blacklist(self):
"""
清空黑名单记录
"""
self._db.query(TRANSFERBLACKLIST).delete()
self._db.query(SYNCHISTORY).delete()
@DbPersist(_db)
def truncate_rss_episodes(self):
"""
清空RSS历史记录
"""
self._db.query(RSSTVEPISODES).delete()
def get_config_site(self):
"""
查询所有站点信息
"""
return self._db.query(CONFIGSITE).order_by(cast(CONFIGSITE.PRI, Integer).asc())
def get_site_by_id(self, tid):
"""
查询1个站点信息
"""
return self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).all()
@DbPersist(_db)
def insert_config_site(self, name, site_pri,
rssurl=None, signurl=None, cookie=None, note=None, rss_uses=None, apikey=None):
"""
插入站点信息
"""
if not name:
return
self._db.insert(CONFIGSITE(
NAME=name,
PRI=site_pri,
RSSURL=rssurl,
SIGNURL=signurl,
COOKIE=cookie,
APIKEY=apikey,
NOTE=note,
INCLUDE=rss_uses
))
@DbPersist(_db)
def delete_config_site(self, tid):
"""
删除站点信息
"""
if not tid:
return
self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).delete()
@DbPersist(_db)
def update_config_site(self, tid, name, site_pri, rssurl, signurl, cookie, apikey, note, rss_uses):
"""
更新站点信息
"""
if not tid:
return
self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).update(
{
"NAME": name,
"PRI": site_pri,
"RSSURL": rssurl,
"SIGNURL": signurl,
"COOKIE": cookie,
"APIKEY": apikey,
"NOTE": note,
"INCLUDE": rss_uses
}
)
@DbPersist(_db)
def update_config_site_note(self, tid, note):
"""
更新站点属性
"""
if not tid:
return
self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).update(
{
"NOTE": note
}
)
@DbPersist(_db)
def update_site_cookie_ua(self, tid, cookie, ua=None, apikey=None):
"""
更新站点Cookie、ApiKey和ua
"""
if not tid:
return
        rec = self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).first()
        if not rec:
            return
        note = json.loads(rec.NOTE) if rec.NOTE else {}
        if ua:
            note['ua'] = ua
self._db.query(CONFIGSITE).filter(CONFIGSITE.ID == int(tid)).update(
{
"COOKIE": cookie,
"APIKEY": apikey,
"NOTE": json.dumps(note)
}
)
def get_config_filter_group(self, gid=None):
"""
查询过滤规则组
"""
if gid:
return self._db.query(CONFIGFILTERGROUP).filter(CONFIGFILTERGROUP.ID == int(gid)).all()
return self._db.query(CONFIGFILTERGROUP).all()
def get_config_filter_rule(self, groupid=None):
"""
查询过滤规则
"""
if not groupid:
return self._db.query(CONFIGFILTERRULES).order_by(CONFIGFILTERRULES.GROUP_ID,
cast(CONFIGFILTERRULES.PRIORITY,
Integer)).all()
else:
return self._db.query(CONFIGFILTERRULES).filter(
CONFIGFILTERRULES.GROUP_ID == int(groupid)).order_by(CONFIGFILTERRULES.GROUP_ID,
cast(CONFIGFILTERRULES.PRIORITY,
Integer)).all()
def get_rss_movies(self, state=None, rssid=None):
"""
查询订阅电影信息
"""
if rssid:
return self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rssid)).all()
else:
if not state:
return self._db.query(RSSMOVIES).all()
else:
return self._db.query(RSSMOVIES).filter(RSSMOVIES.STATE == state).all()
def get_rss_movie_id(self, title, year=None, tmdbid=None):
"""
获取订阅电影ID
"""
if not title:
return ""
if tmdbid:
ret = self._db.query(RSSMOVIES.ID).filter(RSSMOVIES.TMDBID == str(tmdbid)).first()
if ret:
return ret[0]
if not year:
items = self._db.query(RSSMOVIES).filter(RSSMOVIES.NAME == title).all()
else:
items = self._db.query(RSSMOVIES).filter(RSSMOVIES.NAME == title,
RSSMOVIES.YEAR == str(year)).all()
if items:
if tmdbid:
for item in items:
if not item.TMDBID or item.TMDBID == str(tmdbid):
return item.ID
else:
return items[0].ID
else:
return ""
def get_rss_movie_sites(self, rssid):
"""
获取订阅电影站点
"""
if not rssid:
return ""
ret = self._db.query(RSSMOVIES.DESC).filter(RSSMOVIES.ID == int(rssid)).first()
if ret:
return ret[0]
return ""
@DbPersist(_db)
def update_rss_movie_tmdb(self, rid, tmdbid, title, year, image, desc, note):
"""
更新订阅电影的部分信息
"""
if not tmdbid:
return
self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rid)).update({
"TMDBID": tmdbid,
"NAME": title,
"YEAR": year,
"IMAGE": image,
"NOTE": note,
"DESC": desc
})
@DbPersist(_db)
def update_rss_movie_desc(self, rid, desc):
"""
更新订阅电影的DESC
"""
self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rid)).update({
"DESC": desc
})
@DbPersist(_db)
def update_rss_filter_order(self, rtype, rssid, res_order):
"""
更新订阅命中的过滤规则优先级
"""
if rtype == MediaType.MOVIE:
self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rssid)).update({
"FILTER_ORDER": res_order
})
else:
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).update({
"FILTER_ORDER": res_order
})
def get_rss_overedition_order(self, rtype, rssid):
"""
查询当前订阅的过滤优先级
"""
if rtype == MediaType.MOVIE:
res = self._db.query(RSSMOVIES.FILTER_ORDER).filter(RSSMOVIES.ID == int(rssid)).first()
else:
res = self._db.query(RSSTVS.FILTER_ORDER).filter(RSSTVS.ID == int(rssid)).first()
if res and res[0]:
return int(res[0])
else:
return 0
def is_exists_rss_movie(self, title, year):
"""
判断RSS电影是否存在
"""
if not title:
return False
count = self._db.query(RSSMOVIES).filter(RSSMOVIES.NAME == title,
RSSMOVIES.YEAR == str(year)).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_rss_movie(self, media_info,
state='D',
rss_sites=None,
search_sites=None,
over_edition=0,
filter_restype=None,
filter_pix=None,
filter_team=None,
filter_rule=None,
filter_include=None,
filter_exclude=None,
save_path=None,
download_setting=-1,
fuzzy_match=0,
desc=None,
note=None,
keyword=None):
"""
新增RSS电影
"""
if search_sites is None:
search_sites = []
if rss_sites is None:
rss_sites = []
if not media_info:
return -1
if not media_info.title:
return -1
if self.is_exists_rss_movie(media_info.title, media_info.year):
return 9
self._db.insert(RSSMOVIES(
NAME=media_info.title,
YEAR=media_info.year,
TMDBID=media_info.tmdb_id,
IMAGE=media_info.get_message_image(),
RSS_SITES=json.dumps(rss_sites),
SEARCH_SITES=json.dumps(search_sites),
OVER_EDITION=over_edition,
FILTER_RESTYPE=filter_restype,
FILTER_PIX=filter_pix,
FILTER_RULE=filter_rule,
FILTER_TEAM=filter_team,
FILTER_INCLUDE=filter_include,
FILTER_EXCLUDE=filter_exclude,
SAVE_PATH=save_path,
DOWNLOAD_SETTING=download_setting,
FUZZY_MATCH=fuzzy_match,
STATE=state,
DESC=desc,
NOTE=note,
KEYWORD=keyword
))
return 0
@DbPersist(_db)
def delete_rss_movie(self, title=None, year=None, rssid=None, tmdbid=None):
"""
删除RSS电影
"""
if not title and not rssid:
return
if rssid:
self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rssid)).delete()
else:
if tmdbid:
self._db.query(RSSMOVIES).filter(RSSMOVIES.TMDBID == tmdbid).delete()
self._db.query(RSSMOVIES).filter(RSSMOVIES.NAME == title,
RSSMOVIES.YEAR == str(year)).delete()
@DbPersist(_db)
def update_rss_movie_state(self, title=None, year=None, rssid=None, state='R'):
"""
更新电影订阅状态
"""
if not title and not rssid:
return
if rssid:
self._db.query(RSSMOVIES).filter(RSSMOVIES.ID == int(rssid)).update(
{
"STATE": state
})
else:
self._db.query(RSSMOVIES).filter(RSSMOVIES.NAME == title,
RSSMOVIES.YEAR == str(year)).update(
{
"STATE": state
})
def get_rss_tvs(self, state=None, rssid=None):
"""
查询订阅电视剧信息
"""
if rssid:
return self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).all()
else:
if not state:
return self._db.query(RSSTVS).all()
else:
return self._db.query(RSSTVS).filter(RSSTVS.STATE == state).all()
def get_rss_tv_id(self, title, year=None, season=None, tmdbid=None):
"""
获取订阅电视剧ID
"""
if not title:
return ""
if tmdbid:
if season:
ret = self._db.query(RSSTVS.ID).filter(RSSTVS.TMDBID == tmdbid,
RSSTVS.SEASON == season).first()
else:
ret = self._db.query(RSSTVS.ID).filter(RSSTVS.TMDBID == tmdbid).first()
if ret:
return ret[0]
if season and year:
items = self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.SEASON == str(season),
RSSTVS.YEAR == str(year)).all()
elif season and not year:
items = self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.SEASON == str(season)).all()
elif not season and year:
items = self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.YEAR == str(year)).all()
else:
items = self._db.query(RSSTVS).filter(RSSTVS.NAME == title).all()
if items:
if tmdbid:
for item in items:
if not item.TMDBID or item.TMDBID == str(tmdbid):
return item.ID
else:
return items[0].ID
else:
return ""
def get_rss_tv_sites(self, rssid):
"""
获取订阅电视剧站点
"""
if not rssid:
return ""
ret = self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).first()
if ret:
return ret
return ""
@DbPersist(_db)
def update_rss_tv_tmdb(self, rid, tmdbid, title, year, total, lack, image, desc, note):
"""
        更新订阅电视剧的部分信息
"""
if not tmdbid:
return
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rid)).update(
{
"TMDBID": tmdbid,
"NAME": title,
"YEAR": year,
"TOTAL": total,
"LACK": lack,
"IMAGE": image,
"DESC": desc,
"NOTE": note
}
)
@DbPersist(_db)
def update_rss_tv_desc(self, rid, desc):
"""
更新订阅电视剧的DESC
"""
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rid)).update(
{
"DESC": desc
}
)
def is_exists_rss_tv(self, title, year, season=None):
"""
判断RSS电视剧是否存在
"""
if not title:
return False
if season:
count = self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.YEAR == str(year),
RSSTVS.SEASON == season).count()
else:
count = self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.YEAR == str(year)).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_rss_tv(self,
media_info,
total,
lack=0,
state="D",
rss_sites=None,
search_sites=None,
over_edition=0,
filter_restype=None,
filter_pix=None,
filter_team=None,
filter_rule=None,
filter_include=None,
filter_exclude=None,
save_path=None,
download_setting=-1,
total_ep=None,
current_ep=None,
fuzzy_match=0,
desc=None,
note=None,
keyword=None):
"""
新增RSS电视剧
"""
if search_sites is None:
search_sites = []
if rss_sites is None:
rss_sites = []
if not media_info:
return -1
if not media_info.title:
return -1
if fuzzy_match and media_info.begin_season is None:
season_str = ""
else:
season_str = media_info.get_season_string()
if self.is_exists_rss_tv(media_info.title, media_info.year, season_str):
return 9
self._db.insert(RSSTVS(
NAME=media_info.title,
YEAR=media_info.year,
SEASON=season_str,
TMDBID=media_info.tmdb_id,
IMAGE=media_info.get_message_image(),
RSS_SITES=json.dumps(rss_sites),
SEARCH_SITES=json.dumps(search_sites),
OVER_EDITION=over_edition,
FILTER_RESTYPE=filter_restype,
FILTER_PIX=filter_pix,
FILTER_RULE=filter_rule,
FILTER_TEAM=filter_team,
FILTER_INCLUDE=filter_include,
FILTER_EXCLUDE=filter_exclude,
SAVE_PATH=save_path,
DOWNLOAD_SETTING=download_setting,
FUZZY_MATCH=fuzzy_match,
TOTAL_EP=total_ep,
CURRENT_EP=current_ep,
TOTAL=total,
LACK=lack,
STATE=state,
DESC=desc,
NOTE=note,
KEYWORD=keyword
))
return 0
@DbPersist(_db)
def update_rss_tv_lack(self, title=None, year=None, season=None, rssid=None, lack_episodes: list = None):
"""
更新电视剧缺失的集数
"""
if not title and not rssid:
return
if not lack_episodes:
lack = 0
else:
lack = len(lack_episodes)
if rssid:
self.update_rss_tv_episodes(rssid, lack_episodes)
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).update(
{
"LACK": lack
}
)
else:
self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.YEAR == str(year),
RSSTVS.SEASON == season).update(
{
"LACK": lack
}
)
@DbPersist(_db)
def delete_rss_tv(self, title=None, season=None, rssid=None, tmdbid=None):
"""
删除RSS电视剧
"""
if not title and not rssid:
return
if not rssid:
rssid = self.get_rss_tv_id(title=title, tmdbid=tmdbid, season=season)
if rssid:
self.delete_rss_tv_episodes(rssid)
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).delete()
def is_exists_rss_tv_episodes(self, rid):
"""
        判断RSS电视剧缺失剧集记录是否存在
"""
if not rid:
return False
count = self._db.query(RSSTVEPISODES).filter(RSSTVEPISODES.RSSID == int(rid)).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def update_rss_tv_episodes(self, rid, episodes):
"""
插入或更新电视剧订阅缺失剧集
"""
if not rid:
return
if not episodes:
episodes = []
else:
episodes = [str(epi) for epi in episodes]
if self.is_exists_rss_tv_episodes(rid):
self._db.query(RSSTVEPISODES).filter(RSSTVEPISODES.RSSID == int(rid)).update(
{
"EPISODES": ",".join(episodes)
}
)
else:
self._db.insert(RSSTVEPISODES(
RSSID=rid,
EPISODES=",".join(episodes)
))
def get_rss_tv_episodes(self, rid):
"""
查询电视剧订阅缺失剧集
"""
if not rid:
return []
ret = self._db.query(RSSTVEPISODES.EPISODES).filter(RSSTVEPISODES.RSSID == rid).first()
if ret:
return [int(epi) for epi in str(ret[0]).split(',')]
else:
return None
@DbPersist(_db)
def delete_rss_tv_episodes(self, rid):
"""
删除电视剧订阅缺失剧集
"""
if not rid:
return
self._db.query(RSSTVEPISODES).filter(RSSTVEPISODES.RSSID == int(rid)).delete()
@DbPersist(_db)
def update_rss_tv_state(self, title=None, year=None, season=None, rssid=None, state='R'):
"""
更新电视剧订阅状态
"""
if not title and not rssid:
return
if rssid:
self._db.query(RSSTVS).filter(RSSTVS.ID == int(rssid)).update(
{
"STATE": state
})
else:
self._db.query(RSSTVS).filter(RSSTVS.NAME == title,
RSSTVS.YEAR == str(year),
RSSTVS.SEASON == season).update(
{
"STATE": state
})
def is_sync_in_history(self, path, dest):
"""
查询是否存在同步历史记录
"""
if not path:
return False
count = self._db.query(SYNCHISTORY).filter(SYNCHISTORY.PATH == os.path.normpath(path),
SYNCHISTORY.DEST == os.path.normpath(dest)).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_sync_history(self, path, src, dest):
"""
        插入同步历史记录
"""
if not path or not dest:
return
if self.is_sync_in_history(path, dest):
return
else:
self._db.insert(SYNCHISTORY(
PATH=os.path.normpath(path),
SRC=os.path.normpath(src),
DEST=os.path.normpath(dest)
))
def get_users(self, uid=None, name=None):
"""
查询用户列表
"""
if uid:
return self._db.query(CONFIGUSERS).filter(CONFIGUSERS.ID == uid).first()
elif name:
return self._db.query(CONFIGUSERS).filter(CONFIGUSERS.NAME == name).first()
return self._db.query(CONFIGUSERS).all()
def is_user_exists(self, name):
"""
判断用户是否存在
"""
if not name:
return False
count = self._db.query(CONFIGUSERS).filter(CONFIGUSERS.NAME == name).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_user(self, name, password, pris):
"""
新增用户
"""
if not name or not password:
return
if self.is_user_exists(name):
return
else:
self._db.insert(CONFIGUSERS(
NAME=name,
PASSWORD=password,
PRIS=pris
))
@DbPersist(_db)
def delete_user(self, name):
"""
删除用户
"""
self._db.query(CONFIGUSERS).filter(CONFIGUSERS.NAME == name).delete()
def get_transfer_statistics(self, days=30):
"""
查询历史记录统计
"""
begin_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime("%Y-%m-%d %H:%M:%S")
return self._db.query(TRANSFERHISTORY.TYPE,
func.substr(TRANSFERHISTORY.DATE, 1, 10),
func.count('*')
).filter(TRANSFERHISTORY.DATE > begin_date).group_by(
TRANSFERHISTORY.TYPE, func.substr(TRANSFERHISTORY.DATE, 1, 10)
).order_by(TRANSFERHISTORY.DATE).all()
@DbPersist(_db)
def update_site_user_statistics_site_name(self, new_name, old_name):
"""
更新站点用户数据中站点名称
"""
self._db.query(SITEUSERINFOSTATS).filter(SITEUSERINFOSTATS.SITE == old_name).update(
{
"SITE": new_name
}
)
@DbPersist(_db)
def update_site_user_statistics(self, site_user_infos: list):
"""
更新站点用户粒度数据
"""
if not site_user_infos:
return
update_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
for site_user_info in site_user_infos:
site = site_user_info.site_name
username = site_user_info.username
user_level = site_user_info.user_level
join_at = site_user_info.join_at
upload = site_user_info.upload
download = site_user_info.download
ratio = site_user_info.ratio
seeding = site_user_info.seeding
seeding_size = site_user_info.seeding_size
leeching = site_user_info.leeching
bonus = site_user_info.bonus
url = site_user_info.site_url
msg_unread = site_user_info.message_unread
if not self.is_exists_site_user_statistics(url):
self._db.insert(SITEUSERINFOSTATS(
SITE=site,
USERNAME=username,
USER_LEVEL=user_level,
JOIN_AT=join_at,
UPDATE_AT=update_at,
UPLOAD=upload,
DOWNLOAD=download,
RATIO=ratio,
SEEDING=seeding,
LEECHING=leeching,
SEEDING_SIZE=seeding_size,
BONUS=bonus,
URL=url,
MSG_UNREAD=msg_unread
))
else:
self._db.query(SITEUSERINFOSTATS).filter(SITEUSERINFOSTATS.URL == url).update(
{
"SITE": site,
"USERNAME": username,
"USER_LEVEL": user_level,
"JOIN_AT": join_at,
"UPDATE_AT": update_at,
"UPLOAD": upload,
"DOWNLOAD": download,
"RATIO": ratio,
"SEEDING": seeding,
"LEECHING": leeching,
"SEEDING_SIZE": seeding_size,
"BONUS": bonus,
"MSG_UNREAD": msg_unread
}
)
def is_exists_site_user_statistics(self, url):
"""
        判断站点用户数据是否存在
"""
count = self._db.query(SITEUSERINFOSTATS).filter(SITEUSERINFOSTATS.URL == url).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def update_site_favicon(self, site_user_infos: list):
"""
更新站点图标数据
"""
if not site_user_infos:
return
for site_user_info in site_user_infos:
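            # Prefer the scraped base64 favicon; otherwise fall back to the
            # site's /favicon.ico URL.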
site_icon = "data:image/ico;base64," + \
site_user_info.site_favicon if site_user_info.site_favicon else site_user_info.site_url \
+ "/favicon.ico"
if not self.is_exists_site_favicon(site_user_info.site_name):
self._db.insert(SITEFAVICON(
SITE=site_user_info.site_name,
URL=site_user_info.site_url,
FAVICON=site_icon
))
elif site_user_info.site_favicon:
self._db.query(SITEFAVICON).filter(SITEFAVICON.SITE == site_user_info.site_name).update(
{
"URL": site_user_info.site_url,
"FAVICON": site_icon
}
)
def is_exists_site_favicon(self, site):
"""
判断站点图标是否存在
"""
count = self._db.query(SITEFAVICON).filter(SITEFAVICON.SITE == site).count()
if count > 0:
return True
else:
return False
def get_site_favicons(self, site=None):
"""
查询站点数据历史
"""
if site:
return self._db.query(SITEFAVICON).filter(SITEFAVICON.SITE == site).all()
else:
return self._db.query(SITEFAVICON).all()
@DbPersist(_db)
def update_site_seed_info_site_name(self, new_name, old_name):
"""
更新站点做种数据中站点名称
:param new_name: 新的站点名称
:param old_name: 原始站点名称
:return:
"""
self._db.query(SITEUSERSEEDINGINFO).filter(SITEUSERSEEDINGINFO.SITE == old_name).update(
{
"SITE": new_name
}
)
@DbPersist(_db)
def update_site_seed_info(self, site_user_infos: list):
"""
更新站点做种数据
"""
if not site_user_infos:
return
update_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
for site_user_info in site_user_infos:
if not self.is_site_seeding_info_exist(url=site_user_info.site_url):
self._db.insert(SITEUSERSEEDINGINFO(
SITE=site_user_info.site_name,
UPDATE_AT=update_at,
SEEDING_INFO=site_user_info.seeding_info,
URL=site_user_info.site_url
))
else:
self._db.query(SITEUSERSEEDINGINFO).filter(SITEUSERSEEDINGINFO.URL == site_user_info.site_url).update(
{
"SITE": site_user_info.site_name,
"UPDATE_AT": update_at,
"SEEDING_INFO": site_user_info.seeding_info
}
)
def is_site_user_statistics_exists(self, url):
"""
判断站点用户数据是否存在
"""
if not url:
return False
count = self._db.query(SITEUSERINFOSTATS).filter(SITEUSERINFOSTATS.URL == url).count()
if count > 0:
return True
else:
return False
def get_site_user_statistics(self, num=100, strict_urls=None):
"""
查询站点数据历史
"""
if strict_urls:
# 根据站点优先级排序
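            # "__DUMMY__" pads the IN clause so it can never be empty -- an
            # empty IN () would otherwise yield an always-false filter (and a
            # SQLAlchemy warning).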
return self._db.query(SITEUSERINFOSTATS) \
.join(CONFIGSITE, SITEUSERINFOSTATS.SITE == CONFIGSITE.NAME) \
.filter(SITEUSERINFOSTATS.URL.in_(tuple(strict_urls + ["__DUMMY__"]))) \
.order_by(cast(CONFIGSITE.PRI, Integer).asc()).limit(num).all()
else:
return self._db.query(SITEUSERINFOSTATS).limit(num).all()
def is_site_statistics_history_exists(self, url, date):
"""
判断站点历史数据是否存在
"""
if not url or not date:
return False
count = self._db.query(SITESTATISTICSHISTORY).filter(SITESTATISTICSHISTORY.URL == url,
SITESTATISTICSHISTORY.DATE == date).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def update_site_statistics_site_name(self, new_name, old_name):
"""
        更新站点统计历史数据中站点名称
:param new_name: 新站点名称
:param old_name: 原始站点名称
:return:
"""
self._db.query(SITESTATISTICSHISTORY).filter(SITESTATISTICSHISTORY.SITE == old_name).update(
{
"SITE": new_name
}
)
@DbPersist(_db)
def insert_site_statistics_history(self, site_user_infos: list):
"""
插入站点数据
"""
if not site_user_infos:
return
date_now = time.strftime('%Y-%m-%d', time.localtime(time.time()))
for site_user_info in site_user_infos:
site = site_user_info.site_name
upload = site_user_info.upload
user_level = site_user_info.user_level
download = site_user_info.download
ratio = site_user_info.ratio
seeding = site_user_info.seeding
seeding_size = site_user_info.seeding_size
leeching = site_user_info.leeching
bonus = site_user_info.bonus
url = site_user_info.site_url
if not self.is_site_statistics_history_exists(date=date_now, url=url):
self._db.insert(SITESTATISTICSHISTORY(
SITE=site,
USER_LEVEL=user_level,
DATE=date_now,
UPLOAD=upload,
DOWNLOAD=download,
RATIO=ratio,
SEEDING=seeding,
LEECHING=leeching,
SEEDING_SIZE=seeding_size,
BONUS=bonus,
URL=url
))
else:
self._db.query(SITESTATISTICSHISTORY).filter(SITESTATISTICSHISTORY.DATE == date_now,
SITESTATISTICSHISTORY.URL == url).update(
{
"SITE": site,
"USER_LEVEL": user_level,
"UPLOAD": upload,
"DOWNLOAD": download,
"RATIO": ratio,
"SEEDING": seeding,
"LEECHING": leeching,
"SEEDING_SIZE": seeding_size,
"BONUS": bonus
}
)
def get_site_statistics_history(self, site, days=30):
"""
查询站点数据历史
"""
return self._db.query(SITESTATISTICSHISTORY).filter(
SITESTATISTICSHISTORY.SITE == site).order_by(
SITESTATISTICSHISTORY.DATE.asc()
).limit(days)
def get_site_seeding_info(self, site):
"""
查询站点做种信息
"""
return self._db.query(SITEUSERSEEDINGINFO.SEEDING_INFO).filter(
SITEUSERSEEDINGINFO.SITE == site).first()
def is_site_seeding_info_exist(self, url):
"""
判断做种数据是否已存在
"""
count = self._db.query(SITEUSERSEEDINGINFO).filter(
SITEUSERSEEDINGINFO.URL == url).count()
if count > 0:
return True
else:
return False
def get_site_statistics_recent_sites(self, days=7, end_day=None, strict_urls=None):
"""
查询近期上传下载量
        :param days 统计窗口的天数;由于按 DATE > 起始日期 过滤并以窗口内最小值为基线,传入 7 实际得到约 6 天的增量数据
        :param end_day 结束日期,默认为当前日期
        :param strict_urls 需要统计的站点URL列表
        传入 7, "2020-01-01" 表示统计 2020-01-01 及其之前 6 天内的数据
"""
# 查询最大最小日期
if strict_urls is None:
strict_urls = []
end = datetime.datetime.now()
if end_day:
try:
end = datetime.datetime.strptime(end_day, "%Y-%m-%d")
            except (TypeError, ValueError):
                # Fall back to the current time when end_day is malformed
                pass
# 开始时间
b_date = (end - datetime.timedelta(days=days)).strftime("%Y-%m-%d")
# 结束时间
e_date = end.strftime("%Y-%m-%d")
# 大于开始时间范围里的最大日期与最小日期
date_ret = self._db.query(func.max(SITESTATISTICSHISTORY.DATE),
func.MIN(SITESTATISTICSHISTORY.DATE)).filter(
SITESTATISTICSHISTORY.DATE > b_date, SITESTATISTICSHISTORY.DATE <= e_date).all()
if date_ret and date_ret[0][0]:
total_upload = 0
total_download = 0
ret_site_uploads = []
ret_site_downloads = []
min_date = date_ret[0][1]
max_date = date_ret[0][0]
# 查询开始值
if strict_urls:
subquery = self._db.query(SITESTATISTICSHISTORY.SITE.label("SITE"),
SITESTATISTICSHISTORY.DATE.label("DATE"),
func.sum(SITESTATISTICSHISTORY.UPLOAD).label("UPLOAD"),
func.sum(SITESTATISTICSHISTORY.DOWNLOAD).label("DOWNLOAD")).filter(
SITESTATISTICSHISTORY.DATE >= min_date,
SITESTATISTICSHISTORY.DATE <= max_date,
SITESTATISTICSHISTORY.URL.in_(tuple(strict_urls + ["__DUMMY__"]))
).group_by(SITESTATISTICSHISTORY.SITE, SITESTATISTICSHISTORY.DATE).subquery()
else:
subquery = self._db.query(SITESTATISTICSHISTORY.SITE.label("SITE"),
SITESTATISTICSHISTORY.DATE.label("DATE"),
func.sum(SITESTATISTICSHISTORY.UPLOAD).label("UPLOAD"),
func.sum(SITESTATISTICSHISTORY.DOWNLOAD).label("DOWNLOAD")).filter(
SITESTATISTICSHISTORY.DATE >= min_date,
SITESTATISTICSHISTORY.DATE <= max_date
).group_by(SITESTATISTICSHISTORY.SITE, SITESTATISTICSHISTORY.DATE).subquery()
# 查询大于开始时间范围里的单日,单站点 最大值与最小值
rets = self._db.query(subquery.c.SITE,
func.min(subquery.c.UPLOAD),
func.min(subquery.c.DOWNLOAD),
func.max(subquery.c.UPLOAD),
func.max(subquery.c.DOWNLOAD)).group_by(subquery.c.SITE).all()
ret_sites = []
for ret_b in rets:
                # 如果最小值都是0,可能是由于近几日没有更新数据,或者Cookie过期;正常有数据的话,第二天即可恢复正常
ret_b = list(ret_b)
if ret_b[1] == 0 and ret_b[2] == 0:
ret_b[1] = ret_b[3]
ret_b[2] = ret_b[4]
ret_sites.append(ret_b[0])
if int(ret_b[1]) < int(ret_b[3]):
total_upload += int(ret_b[3]) - int(ret_b[1])
ret_site_uploads.append(int(ret_b[3]) - int(ret_b[1]))
else:
ret_site_uploads.append(0)
if int(ret_b[2]) < int(ret_b[4]):
total_download += int(ret_b[4]) - int(ret_b[2])
ret_site_downloads.append(int(ret_b[4]) - int(ret_b[2]))
else:
ret_site_downloads.append(0)
return total_upload, total_download, ret_sites, ret_site_uploads, ret_site_downloads
else:
return 0, 0, [], [], []
def is_exists_download_history(self, enclosure, downloader, download_id):
"""
查询下载历史是否存在
"""
if enclosure:
count = self._db.query(DOWNLOADHISTORY).filter(
DOWNLOADHISTORY.ENCLOSURE == enclosure
).count()
else:
count = self._db.query(DOWNLOADHISTORY).filter(
DOWNLOADHISTORY.DOWNLOADER == downloader,
DOWNLOADHISTORY.DOWNLOAD_ID == download_id
).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_download_history(self, media_info, downloader, download_id, save_dir):
"""
新增下载历史
"""
if not media_info:
return
if not media_info.title or not media_info.tmdb_id:
return
if self.is_exists_download_history(enclosure=media_info.enclosure,
downloader=downloader,
download_id=download_id):
self._db.query(DOWNLOADHISTORY).filter(DOWNLOADHISTORY.ENCLOSURE == media_info.enclosure,
DOWNLOADHISTORY.DOWNLOADER == downloader,
DOWNLOADHISTORY.DOWNLOAD_ID == download_id).update(
{
"TORRENT": media_info.org_string,
"ENCLOSURE": media_info.enclosure,
"DESC": media_info.description,
"SITE": media_info.site,
"DOWNLOADER": downloader,
"DOWNLOAD_ID": download_id,
"SAVE_PATH": save_dir,
"SE": media_info.get_season_episode_string(),
"DATE": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
}
)
else:
self._db.insert(DOWNLOADHISTORY(
TITLE=media_info.title,
YEAR=media_info.year,
TYPE=media_info.type.value,
TMDBID=media_info.tmdb_id,
VOTE=media_info.vote_average,
POSTER=media_info.get_poster_image(),
OVERVIEW=media_info.overview,
TORRENT=media_info.org_string,
ENCLOSURE=media_info.enclosure,
DESC=media_info.description,
DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
SITE=media_info.site,
DOWNLOADER=downloader,
DOWNLOAD_ID=download_id,
SAVE_PATH=save_dir,
SE=media_info.get_season_episode_string()
))
def get_download_history(self, date=None, hid=None, num=30, page=1):
"""
查询下载历史
"""
if hid:
return self._db.query(DOWNLOADHISTORY).filter(DOWNLOADHISTORY.ID == int(hid)).all()
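        # Deduplicate by title: the subquery keeps the row with the latest
        # DATE per TITLE. Selecting full rows alongside max(DATE) relies on
        # SQLite's bare-column behaviour for aggregate queries (assumption:
        # the backing store is SQLite).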
sub_query = self._db.query(DOWNLOADHISTORY,
func.max(DOWNLOADHISTORY.DATE)
).group_by(DOWNLOADHISTORY.TITLE).subquery()
if date:
return self._db.query(DOWNLOADHISTORY).filter(
DOWNLOADHISTORY.DATE > date).join(
sub_query,
and_(sub_query.c.ID == DOWNLOADHISTORY.ID)
).order_by(DOWNLOADHISTORY.DATE.desc()).all()
else:
offset = (int(page) - 1) * int(num)
return self._db.query(DOWNLOADHISTORY).join(
sub_query,
and_(sub_query.c.ID == DOWNLOADHISTORY.ID)
).order_by(
DOWNLOADHISTORY.DATE.desc()
).limit(num).offset(offset).all()
def get_download_history_by_title(self, title):
"""
根据标题查找下载历史
"""
return self._db.query(DOWNLOADHISTORY).filter(DOWNLOADHISTORY.TITLE == title).all()
def get_download_history_by_path(self, path):
"""
根据路径查找下载历史
"""
return self._db.query(DOWNLOADHISTORY).filter(
DOWNLOADHISTORY.SAVE_PATH == os.path.normpath(path)
).order_by(DOWNLOADHISTORY.DATE.desc()).first()
def get_download_history_by_downloader(self, downloader, download_id):
"""
根据下载器查找下载历史
"""
return self._db.query(DOWNLOADHISTORY).filter(
DOWNLOADHISTORY.DOWNLOADER == downloader,
DOWNLOADHISTORY.DOWNLOAD_ID == download_id
).order_by(DOWNLOADHISTORY.DATE.desc()).first()
@DbPersist(_db)
def update_brushtask(self, brush_id, item):
"""
        新增或更新刷流任务
"""
if not brush_id:
self._db.insert(SITEBRUSHTASK(
NAME=item.get('name'),
SITE=item.get('site'),
FREELEECH=item.get('free'),
RSS_RULE=str(item.get('rss_rule')),
REMOVE_RULE=str(item.get('remove_rule')),
SEED_SIZE=item.get('seed_size'),
RSSURL=item.get('rssurl'),
INTEVAL=item.get('interval'),
DOWNLOADER=item.get('downloader'),
LABEL=item.get('label'),
UP_LIMIT=item.get('up_limit'),
DL_LIMIT=item.get('dl_limit'),
SAVEPATH=item.get('savepath'),
TRANSFER=item.get('transfer'),
DOWNLOAD_COUNT=0,
REMOVE_COUNT=0,
DOWNLOAD_SIZE=0,
UPLOAD_SIZE=0,
STATE=item.get('state'),
LST_MOD_DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
SENDMESSAGE=item.get('sendmessage')
))
else:
self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(brush_id)).update(
{
"NAME": item.get('name'),
"SITE": item.get('site'),
"FREELEECH": item.get('free'),
"RSS_RULE": str(item.get('rss_rule')),
"REMOVE_RULE": str(item.get('remove_rule')),
"SEED_SIZE": item.get('seed_size'),
"RSSURL": item.get('rssurl'),
"INTEVAL": item.get('interval'),
"DOWNLOADER": item.get('downloader'),
"LABEL": item.get('label'),
"UP_LIMIT": item.get('up_limit'),
"DL_LIMIT": item.get('dl_limit'),
"SAVEPATH": item.get('savepath'),
"TRANSFER": item.get('transfer'),
"STATE": item.get('state'),
"LST_MOD_DATE": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
"SENDMESSAGE": item.get('sendmessage')
}
)
@DbPersist(_db)
def delete_brushtask(self, brush_id):
"""
删除刷流任务
"""
self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(brush_id)).delete()
self._db.query(SITEBRUSHTORRENTS).filter(SITEBRUSHTORRENTS.TASK_ID == brush_id).delete()
def get_brushtasks(self, brush_id=None):
"""
查询刷流任务
"""
if brush_id:
return self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(brush_id)).first()
else:
# 根据站点优先级排序
return self._db.query(SITEBRUSHTASK) \
.join(CONFIGSITE, SITEBRUSHTASK.SITE == CONFIGSITE.ID) \
.order_by(cast(CONFIGSITE.PRI, Integer).asc()).all()
def get_brushtask_totalsize(self, brush_id):
"""
查询刷流任务总体积
"""
if not brush_id:
return 0
ret = self._db.query(func.sum(cast(SITEBRUSHTORRENTS.TORRENT_SIZE,
Integer))).filter(SITEBRUSHTORRENTS.TASK_ID == brush_id,
SITEBRUSHTORRENTS.DOWNLOAD_ID != '0').first()
if ret:
return ret[0] or 0
else:
return 0
@DbPersist(_db)
def update_brushtask_state(self, state, tid=None):
"""
        改变刷流任务的状态(未指定tid时更新所有任务)
"""
if tid:
self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(tid)).update(
{
"STATE": "Y" if state == "Y" else "N"
}
)
else:
self._db.query(SITEBRUSHTASK).update(
{
"STATE": "Y" if state == "Y" else "N"
}
)
@DbPersist(_db)
def add_brushtask_download_count(self, brush_id):
"""
增加刷流下载数
"""
if not brush_id:
return
self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(brush_id)).update(
{
"DOWNLOAD_COUNT": SITEBRUSHTASK.DOWNLOAD_COUNT + 1,
"LST_MOD_DATE": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
}
)
def get_brushtask_remove_size(self, brush_id):
"""
        获取已删除种子记录的上传/下载量数据
"""
if not brush_id:
return 0
return self._db.query(SITEBRUSHTORRENTS.TORRENT_SIZE).filter(SITEBRUSHTORRENTS.TASK_ID == brush_id,
SITEBRUSHTORRENTS.DOWNLOAD_ID == '0').all()
@DbPersist(_db)
def add_brushtask_upload_count(self, brush_id, upload_size, download_size, remove_count):
"""
更新上传下载量和删除种子数
"""
if not brush_id:
return
delete_upsize = 0
delete_dlsize = 0
remove_sizes = self.get_brushtask_remove_size(brush_id)
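        # For removed torrents TORRENT_SIZE appears to be reused to hold the
        # final stats, either "uploaded" or "uploaded,downloaded" (see
        # update_brushtask_torrent_state), hence the string splitting below.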
for remove_size in remove_sizes:
if not remove_size[0]:
continue
if str(remove_size[0]).find(",") != -1:
sizes = str(remove_size[0]).split(",")
delete_upsize += int(sizes[0] or 0)
if len(sizes) > 1:
delete_dlsize += int(sizes[1] or 0)
else:
delete_upsize += int(remove_size[0])
self._db.query(SITEBRUSHTASK).filter(SITEBRUSHTASK.ID == int(brush_id)).update({
"REMOVE_COUNT": SITEBRUSHTASK.REMOVE_COUNT + remove_count,
"UPLOAD_SIZE": int(upload_size) + delete_upsize,
"DOWNLOAD_SIZE": int(download_size) + delete_dlsize,
})
@DbPersist(_db)
def insert_brushtask_torrent(self, brush_id, title, enclosure, downloader, download_id, size):
"""
增加刷流下载的种子信息
"""
if not brush_id:
return
if self.is_brushtask_torrent_exists(brush_id, title, enclosure):
return
self._db.insert(SITEBRUSHTORRENTS(
TASK_ID=brush_id,
TORRENT_NAME=title,
TORRENT_SIZE=size,
ENCLOSURE=enclosure,
DOWNLOADER=downloader,
DOWNLOAD_ID=download_id,
LST_MOD_DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
))
def get_brushtask_torrents(self, brush_id, active=True):
"""
查询刷流任务所有种子
"""
if not brush_id:
return []
if active:
return self._db.query(SITEBRUSHTORRENTS).filter(
SITEBRUSHTORRENTS.TASK_ID == int(brush_id),
SITEBRUSHTORRENTS.DOWNLOAD_ID != '0').all()
else:
return self._db.query(SITEBRUSHTORRENTS).filter(
SITEBRUSHTORRENTS.TASK_ID == int(brush_id)
).order_by(SITEBRUSHTORRENTS.LST_MOD_DATE.desc()).all()
def get_brushtask_torrent_by_enclosure(self, enclosure):
"""
根据URL查询刷流任务种子
"""
if not enclosure:
return None
return self._db.query(SITEBRUSHTORRENTS).filter(SITEBRUSHTORRENTS.ENCLOSURE == enclosure).first()
def is_brushtask_torrent_exists(self, brush_id, title, enclosure):
"""
查询刷流任务种子是否已存在
"""
if not brush_id:
return False
count = self._db.query(SITEBRUSHTORRENTS).filter(SITEBRUSHTORRENTS.TASK_ID == brush_id,
SITEBRUSHTORRENTS.TORRENT_NAME == title,
SITEBRUSHTORRENTS.ENCLOSURE == enclosure).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def update_brushtask_torrent_state(self, ids: list):
"""
更新刷流种子的状态
"""
if not ids:
return
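        # Each element of `ids` is a (final_size, task_id, download_id) tuple;
        # DOWNLOAD_ID is reset to '0' to mark the torrent as removed while its
        # statistics are kept for later accounting.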
for _id in ids:
self._db.query(SITEBRUSHTORRENTS).filter(SITEBRUSHTORRENTS.TASK_ID == _id[1],
SITEBRUSHTORRENTS.DOWNLOAD_ID == _id[2]).update(
{
"TORRENT_SIZE": _id[0],
"DOWNLOAD_ID": '0'
}
)
@DbPersist(_db)
def delete_brushtask_torrent(self, brush_id, download_id):
"""
删除刷流种子记录
"""
if not download_id or not brush_id:
return
self._db.query(SITEBRUSHTORRENTS).filter(SITEBRUSHTORRENTS.TASK_ID == brush_id,
SITEBRUSHTORRENTS.DOWNLOAD_ID == download_id).delete()
@DbPersist(_db)
def add_filter_group(self, name, default='N'):
"""
新增规则组
"""
if default == 'Y':
self.set_default_filtergroup(0)
group_id = self.get_filter_groupid_by_name(name)
if group_id:
self._db.query(CONFIGFILTERGROUP).filter(CONFIGFILTERGROUP.ID == int(group_id)).update({
"IS_DEFAULT": default
})
else:
self._db.insert(CONFIGFILTERGROUP(
GROUP_NAME=name,
IS_DEFAULT=default
))
def get_filter_groupid_by_name(self, name):
ret = self._db.query(CONFIGFILTERGROUP.ID).filter(CONFIGFILTERGROUP.GROUP_NAME == name).first()
if ret:
return ret[0]
else:
return ""
@DbPersist(_db)
def set_default_filtergroup(self, groupid):
"""
设置默认的规则组
"""
self._db.query(CONFIGFILTERGROUP).filter(CONFIGFILTERGROUP.ID == int(groupid)).update({
"IS_DEFAULT": 'Y'
})
self._db.query(CONFIGFILTERGROUP).filter(CONFIGFILTERGROUP.ID != int(groupid)).update({
"IS_DEFAULT": 'N'
})
@DbPersist(_db)
def delete_filtergroup(self, groupid):
"""
删除规则组
"""
self._db.query(CONFIGFILTERRULES).filter(CONFIGFILTERRULES.GROUP_ID == groupid).delete()
self._db.query(CONFIGFILTERGROUP).filter(CONFIGFILTERGROUP.ID == int(groupid)).delete()
@DbPersist(_db)
def delete_filterrule(self, ruleid):
"""
删除规则
"""
self._db.query(CONFIGFILTERRULES).filter(CONFIGFILTERRULES.ID == int(ruleid)).delete()
@DbPersist(_db)
def insert_filter_rule(self, item, ruleid=None):
"""
新增规则
"""
if ruleid:
self._db.query(CONFIGFILTERRULES).filter(CONFIGFILTERRULES.ID == int(ruleid)).update(
{
"ROLE_NAME": item.get("name"),
"PRIORITY": item.get("pri"),
"INCLUDE": item.get("include"),
"EXCLUDE": item.get("exclude"),
"SIZE_LIMIT": item.get("size"),
"NOTE": item.get("free")
}
)
else:
self._db.insert(CONFIGFILTERRULES(
GROUP_ID=item.get("group"),
ROLE_NAME=item.get("name"),
PRIORITY=item.get("pri"),
INCLUDE=item.get("include"),
EXCLUDE=item.get("exclude"),
SIZE_LIMIT=item.get("size"),
NOTE=item.get("free")
))
def get_userrss_tasks(self, tid=None):
if tid:
return self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).all()
else:
return self._db.query(CONFIGUSERRSS).order_by(CONFIGUSERRSS.STATE.desc()).all()
@DbPersist(_db)
def delete_userrss_task(self, tid):
if not tid:
return
self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).delete()
@DbPersist(_db)
def update_userrss_task_info(self, tid, count):
if not tid:
return
self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).update(
{
"PROCESS_COUNT": CONFIGUSERRSS.PROCESS_COUNT + count,
"UPDATE_TIME": time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time()))
}
)
@DbPersist(_db)
def update_userrss_task(self, item):
if item.get("id") and self.get_userrss_tasks(item.get("id")):
self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(item.get("id"))).update(
{
"NAME": item.get("name"),
"ADDRESS": json.dumps(item.get("address")),
"PARSER": json.dumps(item.get("parser")),
"INTERVAL": item.get("interval"),
"USES": item.get("uses"),
"INCLUDE": item.get("include"),
"EXCLUDE": item.get("exclude"),
"FILTER": item.get("filter_rule"),
"UPDATE_TIME": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
"STATE": item.get("state"),
"SAVE_PATH": item.get("save_path"),
"DOWNLOAD_SETTING": item.get("download_setting"),
"RECOGNIZATION": item.get("recognization"),
"OVER_EDITION": int(item.get("over_edition")) if str(item.get("over_edition")).isdigit() else 0,
"SITES": json.dumps(item.get("sites")),
"FILTER_ARGS": json.dumps(item.get("filter_args")),
"NOTE": json.dumps(item.get("note"))
}
)
else:
self._db.insert(CONFIGUSERRSS(
NAME=item.get("name"),
ADDRESS=json.dumps(item.get("address")),
PARSER=json.dumps(item.get("parser")),
INTERVAL=item.get("interval"),
USES=item.get("uses"),
INCLUDE=item.get("include"),
EXCLUDE=item.get("exclude"),
FILTER=item.get("filter_rule"),
UPDATE_TIME=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
STATE=item.get("state"),
SAVE_PATH=item.get("save_path"),
DOWNLOAD_SETTING=item.get("download_setting"),
RECOGNIZATION=item.get("recognization"),
OVER_EDITION=item.get("over_edition"),
SITES=json.dumps(item.get("sites")),
FILTER_ARGS=json.dumps(item.get("filter_args")),
NOTE=json.dumps(item.get("note")),
PROCESS_COUNT='0'
))
@DbPersist(_db)
def check_userrss_task(self, tid=None, state=None):
if state is None:
return
if tid:
self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).update(
{
"STATE": state
}
)
else:
self._db.query(CONFIGUSERRSS).update(
{
"STATE": state
}
)
@DbPersist(_db)
def insert_userrss_mediainfos(self, tid=None, mediainfo=None):
if not tid or not mediainfo:
return
taskinfo = self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).all()
if not taskinfo:
return
mediainfos = json.loads(taskinfo[0].MEDIAINFOS) if taskinfo[0].MEDIAINFOS else []
tmdbid = str(mediainfo.tmdb_id)
season = int(mediainfo.get_season_seq())
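        # MEDIAINFOS is a JSON array on the task row; skip the insert when
        # this (tmdbid, season) pair has already been recorded.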
for media in mediainfos:
if media.get("id") == tmdbid and media.get("season") == season:
return
mediainfos.append({
"id": tmdbid,
"rssid": "",
"season": season,
"name": mediainfo.title
})
self._db.query(CONFIGUSERRSS).filter(CONFIGUSERRSS.ID == int(tid)).update(
{
"MEDIAINFOS": json.dumps(mediainfos)
})
def get_userrss_parser(self, pid=None):
if pid:
return self._db.query(CONFIGRSSPARSER).filter(CONFIGRSSPARSER.ID == int(pid)).first()
else:
return self._db.query(CONFIGRSSPARSER).all()
@DbPersist(_db)
def delete_userrss_parser(self, pid):
if not pid:
return
self._db.query(CONFIGRSSPARSER).filter(CONFIGRSSPARSER.ID == int(pid)).delete()
@DbPersist(_db)
def update_userrss_parser(self, item):
if not item:
return
if item.get("id") and self.get_userrss_parser(item.get("id")):
self._db.query(CONFIGRSSPARSER).filter(CONFIGRSSPARSER.ID == int(item.get("id"))).update(
{
"NAME": item.get("name"),
"TYPE": item.get("type"),
"FORMAT": item.get("format"),
"PARAMS": item.get("params")
}
)
else:
self._db.insert(CONFIGRSSPARSER(
NAME=item.get("name"),
TYPE=item.get("type"),
FORMAT=item.get("format"),
PARAMS=item.get("params")
))
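    # "excute" (sic) mirrors the misspelled method on the underlying DB
    # wrapper; it runs a raw SQL statement, so the spelling must match that
    # API.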
@DbPersist(_db)
def excute(self, sql):
return self._db.excute(sql)
@DbPersist(_db)
def drop_table(self, table_name):
return self._db.excute(f"""DROP TABLE IF EXISTS {table_name}""")
@DbPersist(_db)
def insert_userrss_task_history(self, task_id, title, downloader):
"""
增加自定义RSS订阅任务的下载记录
"""
self._db.insert(USERRSSTASKHISTORY(
TASK_ID=task_id,
TITLE=title,
DOWNLOADER=downloader,
DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
))
def get_userrss_task_history(self, task_id):
"""
查询自定义RSS订阅任务的下载记录
"""
if not task_id:
return []
return self._db.query(USERRSSTASKHISTORY).filter(USERRSSTASKHISTORY.TASK_ID == task_id) \
.order_by(USERRSSTASKHISTORY.DATE.desc()).all()
def get_rss_history(self, rtype=None, rid=None):
"""
查询RSS历史
"""
if rid:
return self._db.query(RSSHISTORY).filter(RSSHISTORY.ID == int(rid)).all()
elif rtype:
return self._db.query(RSSHISTORY).filter(RSSHISTORY.TYPE == rtype) \
.order_by(RSSHISTORY.FINISH_TIME.desc()).all()
return self._db.query(RSSHISTORY).order_by(RSSHISTORY.FINISH_TIME.desc()).all()
def is_exists_rss_history(self, rssid):
"""
判断RSS历史是否存在
"""
if not rssid:
return False
count = self._db.query(RSSHISTORY).filter(RSSHISTORY.RSSID == rssid).count()
if count > 0:
return True
else:
return False
def check_rss_history(self, type_str, name, year, season):
"""
检查RSS历史是否存在
"""
count = self._db.query(RSSHISTORY).filter(
RSSHISTORY.TYPE == type_str,
RSSHISTORY.NAME == name,
RSSHISTORY.YEAR == year,
RSSHISTORY.SEASON == season
).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_rss_history(self, rssid, rtype, name, year, tmdbid, image, desc, season=None, total=None, start=None):
"""
登记RSS历史
"""
if not self.is_exists_rss_history(rssid):
self._db.insert(RSSHISTORY(
TYPE=rtype,
RSSID=rssid,
NAME=name,
YEAR=year,
TMDBID=tmdbid,
SEASON=season,
IMAGE=image,
DESC=desc,
TOTAL=total,
START=start,
FINISH_TIME=time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time()))
))
@DbPersist(_db)
def delete_rss_history(self, rssid):
"""
删除RSS历史
"""
if not rssid:
return
self._db.query(RSSHISTORY).filter(RSSHISTORY.ID == int(rssid)).delete()
@DbPersist(_db)
def insert_custom_word(self, replaced, replace, front, back, offset, wtype, gid, season, enabled, regex, whelp,
note=None):
"""
增加自定义识别词
"""
self._db.insert(CUSTOMWORDS(
REPLACED=replaced,
REPLACE=replace,
FRONT=front,
BACK=back,
OFFSET=offset,
TYPE=int(wtype),
GROUP_ID=int(gid),
SEASON=int(season),
ENABLED=int(enabled),
REGEX=int(regex),
HELP=whelp,
NOTE=note
))
@DbPersist(_db)
def delete_custom_word(self, wid=None):
"""
删除自定义识别词
"""
        if not wid:
            self._db.query(CUSTOMWORDS).delete()
            return
        self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.ID == int(wid)).delete()
@DbPersist(_db)
def check_custom_word(self, wid=None, enabled=None):
"""
设置自定义识别词状态
"""
if enabled is None:
return
if wid:
self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.ID == int(wid)).update(
{
"ENABLED": int(enabled)
}
)
else:
self._db.query(CUSTOMWORDS).update(
{
"ENABLED": int(enabled)
}
)
def get_custom_words(self, wid=None, gid=None, enabled=None):
"""
查询自定义识别词
"""
if wid:
return self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.ID == int(wid)).all()
elif gid:
return self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.GROUP_ID == int(gid)) \
.order_by(CUSTOMWORDS.ENABLED.desc(), CUSTOMWORDS.TYPE, CUSTOMWORDS.REGEX, CUSTOMWORDS.ID).all()
elif enabled is not None:
return self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.ENABLED == int(enabled)) \
.order_by(CUSTOMWORDS.GROUP_ID, CUSTOMWORDS.TYPE, CUSTOMWORDS.REGEX, CUSTOMWORDS.ID).all()
return self._db.query(CUSTOMWORDS) \
.order_by(CUSTOMWORDS.GROUP_ID,
CUSTOMWORDS.ENABLED.desc(),
CUSTOMWORDS.TYPE,
CUSTOMWORDS.REGEX,
CUSTOMWORDS.ID) \
.all()
def is_custom_words_existed(self, replaced=None, front=None, back=None):
"""
        判断自定义识别词是否存在
"""
if replaced:
count = self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.REPLACED == replaced).count()
elif front and back:
count = self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.FRONT == front,
CUSTOMWORDS.BACK == back).count()
else:
return False
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_custom_word_groups(self, title, year, gtype, tmdbid, season_count, note=None):
"""
增加自定义识别词组
"""
self._db.insert(CUSTOMWORDGROUPS(
TITLE=title,
YEAR=year,
TYPE=int(gtype),
TMDBID=int(tmdbid),
SEASON_COUNT=int(season_count),
NOTE=note
))
@DbPersist(_db)
def delete_custom_word_group(self, gid):
"""
删除自定义识别词组
"""
if not gid:
return
self._db.query(CUSTOMWORDS).filter(CUSTOMWORDS.GROUP_ID == int(gid)).delete()
self._db.query(CUSTOMWORDGROUPS).filter(CUSTOMWORDGROUPS.ID == int(gid)).delete()
def get_custom_word_groups(self, gid=None, tmdbid=None, gtype=None):
"""
查询自定义识别词组
"""
if gid:
return self._db.query(CUSTOMWORDGROUPS).filter(CUSTOMWORDGROUPS.ID == int(gid)).all()
if tmdbid and gtype:
return self._db.query(CUSTOMWORDGROUPS).filter(CUSTOMWORDGROUPS.TMDBID == int(tmdbid),
CUSTOMWORDGROUPS.TYPE == int(gtype)).all()
return self._db.query(CUSTOMWORDGROUPS).all()
def is_custom_word_group_existed(self, tmdbid=None, gtype=None):
"""
        判断自定义识别词组是否存在
"""
if not gtype or not tmdbid:
return False
count = self._db.query(CUSTOMWORDGROUPS).filter(CUSTOMWORDGROUPS.TMDBID == int(tmdbid),
CUSTOMWORDGROUPS.TYPE == int(gtype)).count()
if count > 0:
return True
else:
return False
@DbPersist(_db)
def insert_config_sync_path(self, source, dest, unknown, mode, compatibility, rename, enabled, locating, note=None):
"""
增加目录同步
"""
return self._db.insert(CONFIGSYNCPATHS(
SOURCE=source,
DEST=dest,
UNKNOWN=unknown,
MODE=mode,
COMPATIBILITY=int(compatibility),
RENAME=int(rename),
ENABLED=int(enabled),
LOCATING=int(locating),
NOTE=note
))
@DbPersist(_db)
def delete_config_sync_path(self, sid):
"""
删除目录同步
"""
if not sid:
return
self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).delete()
def get_config_sync_paths(self, sid=None):
"""
查询目录同步
"""
if sid:
return self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).all()
return self._db.query(CONFIGSYNCPATHS).order_by(CONFIGSYNCPATHS.SOURCE).all()
@DbPersist(_db)
def check_config_sync_paths(self, sid=None, compatibility=None, rename=None, enabled=None, locating=None):
"""
设置目录同步状态
"""
if sid and rename is not None:
self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).update(
{
"RENAME": int(rename)
}
)
elif sid and enabled is not None:
self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).update(
{
"ENABLED": int(enabled)
}
)
elif sid and compatibility is not None:
self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).update(
{
"COMPATIBILITY": int(compatibility)
}
)
elif sid and locating is not None:
self._db.query(CONFIGSYNCPATHS).filter(CONFIGSYNCPATHS.ID == int(sid)).update(
{
"LOCATING": int(locating)
}
)
@DbPersist(_db)
def delete_download_setting(self, sid):
"""
删除下载设置
"""
if not sid:
return
self._db.query(DOWNLOADSETTING).filter(DOWNLOADSETTING.ID == int(sid)).delete()
def get_download_setting(self, sid=None):
"""
查询下载设置
"""
if sid:
return self._db.query(DOWNLOADSETTING).filter(DOWNLOADSETTING.ID == int(sid)).all()
return self._db.query(DOWNLOADSETTING).all()
@DbPersist(_db)
def update_download_setting(self,
sid,
name,
category,
tags,
is_paused,
upload_limit,
download_limit,
ratio_limit,
seeding_time_limit,
downloader):
"""
设置下载设置
"""
if sid:
self._db.query(DOWNLOADSETTING).filter(DOWNLOADSETTING.ID == int(sid)).update(
{
"NAME": name,
"CATEGORY": category,
"TAGS": tags,
"IS_PAUSED": int(is_paused),
"UPLOAD_LIMIT": int(float(upload_limit)),
"DOWNLOAD_LIMIT": int(float(download_limit)),
"RATIO_LIMIT": int(round(float(ratio_limit), 2) * 100),
"SEEDING_TIME_LIMIT": int(float(seeding_time_limit)),
"DOWNLOADER": downloader
}
)
else:
self._db.insert(DOWNLOADSETTING(
NAME=name,
CATEGORY=category,
TAGS=tags,
IS_PAUSED=int(is_paused),
UPLOAD_LIMIT=int(float(upload_limit)),
DOWNLOAD_LIMIT=int(float(download_limit)),
RATIO_LIMIT=int(round(float(ratio_limit), 2) * 100),
SEEDING_TIME_LIMIT=int(float(seeding_time_limit)),
DOWNLOADER=downloader
))
@DbPersist(_db)
def delete_message_client(self, cid):
"""
删除消息服务器
"""
if not cid:
return
self._db.query(MESSAGECLIENT).filter(MESSAGECLIENT.ID == int(cid)).delete()
def get_message_client(self, cid=None):
"""
查询消息服务器
"""
if cid:
return self._db.query(MESSAGECLIENT).filter(MESSAGECLIENT.ID == int(cid)).all()
return self._db.query(MESSAGECLIENT).all()
@DbPersist(_db)
def insert_message_client(self,
name,
ctype,
config,
switchs: list,
interactive,
enabled,
note=''):
"""
设置消息服务器
"""
self._db.insert(MESSAGECLIENT(
NAME=name,
TYPE=ctype,
CONFIG=config,
SWITCHS=json.dumps(switchs),
INTERACTIVE=int(interactive),
ENABLED=int(enabled),
NOTE=note
))
@DbPersist(_db)
def check_message_client(self, cid=None, interactive=None, enabled=None, ctype=None):
"""
        设置消息服务器状态
"""
if cid and interactive is not None:
self._db.query(MESSAGECLIENT).filter(MESSAGECLIENT.ID == int(cid)).update(
{
"INTERACTIVE": int(interactive)
}
)
elif cid and enabled is not None:
self._db.query(MESSAGECLIENT).filter(MESSAGECLIENT.ID == int(cid)).update(
{
"ENABLED": int(enabled)
}
)
        elif not cid and interactive is not None and int(interactive) == 0 and ctype:
self._db.query(MESSAGECLIENT).filter(MESSAGECLIENT.INTERACTIVE == 1,
MESSAGECLIENT.TYPE == ctype).update(
{
"INTERACTIVE": 0
}
)
@DbPersist(_db)
def delete_torrent_remove_task(self, tid):
"""
删除自动删种策略
"""
if not tid:
return
self._db.query(TORRENTREMOVETASK).filter(TORRENTREMOVETASK.ID == int(tid)).delete()
def get_torrent_remove_tasks(self, tid=None):
"""
查询自动删种策略
"""
if tid:
return self._db.query(TORRENTREMOVETASK).filter(TORRENTREMOVETASK.ID == int(tid)).all()
return self._db.query(TORRENTREMOVETASK).order_by(TORRENTREMOVETASK.NAME).all()
@DbPersist(_db)
def insert_torrent_remove_task(self,
name,
action,
interval,
enabled,
samedata,
onlynastool,
downloader,
config: dict,
note=None):
"""
设置自动删种策略
"""
self._db.insert(TORRENTREMOVETASK(
NAME=name,
ACTION=int(action),
INTERVAL=int(interval),
ENABLED=int(enabled),
SAMEDATA=int(samedata),
ONLY_NASTOOL=int(onlynastool),
DOWNLOADER=downloader,
CONFIG=json.dumps(config),
NOTE=note
))
@DbPersist(_db)
def update_downloader(self,
did,
name,
enabled,
dtype,
transfer,
only_nastool,
match_path,
rmt_mode,
config,
download_dir):
"""
更新下载器
"""
if did:
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).update(
{
"NAME": name,
"ENABLED": int(enabled),
"TYPE": dtype,
"TRANSFER": int(transfer),
"ONLY_NASTOOL": int(only_nastool),
"MATCH_PATH": int(match_path),
"RMT_MODE": rmt_mode,
"CONFIG": config,
"DOWNLOAD_DIR": download_dir
}
)
else:
self._db.insert(DOWNLOADER(
NAME=name,
ENABLED=int(enabled),
TYPE=dtype,
TRANSFER=int(transfer),
ONLY_NASTOOL=int(only_nastool),
MATCH_PATH=int(match_path),
RMT_MODE=rmt_mode,
CONFIG=config,
DOWNLOAD_DIR=download_dir
))
@DbPersist(_db)
def delete_downloader(self, did):
"""
删除下载器
"""
if not did:
return
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).delete()
@DbPersist(_db)
def check_downloader(self, did=None, transfer=None, only_nastool=None, enabled=None, match_path=None):
"""
设置下载器状态
"""
if not did:
return
if transfer is not None:
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).update(
{
"TRANSFER": int(transfer)
}
)
elif only_nastool is not None:
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).update(
{
"ONLY_NASTOOL": int(only_nastool)
}
)
elif match_path is not None:
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).update(
{
"MATCH_PATH": int(match_path)
}
)
elif enabled is not None:
self._db.query(DOWNLOADER).filter(DOWNLOADER.ID == int(did)).update(
{
"ENABLED": int(enabled)
}
)
def get_downloaders(self):
"""
查询下载器
"""
return self._db.query(DOWNLOADER).all()
@DbPersist(_db)
def insert_indexer_statistics(self,
indexer,
itype,
seconds,
result):
"""
插入索引器统计
"""
self._db.insert(INDEXERSTATISTICS(
INDEXER=indexer,
TYPE=itype,
SECONDS=seconds,
RESULT=result,
DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
))
def get_indexer_statistics(self):
"""
查询索引器统计
"""
return self._db.query(
INDEXERSTATISTICS.INDEXER,
func.count(INDEXERSTATISTICS.ID).label("TOTAL"),
func.sum(case((INDEXERSTATISTICS.RESULT == 'N', 1),
else_=0)).label("FAIL"),
func.sum(case((INDEXERSTATISTICS.RESULT == 'Y', 1),
else_=0)).label("SUCCESS"),
func.avg(INDEXERSTATISTICS.SECONDS).label("AVG"),
).group_by(INDEXERSTATISTICS.INDEXER).all()
@DbPersist(_db)
def insert_plugin_history(self, plugin_id, key, value):
"""
新增插件运行记录
"""
self._db.insert(PLUGINHISTORY(
PLUGIN_ID=plugin_id,
KEY=key,
VALUE=value,
DATE=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
))
def get_plugin_history(self, plugin_id, key):
"""
查询插件运行记录
"""
if not plugin_id:
return None
if key:
return self._db.query(PLUGINHISTORY).filter(PLUGINHISTORY.PLUGIN_ID == plugin_id,
PLUGINHISTORY.KEY == key).first()
else:
return self._db.query(PLUGINHISTORY).filter(PLUGINHISTORY.PLUGIN_ID == plugin_id).all()
@DbPersist(_db)
def update_plugin_history(self, plugin_id, key, value):
"""
更新插件运行记录
"""
self._db.query(PLUGINHISTORY).filter(PLUGINHISTORY.PLUGIN_ID == plugin_id,
PLUGINHISTORY.KEY == key).update(
{
"VALUE": value
}
)
@DbPersist(_db)
def delete_plugin_history(self, plugin_id, key):
"""
删除插件运行记录
"""
self._db.query(PLUGINHISTORY).filter(PLUGINHISTORY.PLUGIN_ID == plugin_id,
PLUGINHISTORY.KEY == key).delete()

# ==== demigody_nas-tools/app/helper/progress_helper.py ====
from enum import Enum
from app.utils.commons import singleton
from app.utils.types import ProgressKey
@singleton
class ProgressHelper(object):
_process_detail = {}
def __init__(self):
self._process_detail = {}
def init_config(self):
pass
def __reset(self, ptype=ProgressKey.Search):
if isinstance(ptype, Enum):
ptype = ptype.value
self._process_detail[ptype] = {
"enable": False,
"value": 0,
"text": "请稍候..."
}
def start(self, ptype=ProgressKey.Search):
self.__reset(ptype)
if isinstance(ptype, Enum):
ptype = ptype.value
self._process_detail[ptype]['enable'] = True
def end(self, ptype=ProgressKey.Search):
if isinstance(ptype, Enum):
ptype = ptype.value
if not self._process_detail.get(ptype):
return
self._process_detail[ptype]['enable'] = False
def update(self, value=None, text=None, ptype=ProgressKey.Search):
if isinstance(ptype, Enum):
ptype = ptype.value
if not self._process_detail.get(ptype, {}).get('enable'):
return
        if value is not None:
self._process_detail[ptype]['value'] = value
if text:
self._process_detail[ptype]['text'] = text
def get_process(self, ptype=ProgressKey.Search):
if isinstance(ptype, Enum):
ptype = ptype.value
return self._process_detail.get(ptype)
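# Illustrative usage (names taken from this module):
#   progress = ProgressHelper()
#   progress.start(ProgressKey.Search)
#   progress.update(value=50, text="Searching...", ptype=ProgressKey.Search)
#   progress.get_process(ProgressKey.Search)  # {'enable': True, 'value': 50, 'text': 'Searching...'}
#   progress.end(ProgressKey.Search)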

# ==== demigody_nas-tools/app/helper/ocr_helper.py ====
import requests
from base64 import b64encode
import log
from config import Config
from app.utils import RequestUtils, StringUtils
class OcrHelper:
_ocr_b64_url = "https://ocr.ddsrem.com/captcha/base64"
_baiduocr_api_key = None
_baiduocr_secret_key = None
def __init__(self):
ocr = Config().get_config('ocr')
if ocr:
self._baiduocr_api_key = ocr.get('baiduocr_api_key', '') or ''
self._baiduocr_secret_key = ocr.get('baiduocr_secret_key', '') or ''
            custom_ocr_url = ocr.get('custom_ocr_url', '') or ''
            if StringUtils.is_string_and_not_empty(custom_ocr_url):
                self._ocr_b64_url = custom_ocr_url.rstrip('/')
def get_captcha_text(self, image_url=None, image_b64=None, cookie=None, ua=None):
"""
根据图片地址,获取验证码图片,并识别内容
:param image_url: 图片地址
:param image_b64: 图片base64,跳过图片地址下载
:param cookie: 下载图片使用的cookie
:param ua: 下载图片使用的ua
"""
if image_url:
ret = RequestUtils(headers=ua, cookies=cookie).get_res(image_url)
if ret is not None:
image_bin = ret.content
if not image_bin:
return ""
image_b64 = b64encode(image_bin).decode()
if not image_b64:
return ""
captcha = ""
if self.baiduocr_avaliable():
captcha = self.get_captcha_text_by_baiduocr(image_b64=image_b64)
if StringUtils.is_string_and_not_empty(captcha):
return captcha
if not self.custom_server_avaliable():
return ""
ret = RequestUtils(content_type="application/json").post_res(
url=self._ocr_b64_url,
json={"base64_img": image_b64})
if ret:
return ret.json().get("result")
return ""
def get_captcha_text_by_baiduocr(self, image_b64=None):
if not self.baiduocr_avaliable() or not image_b64:
return ""
url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic?access_token=" + self.get_baiduocr_access_token()
payload = {'image': f'{image_b64}'}
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
if not response:
return ""
if response.status_code != 200:
return ""
try:
result = response.json()
if 'words_result' in result:
words_result = result.get('words_result', []) or []
if len(words_result) > 0:
captcha_text_list = [result['words'] for result in words_result]
return " ".join(captcha_text_list)
else:
return ""
else:
log.error(f"【OCR】验证码识别失败, 原始返回: {response.json()}")
return ""
except Exception as e:
log.error(f"【OCR】验证码识别失败: {str(e)}")
return ""
def get_baiduocr_access_token(self):
"""
        Generate an auth signature (Access Token) from the AK/SK pair.
        :return: access_token, or None on error
"""
url = "https://aip.baidubce.com/oauth/2.0/token"
params = {"grant_type": "client_credentials", "client_id": self._baiduocr_api_key, "client_secret": self._baiduocr_secret_key}
return str(requests.post(url, params=params).json().get("access_token"))
def baiduocr_avaliable(self):
"""
        Check whether Baidu OCR is usable
"""
return StringUtils.is_string_and_not_empty(self._baiduocr_api_key) and StringUtils.is_string_and_not_empty(self._baiduocr_secret_key)
def custom_server_avaliable(self):
"""
        Check whether the self-hosted OCR server is usable
"""
return StringUtils.is_string_and_not_empty(self._ocr_b64_url)
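
# --- Usage sketch (added for illustration, not part of the original module) ---
# Assumes an 'ocr' section in the application config; the captcha URL is hypothetical.
if __name__ == "__main__":
    helper = OcrHelper()
    text = helper.get_captcha_text(image_url="https://example.com/captcha.png")
    print(f"recognized captcha: {text}")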
| 4,145 | Python | .py | 96 | 30.614583 | 141 | 0.577638 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,061 | plugin_helper.py | demigody_nas-tools/app/helper/plugin_helper.py | from cachetools import cached, TTLCache
from app.utils import RequestUtils
# 2023-08-30: the original nastool author's statistics service is no longer available
class PluginHelper:
@staticmethod
def install(plugin_id):
"""
        Count one plugin installation (statistics)
"""
# return RequestUtils(timeout=5).get(f"https://nastool.org/plugin/{plugin_id}/install")
pass
@staticmethod
def report(plugins):
"""
        Report plugin installation statistics in batch
"""
# return RequestUtils(content_type="application/json",
# timeout=5).post(f"https://nastool.org/plugin/update",
# json={
# "plugins": [
# {
# "plugin_id": plugin,
# "count": 1
# } for plugin in plugins
# ]
# })
return {}
@staticmethod
@cached(cache=TTLCache(maxsize=1, ttl=3600))
def statistic():
"""
        Fetch plugin installation statistics
"""
# ret = RequestUtils(accept_type="application/json",
# timeout=5).get_res("https://nastool.org/plugin/statistic")
# if ret:
# try:
# return ret.json()
# except Exception as e:
# print(e)
# return {}
return {}
| 1,616 | Python | .py | 42 | 28.809524 | 95 | 0.405787 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,062 | display_helper.py | demigody_nas-tools/app/helper/display_helper.py | import os
from pyvirtualdisplay import Display
from app.utils.commons import singleton
from app.utils import ExceptionUtils
from config import XVFB_PATH
@singleton
class DisplayHelper(object):
_display = None
def __init__(self):
self.init_config()
def init_config(self):
self.stop_service()
if self.can_display():
try:
self._display = Display(visible=False, size=(1024, 768))
self._display.start()
os.environ["NASTOOL_DISPLAY"] = "true"
except Exception as err:
ExceptionUtils.exception_traceback(err)
def get_display(self):
return self._display
def stop_service(self):
os.environ["NASTOOL_DISPLAY"] = ""
if self._display:
self._display.stop()
@staticmethod
def can_display():
for path in XVFB_PATH:
if os.path.exists(path):
return True
return False
def __del__(self):
self.stop_service()
| 1,031 | Python | .py | 33 | 22.909091 | 72 | 0.606275 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,063 | chrome_helper.py | demigody_nas-tools/app/helper/chrome_helper.py | import json
import os.path
import tempfile
from functools import reduce
from threading import Lock
import requests
import undetected_chromedriver as uc
from webdriver_manager.chrome import ChromeDriverManager
import log
import app.helper.cloudflare_helper as CloudflareHelper
from app.utils import SystemUtils, RequestUtils, ExceptionUtils
from config import Config
lock = Lock()
driver_executable_path = None
class ChromeHelper(object):
_executable_path = None
_chrome = None
_headless = False
_proxy = None
def __init__(self, headless=False):
self._executable_path = SystemUtils.get_webdriver_path() or driver_executable_path
if SystemUtils.is_windows() or SystemUtils.is_macos():
self._headless = False
elif not os.environ.get("NASTOOL_DISPLAY"):
self._headless = True
else:
self._headless = headless
def init_driver(self):
if self._executable_path:
return
if not uc.find_chrome_executable():
return
global driver_executable_path
try:
download_webdriver_path = ChromeDriverManager().install()
SystemUtils.chmod755(download_webdriver_path)
driver_executable_path = download_webdriver_path
except Exception as err:
ExceptionUtils.exception_traceback(err)
@property
def browser(self):
with lock:
if not self._chrome:
self._chrome = self.__get_browser()
return self._chrome
def get_status(self):
if self._executable_path \
and not os.path.exists(self._executable_path):
return False
if not uc.find_chrome_executable():
return False
return True
def __get_browser(self):
if not self.get_status():
return None
options = uc.ChromeOptions()
options.add_argument('--disable-gpu')
options.add_argument('--no-sandbox')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--start-maximized")
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_argument("--disable-extensions")
options.add_argument("--disable-plugins-discovery")
options.add_argument('--no-first-run')
options.add_argument('--no-service-autorun')
options.add_argument('--no-default-browser-check')
options.add_argument('--password-store=basic')
if SystemUtils.is_windows() or SystemUtils.is_macos():
options.add_argument("--window-position=-32000,-32000")
if self._proxy:
proxy = Config().get_proxies().get("https")
if proxy:
proxy = proxy.split('/')[-1]
                options.add_argument('--proxy-server=%s' % proxy)
if self._headless:
options.add_argument('--headless')
prefs = {
"useAutomationExtension": False,
"profile.managed_default_content_settings.images": 2 if self._headless else 1,
"excludeSwitches": ["enable-automation"]
}
        options.add_argument('--lang=zh-CN')
options.add_experimental_option("prefs", prefs)
chrome = ChromeWithPrefs(options=options, driver_executable_path=self._executable_path)
chrome.set_page_load_timeout(30)
return chrome
def visit(self, url, ua=None, cookie=None, timeout=30, proxy=None):
self._proxy = proxy
if not self.browser:
return False
try:
if ua:
self._chrome.execute_cdp_cmd("Emulation.setUserAgentOverride", {
"userAgent": ua
})
if timeout:
self._chrome.implicitly_wait(timeout)
self._chrome.get(url)
if cookie:
self._chrome.delete_all_cookies()
                for _cookie in RequestUtils.cookie_parse(cookie, array=True):
                    self._chrome.add_cookie(_cookie)
self._chrome.get(url)
return True
except Exception as err:
print(str(err))
return False
def new_tab(self, url, ua=None, cookie=None):
if not self._chrome:
return False
        # open a new tab
try:
self._chrome.switch_to.new_window('tab')
except Exception as err:
print(str(err))
return False
        # visit the URL
return self.visit(url=url, ua=ua, cookie=cookie)
def close_tab(self):
try:
self._chrome.close()
self._chrome.switch_to.window(self._chrome.window_handles[0])
except Exception as err:
print(str(err))
return False
def pass_cloudflare(self):
challenge = CloudflareHelper.resolve_challenge(driver=self._chrome)
return challenge
def execute_script(self, script):
if not self._chrome:
return False
try:
return self._chrome.execute_script(script)
except Exception as err:
print(str(err))
def get_title(self):
if not self._chrome:
return ""
return self._chrome.title
def get_html(self):
if not self._chrome:
return ""
return self._chrome.page_source
def get_cookies(self):
if not self._chrome:
return ""
cookie_str = ""
try:
for _cookie in self._chrome.get_cookies():
if not _cookie:
continue
cookie_str += "%s=%s;" % (_cookie.get("name"), _cookie.get("value"))
except Exception as err:
print(str(err))
return cookie_str
def get_ua(self):
try:
return self._chrome.execute_script("return navigator.userAgent")
except Exception as err:
print(str(err))
return None
def quit(self):
if self._chrome:
self._chrome.close()
self._chrome.quit()
self._fixup_uc_pid_leak()
self._chrome = None
def _fixup_uc_pid_leak(self):
"""
        uc force-kills its child processes on quit without calling wait, which
        leaves zombie processes behind; wait on them here so the OS can reap them
        :return:
"""
try:
            # the chromedriver process
if hasattr(self._chrome, "service") and getattr(self._chrome.service, "process", None):
self._chrome.service.process.wait(3)
            # the chrome process
os.waitpid(self._chrome.browser_pid, 0)
except Exception as e:
print(str(e))
pass
def __del__(self):
self.quit()
class ChromeWithPrefs(uc.Chrome):
def __init__(self, *args, options=None, **kwargs):
if options:
self._handle_prefs(options)
super().__init__(*args, options=options, **kwargs)
# remove the user_data_dir when quitting
self.keep_user_data_dir = False
@staticmethod
def _handle_prefs(options):
if prefs := options.experimental_options.get("prefs"):
# turn a (dotted key, value) into a proper nested dict
def undot_key(key, value):
if "." in key:
key, rest = key.split(".", 1)
value = undot_key(rest, value)
return {key: value}
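            # e.g. (illustrative): undot_key("profile.default_content_settings.images", 2)
            #   -> {"profile": {"default_content_settings": {"images": 2}}}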
# undot prefs dict keys
undot_prefs = reduce(
lambda d1, d2: {**d1, **d2}, # merge dicts
(undot_key(key, value) for key, value in prefs.items()),
)
# create a user_data_dir and add its path to the options
user_data_dir = os.path.normpath(tempfile.mkdtemp())
options.add_argument(f"--user-data-dir={user_data_dir}")
# create the preferences json file in its default directory
default_dir = os.path.join(user_data_dir, "Default")
os.mkdir(default_dir)
prefs_file = os.path.join(default_dir, "Preferences")
with open(prefs_file, encoding="latin1", mode="w") as f:
json.dump(undot_prefs, f)
# pylint: disable=protected-access
# remove the experimental_options to avoid an error
del options._experimental_options["prefs"]
def init_chrome():
"""
    Initialize the Chrome driver
"""
ChromeHelper().init_driver()
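
# --- Usage sketch (added for illustration, not part of the original module) ---
# Assumes a local Chrome/Chromedriver is available; the URL is hypothetical.
if __name__ == "__main__":
    helper = ChromeHelper(headless=True)
    if helper.get_status() and helper.visit("https://example.com", timeout=20):
        print(helper.get_title())
    helper.quit()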
| 8,562 | Python | .py | 222 | 27.738739 | 99 | 0.58646 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,064 | cloudflare_helper.py | demigody_nas-tools/app/helper/cloudflare_helper.py | import time
import os
from func_timeout import func_timeout, FunctionTimedOut
from pyquery import PyQuery
from selenium.common import TimeoutException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import log
ACCESS_DENIED_TITLES = [
# Cloudflare
'Access denied',
# Cloudflare http://bitturk.net/ Firefox
'Attention Required! | Cloudflare'
]
ACCESS_DENIED_SELECTORS = [
# Cloudflare
'div.cf-error-title span.cf-code-label span',
# Cloudflare http://bitturk.net/ Firefox
'#cf-error-details div.cf-error-overview h1'
]
CHALLENGE_TITLES = [
# Cloudflare
'Just a moment...',
'请稍候…',
# DDoS-GUARD
'DDOS-GUARD',
]
CHALLENGE_SELECTORS = [
# Cloudflare
'#cf-challenge-running', '.ray_id', '.attack-box', '#cf-please-wait', '#challenge-spinner', '#trk_jschal_js',
# Custom CloudFlare for EbookParadijs, Film-Paleis, MuziekFabriek and Puur-Hollands
'td.info #js_info',
# Fairlane / pararius.com
'div.vc div.text-box h2'
]
SHORT_TIMEOUT = 6
CF_TIMEOUT = int(os.getenv("NASTOOL_CF_TIMEOUT", "60"))
def resolve_challenge(driver: WebDriver, timeout=CF_TIMEOUT):
start_ts = time.time()
try:
func_timeout(timeout, _evil_logic, args=(driver,))
return True
except FunctionTimedOut:
log.error(f'Error solving the challenge. Timeout {timeout} after {round(time.time() - start_ts, 1)} seconds.')
return False
except Exception as e:
log.error('Error solving the challenge. ' + str(e))
return False
def under_challenge(html_text: str):
"""
Check if the page is under challenge
:param html_text:
:return:
"""
# get the page title
if not html_text:
return False
page_title = PyQuery(html_text)('title').text()
log.debug("under_challenge page_title=" + page_title)
for title in CHALLENGE_TITLES:
if page_title.lower() == title.lower():
return True
for selector in CHALLENGE_SELECTORS:
html_doc = PyQuery(html_text)
if html_doc(selector):
return True
return False
def _until_title_changes(driver: WebDriver, titles):
WebDriverWait(driver, SHORT_TIMEOUT).until_not(lambda x: _any_match_titles(x, titles))
def _any_match_titles(driver: WebDriver, titles):
page_title = driver.title
for title in titles:
if page_title.lower() == title.lower():
return True
return False
def _until_selectors_disappear(driver: WebDriver, selectors):
WebDriverWait(driver, SHORT_TIMEOUT).until_not(lambda x: _any_match_selectors(x, selectors))
def _any_match_selectors(driver: WebDriver, selectors):
for selector in selectors:
html_doc = PyQuery(driver.page_source)
if html_doc(selector):
return True
return False
def _evil_logic(driver: WebDriver):
driver.implicitly_wait(SHORT_TIMEOUT)
# wait for the page
html_element = driver.find_element(By.TAG_NAME, "html")
# find access denied titles
if _any_match_titles(driver, ACCESS_DENIED_TITLES):
raise Exception('Cloudflare has blocked this request. '
'Probably your IP is banned for this site, check in your web browser.')
# find access denied selectors
if _any_match_selectors(driver, ACCESS_DENIED_SELECTORS):
raise Exception('Cloudflare has blocked this request. '
'Probably your IP is banned for this site, check in your web browser.')
# find challenge by title
challenge_found = False
if _any_match_titles(driver, CHALLENGE_TITLES):
challenge_found = True
log.info("Challenge detected. Title found: " + driver.title)
if not challenge_found:
# find challenge by selectors
if _any_match_selectors(driver, CHALLENGE_SELECTORS):
challenge_found = True
log.info("Challenge detected. Selector found")
attempt = 0
if challenge_found:
while True:
try:
attempt = attempt + 1
# wait until the title changes
_until_title_changes(driver, CHALLENGE_TITLES)
# then wait until all the selectors disappear
_until_selectors_disappear(driver, CHALLENGE_SELECTORS)
# all elements not found
break
except TimeoutException:
log.debug("Timeout waiting for selector")
click_verify(driver)
# update the html (cloudflare reloads the page every 5 s)
html_element = driver.find_element(By.TAG_NAME, "html")
# waits until cloudflare redirection ends
log.debug("Waiting for redirect")
# noinspection PyBroadException
try:
WebDriverWait(driver, SHORT_TIMEOUT).until(EC.staleness_of(html_element))
except Exception:
log.debug("Timeout waiting for redirect")
log.info("Challenge solved!")
else:
log.info("Challenge not detected!")
def click_verify(driver: WebDriver):
try:
log.debug("Try to find the Cloudflare verify checkbox")
iframe = driver.find_element(By.XPATH, "//iframe[@title='Widget containing a Cloudflare security challenge']")
driver.switch_to.frame(iframe)
checkbox = driver.find_element(
by=By.XPATH,
value='//*[@id="cf-stage"]//label[@class="ctp-checkbox-label"]/input',
)
if checkbox:
actions = ActionChains(driver)
actions.move_to_element_with_offset(checkbox, 5, 7)
actions.click(checkbox)
actions.perform()
log.debug("Cloudflare verify checkbox found and clicked")
except Exception as e:
log.debug(f"Cloudflare verify checkbox not found on the page: {str(e)}")
# print(e)
finally:
driver.switch_to.default_content()
try:
log.debug("Try to find the Cloudflare 'Verify you are human' button")
button = driver.find_element(
by=By.XPATH,
value="//input[@type='button' and @value='Verify you are human']",
)
if button:
actions = ActionChains(driver)
actions.move_to_element_with_offset(button, 5, 7)
actions.click(button)
actions.perform()
log.debug("The Cloudflare 'Verify you are human' button found and clicked")
except Exception as e:
log.debug(f"The Cloudflare 'Verify you are human' button not found on the page:{str(e)}")
# print(e)
time.sleep(2)
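
# --- Usage sketch (added for illustration, not part of the original module) ---
# under_challenge() only needs raw HTML, so it can be exercised without a browser;
# the HTML snippets below are hypothetical.
if __name__ == "__main__":
    challenged = "<html><head><title>Just a moment...</title></head><body></body></html>"
    normal = "<html><head><title>Home</title></head><body></body></html>"
    print(under_challenge(challenged))  # True
    print(under_challenge(normal))      # False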
| 6,803 | Python | .py | 170 | 32.305882 | 118 | 0.657008 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,065 | openai_helper.py | demigody_nas-tools/app/helper/openai_helper.py | import json
import openai
from app.utils import OpenAISessionCache
from app.utils.commons import singleton
from config import Config
@singleton
class OpenAiHelper:
_api_key = None
_api_url = None
def __init__(self):
self.init_config()
def init_config(self):
self._api_key = Config().get_config("openai").get("api_key")
if self._api_key:
openai.api_key = self._api_key
self._api_url = Config().get_config("openai").get("api_url")
if self._api_url:
openai.api_base = self._api_url + "/v1"
else:
proxy_conf = Config().get_proxies()
if proxy_conf and proxy_conf.get("https"):
openai.proxy = proxy_conf.get("https")
def get_state(self):
return True if self._api_key else False
@staticmethod
def __save_session(session_id, message):
"""
        Save the conversation session.
        :param session_id: session ID
        :param message: message
:return:
"""
seasion = OpenAISessionCache.get(session_id)
if seasion:
seasion.append({
"role": "assistant",
"content": message
})
OpenAISessionCache.set(session_id, seasion)
@staticmethod
def __get_session(session_id, message):
"""
        Get the conversation session.
        :param session_id: session ID
        :return: session context
"""
seasion = OpenAISessionCache.get(session_id)
if seasion:
seasion.append({
"role": "user",
"content": message
})
else:
seasion = [
{
"role": "system",
"content": "请在接下来的对话中请使用中文回复,并且内容尽可能详细。"
},
{
"role": "user",
"content": message
}]
OpenAISessionCache.set(session_id, seasion)
return seasion
@staticmethod
def __get_model(message,
prompt=None,
user="NAStool",
**kwargs):
"""
        Build and send the ChatCompletion request
"""
if not isinstance(message, list):
if prompt:
message = [
{
"role": "system",
"content": prompt
},
{
"role": "user",
"content": message
}
]
else:
message = [
{
"role": "user",
"content": message
}
]
return openai.ChatCompletion.create(
model="gpt-3.5-turbo",
user=user,
messages=message,
**kwargs
)
@staticmethod
def __clear_session(session_id):
"""
        Clear the conversation session.
        :param session_id: session ID
:return:
"""
if OpenAISessionCache.get(session_id):
OpenAISessionCache.delete(session_id)
def get_media_name(self, filename):
"""
        Extract the media title and other elements from a file name.
        :param filename: file name
        :return: JSON
"""
if not self.get_state():
return None
result = ""
try:
_filename_prompt = "I will give you a movie/tvshow file name.You need to return a Json." \
"\nPay attention to the correct identification of the film name." \
"\n{\"title\":string,\"version\":string,\"part\":string,\"year\":string,\"resolution\":string,\"season\":number|null,\"episode\":number|null}"
completion = self.__get_model(prompt=_filename_prompt, message=filename)
result = completion.choices[0].message.content
return json.loads(result)
except Exception as e:
print(f"{str(e)}:{result}")
return {}
def get_answer(self, text, userid):
"""
        Get an answer from ChatGPT.
        :param text: input text
        :param userid: user ID
:return:
"""
if not self.get_state():
return ""
try:
if not userid:
return "用户信息错误"
else:
userid = str(userid)
if text == "#清除":
self.__clear_session(userid)
return "会话已清除"
            # fetch the conversation history
messages = self.__get_session(userid, text)
completion = self.__get_model(message=messages, user=userid)
result = completion.choices[0].message.content
if result:
                self.__save_session(userid, result)
return result
except openai.error.RateLimitError as e:
return f"请求被ChatGPT拒绝了,{str(e)}"
except openai.error.APIConnectionError as e:
return "ChatGPT网络连接失败!"
except openai.error.Timeout as e:
return "没有接收到ChatGPT的返回消息!"
except Exception as e:
return f"请求ChatGPT出现错误:{str(e)}"
def translate_to_zh(self, text):
"""
        Translate text to Chinese.
        :param text: input text
"""
if not self.get_state():
return False, None
system_prompt = "You are a translation engine that can only translate text and cannot interpret it."
user_prompt = f"translate to zh-CN:\n\n{text}"
result = ""
try:
completion = self.__get_model(prompt=system_prompt,
message=user_prompt,
temperature=0,
top_p=1,
frequency_penalty=0,
presence_penalty=0)
result = completion.choices[0].message.content.strip()
return True, result
except Exception as e:
print(f"{str(e)}:{result}")
return False, str(e)
def get_question_answer(self, question):
"""
        Pick the correct answer for a question with given options.
        :param question: the question and its options
        :return: JSON
"""
if not self.get_state():
return None
result = ""
try:
_question_prompt = "下面我们来玩一个游戏,你是老师,我是学生,你需要回答我的问题,我会给你一个题目和几个选项,你的回复必须是给定选项中正确答案对应的序号,请直接回复数字"
completion = self.__get_model(prompt=_question_prompt, message=question)
result = completion.choices[0].message.content
return result
except Exception as e:
print(f"{str(e)}:{result}")
return {}
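
# --- Usage sketch (added for illustration, not part of the original module) ---
# Requires an OpenAI api_key in the application config; the file name is hypothetical.
if __name__ == "__main__":
    helper = OpenAiHelper()
    if helper.get_state():
        info = helper.get_media_name("The.Movie.2023.1080p.BluRay.x264.mkv")
        print(info)  # e.g. {"title": "The Movie", "year": "2023", ...}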
| 7,074 | Python | .py | 196 | 21.005102 | 173 | 0.490362 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,066 | words_helper.py | demigody_nas-tools/app/helper/words_helper.py | import regex as re
import cn2an
from app.helper.db_helper import DbHelper
from app.utils.commons import singleton
from app.utils.exception_utils import ExceptionUtils
@singleton
class WordsHelper:
dbhelper = None
    # custom recognition words
words_info = []
def __init__(self):
self.init_config()
def init_config(self):
self.dbhelper = DbHelper()
self.words_info = self.dbhelper.get_custom_words(enabled=1)
def process(self, title):
        # error messages
        msg = []
        # ignore words that were applied
        used_ignored_words = []
        # replacement words that were applied
        used_replaced_words = []
        # episode-offset words that were applied
        used_offset_words = []
        # apply the custom words
for word_info in self.words_info:
match word_info.TYPE:
case 1:
                    # ignore
ignored = word_info.REPLACED
ignored_word = ignored
title, ignore_msg, ignore_flag = self.replace_regex(title, ignored, "") \
if word_info.REGEX else self.replace_noregex(title, ignored, "")
if ignore_flag:
used_ignored_words.append(ignored_word)
elif ignore_msg:
msg.append(f"自定义屏蔽词 {ignored_word} 设置有误:{ignore_msg}")
case 2:
                    # replace
replaced, replace = word_info.REPLACED, word_info.REPLACE
replaced_word = f"{replaced} ⇒ {replace}"
title, replace_msg, replace_flag = self.replace_regex(title, replaced, replace) \
if word_info.REGEX else self.replace_noregex(title, replaced, replace)
if replace_flag:
used_replaced_words.append(replaced_word)
elif replace_msg:
msg.append(f"自定义替换词 {replaced_word} 格式有误:{replace_msg}")
case 3:
                    # replace + episode offset
                    replaced, replace, front, back, offset = \
                        word_info.REPLACED, word_info.REPLACE, word_info.FRONT, word_info.BACK, word_info.OFFSET
                    replaced_word = f"{replaced} ⇒ {replace}"
                    offset_word = f"{front} + {back} >> {offset}"
                    replaced_offset_word = f"{replaced_word} @@@ {offset_word}"
                    # remember the title before replacement
                    title_cache = title
                    # replace
                    title, replace_msg, replace_flag = self.replace_regex(title, replaced, replace)
                    # replacement applied; now apply the episode offset
                    if replace_flag:
                        title, offset_msg, offset_flag = self.episode_offset(title, front, back, offset)
                        # episode offset applied successfully
                        if offset_flag:
                            used_replaced_words.append(replaced_word)
                            used_offset_words.append(offset_word)
                        elif offset_msg:
                            # restore the title
title = title_cache
msg.append(
f"自定义替换+集偏移词 {replaced_offset_word} 集偏移部分格式有误:{offset_msg}")
elif replace_msg:
msg.append(f"自定义替换+集偏移词 {replaced_offset_word} 替换部分格式有误:{replace_msg}")
case 4:
                    # episode offset
front, back, offset = word_info.FRONT, word_info.BACK, word_info.OFFSET
offset_word = f"{front} + {back} >> {offset}"
title, offset_msg, offset_flag = self.episode_offset(title, front, back, offset)
if offset_flag:
used_offset_words.append(offset_word)
elif offset_msg:
msg.append(f"自定义集偏移词 {offset_word} 格式有误:{offset_msg}")
case _:
pass
return title, msg, {"ignored": used_ignored_words, "replaced": used_replaced_words, "offset": used_offset_words}
@staticmethod
def replace_regex(title, replaced, replace) -> (str, str, bool):
try:
if not re.findall(r'%s' % replaced, title):
return title, "", False
else:
return re.sub(r'%s' % replaced, r'%s' % replace, title), "", True
except Exception as err:
ExceptionUtils.exception_traceback(err)
return title, str(err), False
@staticmethod
def replace_noregex(title, replaced, replace) -> (str, str, bool):
try:
if title.find(replaced) == -1:
return title, "", False
else:
return title.replace(replaced, replace), "", True
except Exception as err:
ExceptionUtils.exception_traceback(err)
return title, str(err), False
@staticmethod
def episode_offset(title, front, back, offset) -> (str, str, bool):
try:
if back and not re.findall(r'%s' % back, title):
return title, "", False
if front and not re.findall(r'%s' % front, title):
return title, "", False
offset_word_info_re = re.compile(r'(?<=%s.*?)[0-9一二三四五六七八九十]+(?=.*?%s)' % (front, back))
episode_nums_str = re.findall(offset_word_info_re, title)
if not episode_nums_str:
return title, "", False
episode_nums_offset_str = []
offset_order_flag = False
for episode_num_str in episode_nums_str:
episode_num_int = int(cn2an.cn2an(episode_num_str, "smart"))
                offset_calculate = offset.replace("EP", str(episode_num_int))
                episode_num_offset_int = int(eval(offset_calculate))
                # shift forward
                if episode_num_int > episode_num_offset_int:
                    offset_order_flag = True
                # shift backward
                elif episode_num_int < episode_num_offset_int:
                    offset_order_flag = False
                # a Chinese-numeral original converts back to a Chinese numeral;
                # an Arabic-numeral original keeps its zero padding
if not episode_num_str.isdigit():
episode_num_offset_str = cn2an.an2cn(episode_num_offset_int, "low")
else:
count_0 = re.findall(r"^0+", episode_num_str)
if count_0:
episode_num_offset_str = f"{count_0[0]}{episode_num_offset_int}"
else:
episode_num_offset_str = str(episode_num_offset_int)
episode_nums_offset_str.append(episode_num_offset_str)
episode_nums_dict = dict(zip(episode_nums_str, episode_nums_offset_str))
            # shifting forward: process episodes in ascending order
if offset_order_flag:
episode_nums_list = sorted(episode_nums_dict.items(), key=lambda x: x[1])
            # shifting backward: process episodes in descending order
else:
episode_nums_list = sorted(episode_nums_dict.items(), key=lambda x: x[1], reverse=True)
for episode_num in episode_nums_list:
episode_offset_re = re.compile(
r'(?<=%s.*?)%s(?=.*?%s)' % (front, episode_num[0], back))
title = re.sub(episode_offset_re, r'%s' % episode_num[1], title)
return title, "", True
except Exception as err:
ExceptionUtils.exception_traceback(err)
return title, str(err), False
def is_custom_words_existed(self, replaced=None, front=None, back=None):
"""
        Check whether a custom word already exists
"""
return self.dbhelper.is_custom_words_existed(replaced=replaced,
front=front,
back=back)
def insert_custom_word(self, replaced, replace, front, back, offset, wtype, gid, season, enabled, regex, whelp,
note=None):
"""
        Insert a custom word
"""
ret = self.dbhelper.insert_custom_word(replaced=replaced,
replace=replace,
front=front,
back=back,
offset=offset,
wtype=wtype,
gid=gid,
season=season,
enabled=enabled,
regex=regex,
whelp=whelp,
note=note)
self.init_config()
return ret
def delete_custom_word(self, wid=None):
"""
        Delete a custom word
"""
ret = self.dbhelper.delete_custom_word(wid=wid)
self.init_config()
return ret
def get_custom_words(self, wid=None, gid=None, enabled=None):
"""
        Get custom words
"""
return self.dbhelper.get_custom_words(wid=wid, gid=gid, enabled=enabled)
def get_custom_word_groups(self, gid=None, tmdbid=None, gtype=None):
"""
        Get custom word groups
"""
return self.dbhelper.get_custom_word_groups(gid=gid, tmdbid=tmdbid, gtype=gtype)
def is_custom_word_group_existed(self, tmdbid=None, gtype=None):
"""
        Check whether a custom word group already exists
"""
return self.dbhelper.is_custom_word_group_existed(tmdbid=tmdbid, gtype=gtype)
def insert_custom_word_groups(self, title, year, gtype, tmdbid, season_count, note=None):
"""
        Insert a custom word group
"""
ret = self.dbhelper.insert_custom_word_groups(title=title,
year=year,
gtype=gtype,
tmdbid=tmdbid,
season_count=season_count,
note=note)
self.init_config()
return ret
def delete_custom_word_group(self, gid):
"""
        Delete a custom word group
"""
ret = self.dbhelper.delete_custom_word_group(gid=gid)
self.init_config()
return ret
def check_custom_word(self, wid=None, enabled=None):
"""
        Enable or disable a custom word
"""
ret = self.dbhelper.check_custom_word(wid=wid, enabled=enabled)
self.init_config()
return ret
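
# --- Usage sketch (added for illustration, not part of the original module) ---
# episode_offset() is a staticmethod, so it can be tried without a database;
# the title and the offset expression "EP+1" below are hypothetical.
if __name__ == "__main__":
    new_title, err, ok = WordsHelper.episode_offset(
        "Some.Show.S01E05.1080p.WEB-DL", "S01E", "1080p", "EP+1")
    print(new_title, err, ok)  # expected: Some.Show.S01E06.1080p.WEB-DL '' True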
| 10,990 | Python | .py | 226 | 29.181416 | 120 | 0.491607 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,067 | dict_helper.py | demigody_nas-tools/app/helper/dict_helper.py | from app.db import MainDb, DbPersist
from app.db.models import SYSTEMDICT
class DictHelper:
_db = MainDb()
@DbPersist(_db)
def set(self, dtype, key, value, note=""):
"""
        Set a dictionary value.
        :param dtype: dictionary type
        :param key: dictionary key
        :param value: dictionary value
        :param note: remark
        :return: True / False
"""
if not dtype or not key:
return False
if self.exists(dtype, key):
return self._db.query(SYSTEMDICT).filter(SYSTEMDICT.TYPE == dtype,
SYSTEMDICT.KEY == key).update(
{
"VALUE": value
}
)
else:
return self._db.insert(SYSTEMDICT(
TYPE=dtype,
KEY=key,
VALUE=value,
NOTE=note
))
def get(self, dtype, key):
"""
        Query a dictionary value.
        :param dtype: dictionary type
        :param key: dictionary key
        :return: the dictionary value
"""
if not dtype or not key:
return ""
ret = self._db.query(SYSTEMDICT.VALUE).filter(SYSTEMDICT.TYPE == dtype,
SYSTEMDICT.KEY == key).first()
if ret:
return ret[0]
else:
return ""
@DbPersist(_db)
def delete(self, dtype, key):
"""
        Delete a dictionary value.
        :param dtype: dictionary type
        :param key: dictionary key
        :return: True / False
"""
if not dtype or not key:
return False
return self._db.query(SYSTEMDICT).filter(SYSTEMDICT.TYPE == dtype,
SYSTEMDICT.KEY == key).delete()
def exists(self, dtype, key):
"""
        Check whether a dictionary entry exists.
        :param dtype: dictionary type
        :param key: dictionary key
        :return: True / False
"""
if not dtype or not key:
return False
ret = self._db.query(SYSTEMDICT).filter(SYSTEMDICT.TYPE == dtype,
SYSTEMDICT.KEY == key).count()
if ret > 0:
return True
else:
return False
def list(self, dtype):
"""
查询字典列表
"""
if not dtype:
return []
return self._db.query(SYSTEMDICT).filter(SYSTEMDICT.TYPE == dtype).all()
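
# --- Usage sketch (added for illustration, not part of the original module) ---
# Requires an initialized main database; the type/key names are hypothetical.
if __name__ == "__main__":
    helper = DictHelper()
    helper.set("Example", "greeting", "hello", note="demo entry")
    print(helper.get("Example", "greeting"))     # hello
    print(helper.exists("Example", "greeting"))  # True
    helper.delete("Example", "greeting")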
| 2,482 | Python | .py | 79 | 17.78481 | 84 | 0.464459 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,068 | indexer.py | demigody_nas-tools/app/indexer/indexer.py | import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
import log
from app.helper import ProgressHelper, SubmoduleHelper, DbHelper
from app.utils import ExceptionUtils, StringUtils
from app.utils.commons import singleton
from app.utils.types import SearchType, IndexerType, ProgressKey
from app.sites import Sites
@singleton
class Indexer(object):
_indexer_schemas = []
_client = None
_client_type = None
progress = None
dbhelper = None
def __init__(self):
self._indexer_schemas = SubmoduleHelper.import_submodules(
'app.indexer.client',
filter_func=lambda _, obj: hasattr(obj, 'client_id')
)
log.debug(f"【Indexer】加载索引器:{self._indexer_schemas}")
self.init_config()
def init_config(self):
self.progress = ProgressHelper()
self.dbhelper = DbHelper()
self._client = self.__get_client('builtin')
if self._client:
self._client_type = self._client.get_type()
def __build_class(self, ctype, conf):
for indexer_schema in self._indexer_schemas:
try:
if indexer_schema.match(ctype):
return indexer_schema(conf)
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
def get_indexers(self, check=False, public=True):
"""
        Get the index sites of the current indexer client
"""
if not self._client:
return []
return self._client.get_indexers(check=check, public=public)
def get_indexer(self, url):
"""
        Get the information of a single indexer
"""
if not self._client:
return None
return self._client.get_indexer(url)
def get_indexer_dict(self, check=True, public=True, plugins=True):
"""
        Get the dict of indexers selected by the user
"""
indexers_dicts = []
for indexer in self.get_indexers(check=check, public=public):
if indexer:
if plugins:
indexers_dicts.append({"id": indexer.id,
"name": indexer.name,
"domain": StringUtils.get_url_domain(indexer.domain),
"public": indexer.public})
else:
sites = Sites().get_sites(siteurl=indexer.domain)
if sites:
indexers_dicts.append({"id": indexer.id,
"name": indexer.name,
"domain": StringUtils.get_url_domain(indexer.domain),
"public": indexer.public})
return indexers_dicts
def get_indexer_hash_dict(self):
"""
        Get the hash dict of all indexers
"""
IndexerDict = {}
for item in self.get_indexers() or []:
IndexerDict[StringUtils.md5_hash(item.name)] = {
"id": item.id,
"name": item.name,
"public": item.public,
"builtin": item.builtin
}
return IndexerDict
def get_user_indexer_names(self):
"""
        Get the site names of the indexers selected by the current user
"""
return [indexer.name for indexer in self.get_indexers(check=True)]
def list_resources(self, url, page=0, keyword=None):
"""
        List resources of a built-in indexer site.
        :param url: site URL
        :param page: page number
        :param keyword: search keyword
"""
return self._client.list(url=url, page=page, keyword=keyword)
def __get_client(self, ctype: [IndexerType, str], conf=None):
return self.__build_class(ctype=ctype, conf=conf)
def get_client(self):
"""
        Get the current indexer client
"""
return self._client
def get_client_type(self):
"""
        Get the current indexer type
"""
return self._client_type
def search_by_keyword(self,
key_word: [str, list],
filter_args: dict,
match_media=None,
in_from: SearchType = None):
"""
        Search via the Index API by keyword.
        :param key_word: search keyword, must not be empty
        :param filter_args: filter conditions; an empty attribute is not filtered on:
                        {"season": season, "episode": episode, "year": year, "type": type, "site": site,
                         "restype": quality, "pix": resolution, "sp_state": promotion state, "key": other keywords}
                        sp_state is "UL DL"; * means don't care
        :param match_media: the media info to match against
        :param in_from: search channel
        :return: list of matched resource media info
"""
if not key_word:
return []
indexers = self.get_indexers(check=True)
"""
const filters = {
"site": search_site,
"restype": search_restype,
"pix": search_pix,
"sp_state": sp_state,
"rule": search_rule
};
"""
        # FIXME: when filters contains a non-None site, only the specified indexers should be searched and matched @[email protected]
if not indexers:
log.error("没有配置索引器,无法搜索!")
return []
        # time the search
start_time = datetime.datetime.now()
if filter_args and filter_args.get("site"):
log.info(f"【{self._client_type.value}】开始搜索 %s,站点:%s ..." % (key_word, filter_args.get("site")))
self.progress.update(ptype=ProgressKey.Search,
text="开始搜索 %s,站点:%s ..." % (key_word, filter_args.get("site")))
else:
log.info(f"【{self._client_type.value}】开始并行搜索 %s,线程数:%s ..." % (key_word, len(indexers)))
self.progress.update(ptype=ProgressKey.Search,
text="开始并行搜索 %s,线程数:%s ..." % (key_word, len(indexers)))
        # run sites in parallel threads
executor = ThreadPoolExecutor(max_workers=len(indexers))
all_task = []
for index in indexers:
order_seq = 100 - int(index.pri)
task = executor.submit(self._client.search,
order_seq,
index,
key_word,
filter_args,
match_media,
in_from)
all_task.append(task)
ret_array = []
finish_count = 0
for future in as_completed(all_task):
result = future.result()
finish_count += 1
self.progress.update(ptype=ProgressKey.Search,
value=round(100 * (finish_count / len(all_task))))
if result:
ret_array = ret_array + result
        # compute elapsed time
end_time = datetime.datetime.now()
log.info(f"【{self._client_type.value}】所有站点搜索完成,有效资源数:%s,总耗时 %s 秒"
% (len(ret_array), (end_time - start_time).seconds))
self.progress.update(ptype=ProgressKey.Search,
text="所有站点搜索完成,有效资源数:%s,总耗时 %s 秒"
% (len(ret_array), (end_time - start_time).seconds),
value=100)
return ret_array
def get_indexer_statistics(self):
"""
        Get indexer statistics
"""
return self.dbhelper.get_indexer_statistics()
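
# --- Usage sketch (added for illustration, not part of the original module) ---
# Requires configured and selected indexer sites; the keyword and filters are hypothetical.
if __name__ == "__main__":
    idx = Indexer()
    results = idx.search_by_keyword(
        key_word="Some Movie",
        filter_args={"site": None, "restype": None, "pix": None, "sp_state": None, "rule": None})
    print(f"{len(results)} resources found")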
| 7,914 | Python | .py | 186 | 25.860215 | 107 | 0.517496 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,069 | indexerConf.py | demigody_nas-tools/app/indexer/indexerConf.py | import json
class IndexerConf(object):
def __init__(self,
datas=None,
siteid=None,
cookie=None,
name=None,
rule=None,
public=None,
proxy=False,
parser=None,
ua=None,
render=None,
builtin=True,
language=None,
pri=None):
if not datas:
return None
        # ID
        self.id = datas.get('id')
        # site ID
        self.siteid = siteid
        # name
        self.name = datas.get('name') if not name else name
        # whether this is a built-in site
        self.builtin = datas.get('builtin')
        # domain
        self.domain = datas.get('domain')
        # search
        self.search = datas.get('search', {})
        # batch search; an empty object means batch search is not supported
        self.batch = self.search.get("batch", {}) if builtin else {}
        # parser
        self.parser = parser if parser is not None else datas.get('parser')
        # whether browser rendering is enabled
        self.render = render if render is not None else datas.get("render")
        # browse
        self.browse = datas.get('browse', {})
        # torrent filtering
        self.torrents = datas.get('torrents', {})
        # categories
        self.category = datas.get('category', {})
        # Cookie
        self.cookie = cookie
        # User-Agent
        self.ua = ua
        # filter rule
        self.rule = rule
        # whether this is a public site
        self.public = datas.get('public') if not public else public
        # whether to use a proxy
        self.proxy = datas.get('proxy') if not proxy else proxy
        # restricted to a specific language, if any
        self.language = language
        # indexer priority
        self.pri = pri if pri else 0
def to_dict(self):
return {
"id": self.id or "",
"siteid": self.siteid or "",
"name": self.name or "",
"builtin": self.builtin or True,
"domain": self.domain or "",
"search": self.search or "",
"batch": self.batch or {},
"parser": self.parser or "",
"render": self.render or False,
"browse": self.browse or {},
"torrents": self.torrents or {},
"category": self.category or {},
"cookie": self.cookie or "",
"ua": self.ua or "",
"rule": self.rule or "",
"public": self.public or False,
"proxy": self.proxy or "",
"pri": self.pri or 0
}
def to_dict_str(self, ensure_ascii=False, formatted=True):
if formatted:
return json.dumps(self.to_dict(), ensure_ascii=ensure_ascii, indent=4)
return json.dumps(self.to_dict(), ensure_ascii=ensure_ascii)
| 2,875 | Python | .py | 81 | 22.333333 | 82 | 0.495811 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,070 | _torrentleech.py | demigody_nas-tools/app/indexer/client/_torrentleech.py | from urllib.parse import quote
import log
from app.utils import RequestUtils, StringUtils
from config import Config
class TorrentLeech(object):
_indexer = None
_proxy = None
_size = 100
_searchurl = "%storrents/browse/list/query/%s"
_browseurl = "%storrents/browse/list/page/2%s"
_downloadurl = "%sdownload/%s/%s"
_pageurl = "%storrent/%s"
def __init__(self, indexer):
self._indexer = indexer
if indexer.proxy:
self._proxy = Config().get_proxies()
self.init_config()
def init_config(self):
self._size = Config().get_config('pt').get('site_search_result_num') or 100
def search(self, keyword, page=0):
if keyword:
url = self._searchurl % (self._indexer.domain, quote(keyword))
else:
url = self._browseurl % (self._indexer.domain, int(page) + 1)
res = RequestUtils(headers={
"Content-Type": "application/json; charset=utf-8",
"User-Agent": f"{self._indexer.ua}"
}, cookies=self._indexer.cookie, proxies=self._proxy, timeout=30).get_res(url)
torrents = []
if res and res.status_code == 200:
results = res.json().get('torrentList') or []
for result in results:
torrent = {
'indexer': self._indexer.id,
'title': result.get('name'),
'enclosure': self._downloadurl % (self._indexer.domain, result.get('fid'), result.get('filename')),
'pubdate': StringUtils.timestamp_to_date(result.get('addedTimestamp')),
'size': result.get('size'),
'seeders': result.get('seeders'),
'peers': result.get('leechers'),
'grabs': result.get('completed'),
'downloadvolumefactor': result.get('download_multiplier'),
'uploadvolumefactor': 1,
'page_url': self._pageurl % (self._indexer.domain, result.get('fid')),
'imdbid': result.get('imdbID')
}
torrents.append(torrent)
elif res is not None:
log.warn(f"【INDEXER】{self._indexer.name} 搜索失败,错误码:{res.status_code}")
return True, []
else:
log.warn(f"【INDEXER】{self._indexer.name} 搜索失败,无法连接 {self._indexer.domain}")
return True, []
return False, torrents
| 2,486 | Python | .py | 54 | 33.62963 | 119 | 0.559244 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,071 | _mt_spider.py | demigody_nas-tools/app/indexer/client/_mt_spider.py | import re
from typing import Tuple, List
from app.helper import DbHelper
from app.media.tmdbv3api.tmdb import logger
from app.sites import Sites
from app.sites.mt import MtFunc
from app.utils import RequestUtils
from app.utils.types import MediaType
from config import Config
class MTorrentSpider:
"""
    mTorrent API; the site ApiKey must be cached/configured
"""
dbhelper = None
_indexerid = None
_domain = None
_name = ""
_proxy = None
_cookie = None
_ua = None
_size = 100
_searchurl = "%sapi/torrent/search"
_downloadurl = "%sapi/torrent/genDlToken"
_pageurl = "%sdetail/%s"
    # movie categories
_movie_category = ['401', '419', '420', '421', '439', '405', '404']
_tv_category = ['403', '402', '435', '438', '404', '405']
# API KEY
_apikey = None
    # labels
_labels = {
"0": "",
"1": "DIY",
"2": "国配",
"3": "DIY 国配",
"4": "中字",
"5": "DIY 中字",
"6": "国配 中字",
"7": "DIY 国配 中字"
}
def __init__(self, indexer):
if indexer:
self._indexerid = indexer.id
self._siteid = indexer.siteid
self._domain = indexer.domain
self._searchurl = self._searchurl % self._domain
self._name = indexer.name
if indexer.proxy:
self._proxy = indexer.proxy
self._cookie = indexer.cookie
self._ua = indexer.ua or Config().get_ua()
self.dbhelper = DbHelper()
def search(self, keyword: str, mtype: MediaType = None, page: int = 0) -> Tuple[bool, List[dict]]:
"""
        Search
"""
        # look up the ApiKey
site_info = Sites().get_sites(self._siteid)
self._apikey = site_info.get('apikey')
if not self._apikey:
return True, []
if not mtype:
categories = []
elif mtype == MediaType.TV:
categories = self._tv_category
else:
categories = self._movie_category
params = {
"keyword": keyword,
"categories": categories,
"pageNumber": int(page) + 1,
"pageSize": self._size,
"visible": 1
}
res = RequestUtils(headers={
"Content-Type": "application/json",
"User-Agent": f"{self._ua}",
"x-api-key": self._apikey
}, proxies=self._proxy, timeout=15, referer=f"{self._domain}browse").post_res(url=self._searchurl, json=params)
torrents = []
if res and res.status_code == 200:
if len(res.json().get('data', {})) == 0:
return True, []
results = res.json().get('data', {}).get("data") or []
for result in results:
category_value = result.get('category')
if category_value in self._tv_category \
and category_value not in self._movie_category:
category = MediaType.TV.value
elif category_value in self._movie_category:
category = MediaType.MOVIE.value
else:
category = MediaType.UNKNOWN.value
labels = self._labels.get(result.get('labels') or "0") or ""
mt = MtFunc(site_info)
torrent = {
'title': result.get('name'),
'description': result.get('smallDescr'),
'enclosure': mt.get_download_url(result.get('id')),
'pubdate': result.get('createdDate'),
'size': int(result.get('size') or '0'),
'seeders': int(result.get('status', {}).get("seeders") or '0'),
'peers': int(result.get('status', {}).get("leechers") or '0'),
'grabs': int(result.get('status', {}).get("timesCompleted") or '0'),
'downloadvolumefactor': self.__get_downloadvolumefactor(result.get('status', {}).get("discount")),
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('status', {}).get("discount")),
'page_url': self._pageurl % (self._domain, result.get('id')),
'imdbid': self.__find_imdbid(result.get('imdb')),
'labels': labels,
'category': category
}
torrents.append(torrent)
elif res is not None:
logger.warn(f"{self._name} 搜索失败,错误码:{res.status_code}")
return True, []
else:
logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
return True, []
return False, torrents
@staticmethod
def __find_imdbid(imdb: str) -> str:
"""
        Extract the imdbid from an imdb link
"""
if imdb:
m = re.search(r"tt\d+", imdb)
if m:
return m.group(0)
return ""
@staticmethod
def __get_downloadvolumefactor(discount: str) -> float:
"""
        Get the download volume factor
"""
discount_dict = {
"FREE": 0,
"PERCENT_50": 0.5,
"PERCENT_70": 0.3,
"_2X_FREE": 0,
"_2X_PERCENT_50": 0.5
}
if discount:
return discount_dict.get(discount, 1)
return 1
@staticmethod
def __get_uploadvolumefactor(discount: str) -> float:
"""
        Get the upload volume factor
"""
uploadvolumefactor_dict = {
"_2X": 2.0,
"_2X_FREE": 2.0,
"_2X_PERCENT_50": 2.0
}
if discount:
return uploadvolumefactor_dict.get(discount, 1)
return 1
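
# --- Worked example (added for illustration, not part of the original module) ---
# How the discount codes above map to (download, upload) volume factors:
#   "FREE"            -> download 0,   upload 1
#   "_2X_FREE"        -> download 0,   upload 2.0
#   "_2X_PERCENT_50"  -> download 0.5, upload 2.0
#   unknown or None   -> download 1,   upload 1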
| 5,736 | Python | .py | 157 | 24.840764 | 119 | 0.5081 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,072 | _tnode.py | demigody_nas-tools/app/indexer/client/_tnode.py | import re
import log
from app.utils import RequestUtils, StringUtils
from config import Config
class TNodeSpider(object):
_indexerid = None
_domain = None
_name = ""
_proxy = None
_cookie = None
_ua = None
_token = None
_size = 100
_searchurl = "%sapi/torrent/advancedSearch"
_downloadurl = "%sapi/torrent/download/%s"
_pageurl = "%storrent/info/%s"
def __init__(self, indexer):
if indexer:
self._indexerid = indexer.id
self._domain = indexer.domain
self._searchurl = self._searchurl % self._domain
self._name = indexer.name
if indexer.proxy:
self._proxy = Config().get_proxies()
self._cookie = indexer.cookie
self._ua = indexer.ua
self.init_config()
def init_config(self):
self._size = Config().get_config('pt').get('site_search_result_num') or 100
self.__get_token()
def __get_token(self):
if not self._domain:
return
res = RequestUtils(headers=self._ua, cookies=self._cookie, proxies=self._proxy,
timeout=15).get_res(url=self._domain)
if res and res.status_code == 200:
csrf_token = re.search(r'<meta name="x-csrf-token" content="(.+?)">', res.text)
if csrf_token:
self._token = csrf_token.group(1)
def search(self, keyword, page=0):
if not self._token:
log.warn(f"【INDEXER】{self._name} 未获取到token,无法搜索")
return True, []
params = {
"page": int(page) + 1,
"size": self._size,
"type": "title",
"keyword": keyword or "",
"sorter": "id",
"order": "desc",
"tags": [],
"category": [501, 502, 503, 504],
"medium": [],
"videoCoding": [],
"audioCoding": [],
"resolution": [],
"group": []
}
res = RequestUtils(headers={
'X-CSRF-TOKEN': self._token,
"Content-Type": "application/json; charset=utf-8",
"User-Agent": f"{self._ua}"
}, cookies=self._cookie, proxies=self._proxy, timeout=30).post_res(url=self._searchurl, json=params)
torrents = []
if res and res.status_code == 200:
results = res.json().get('data', {}).get("torrents") or []
for result in results:
torrent = {
'indexer': self._indexerid,
'title': result.get('title'),
'description': result.get('subtitle'),
'enclosure': self._downloadurl % (self._domain, result.get('id')),
'pubdate': StringUtils.timestamp_to_date(result.get('upload_time')),
'size': result.get('size'),
'seeders': result.get('seeding'),
'peers': result.get('leeching'),
'grabs': result.get('complete'),
'downloadvolumefactor': result.get('downloadRate'),
'uploadvolumefactor': result.get('uploadRate'),
'page_url': self._pageurl % (self._domain, result.get('id')),
'imdbid': result.get('imdb')
}
torrents.append(torrent)
elif res is not None:
log.warn(f"【INDEXER】{self._name} 搜索失败,错误码:{res.status_code}")
return True, []
else:
log.warn(f"【INDEXER】{self._name} 搜索失败,无法连接 {self._domain}")
return True, []
return False, torrents
| 3,692 | Python | .py | 90 | 28.422222 | 108 | 0.515727 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,073 | builtin.py | demigody_nas-tools/app/indexer/client/builtin.py | import copy
import datetime
import time
import log
from app.conf import SystemConfig
from app.helper import ProgressHelper, ChromeHelper, DbHelper
from app.indexer.client._base import _IIndexClient
from app.indexer.client._mt_spider import MTorrentSpider
from app.indexer.client._render_spider import RenderSpider
from app.indexer.client._spider import TorrentSpider
from app.indexer.client._tnode import TNodeSpider
from app.indexer.client._torrentleech import TorrentLeech
from app.indexer.client._plugins import PluginsSpider
from app.sites import Sites
from app.utils import StringUtils
from app.utils.types import SearchType, IndexerType, ProgressKey, SystemConfigKey
from config import Config
from web.backend.pro_user import ProUser
class BuiltinIndexer(_IIndexClient):
    # indexer ID
    client_id = "builtin"
    # indexer type
    client_type = IndexerType.BUILTIN
    # indexer name
    client_name = IndexerType.BUILTIN.value
    # private attributes
_client_config = {}
_show_more_sites = False
progress = None
sites = None
dbhelper = None
user = None
chromehelper = None
systemconfig = None
def __init__(self, config=None):
super().__init__()
self._client_config = config or {}
self.init_config()
def init_config(self):
self.sites = Sites()
self.progress = ProgressHelper()
self.dbhelper = DbHelper()
self.user = ProUser()
self.chromehelper = ChromeHelper()
self.systemconfig = SystemConfig()
self._show_more_sites = Config().get_config("laboratory").get('show_more_sites')
@classmethod
def match(cls, ctype):
return True if ctype in [cls.client_id, cls.client_type, cls.client_name] else False
def get_type(self):
return self.client_type
def get_status(self):
"""
        Check connectivity.
        :return: True / False
"""
return True
def get_indexer(self, url):
"""
        Get the configuration of a single indexer
"""
        # check the browser status
chrome_ok = self.chromehelper.get_status()
site = self.sites.get_sites(siteurl=url)
if site:
return self.user.get_indexer(url=url,
siteid=site.get("id"),
cookie=site.get("cookie"),
ua=site.get("ua"),
name=site.get("name"),
rule=site.get("rule"),
pri=site.get('pri'),
public=False,
proxy=site.get("proxy"),
render=False if not chrome_ok else site.get("chrome"))
return None
def get_indexers(self, check=True, public=True):
ret_indexers = []
_indexer_domains = []
        # user-selected indexer sites
indexer_sites = self.systemconfig.get(SystemConfigKey.UserIndexerSites) or []
        # check the browser status
chrome_ok = self.chromehelper.get_status()
        # private sites
for site in self.sites.get_sites():
url = site.get("signurl") or site.get("rssurl")
cookie = site.get("cookie")
if not url or not cookie:
continue
render = False if not chrome_ok else site.get("chrome")
indexer = self.user.get_indexer(url=url,
siteid=site.get("id"),
cookie=cookie,
ua=site.get("ua"),
name=site.get("name"),
rule=site.get("rule"),
pri=site.get('pri'),
public=False,
proxy=site.get("proxy"),
render=render)
if indexer:
if check and (not indexer_sites or indexer.id not in indexer_sites):
continue
if indexer.domain not in _indexer_domains:
_indexer_domains.append(indexer.domain)
indexer.name = site.get("name")
ret_indexers.append(indexer)
        # public sites
if public and self._show_more_sites:
for site_url in self.user.get_public_sites():
indexer = self.user.get_indexer(url=site_url)
if indexer:
if check and (not indexer_sites or indexer.id not in indexer_sites):
continue
if indexer.domain not in _indexer_domains:
_indexer_domains.append(indexer.domain)
ret_indexers.append(indexer)
        # plugin-provided sites
if PluginsSpider().sites():
for indexer in PluginsSpider().sites():
if indexer:
if check and (not indexer_sites or indexer.id not in indexer_sites):
continue
if indexer.domain not in _indexer_domains:
_indexer_domains.append(indexer.domain)
ret_indexers.append(indexer)
return ret_indexers
def search(self, order_seq,
indexer,
key_word,
filter_args: dict,
match_media,
in_from: SearchType):
"""
        Multi-threaded keyword search
"""
if not indexer or not key_word:
return None
        # site rate limiting
if self.sites.check_ratelimit(indexer.siteid):
self.progress.update(ptype=ProgressKey.Search, text=f"{indexer.name} 触发站点流控,跳过 ...")
return []
        # fix: sharing one dict across sites would let one site's update affect all of them
if filter_args is None:
_filter_args = {}
else:
_filter_args = copy.deepcopy(filter_args)
        # skip sites outside the configured search scope
if _filter_args.get("site") and indexer.name not in _filter_args.get("site"):
return []
        # fall back to the site's filter rule when the search has none
if not _filter_args.get("rule") and indexer.rule:
_filter_args.update({"rule": indexer.rule})
        # time the search
start_time = datetime.datetime.now()
log.info(f"【{self.client_name}】开始搜索Indexer:{indexer.name} ...")
        # handle special characters
search_word = StringUtils.handler_special_chars(text=key_word,
replace_word=" ",
allow_space=True)
        # avoid searching an English-only site with a Chinese keyword
if indexer.language == "en" and StringUtils.is_chinese(search_word):
log.warn(f"【{self.client_name}】{indexer.name} 无法使用中文名搜索")
return []
        # start indexing
result_array = []
try:
if indexer.parser == "TNodeSpider":
error_flag, result_array = TNodeSpider(indexer).search(keyword=search_word)
elif indexer.parser == "RenderSpider":
error_flag, result_array = RenderSpider(indexer).search(
keyword=search_word,
mtype=match_media.type if match_media and match_media.tmdb_info else None)
elif indexer.parser == "TorrentLeech":
error_flag, result_array = TorrentLeech(indexer).search(keyword=search_word)
elif 'm-team' in indexer.domain:
error_flag, result_array = MTorrentSpider(indexer).search(keyword=search_word)
else:
if PluginsSpider().status(indexer=indexer):
error_flag, result_array = PluginsSpider().search(keyword=search_word, indexer=indexer)
else:
error_flag, result_array = self.__spider_search(
keyword=search_word,
indexer=indexer,
mtype=match_media.type if match_media and match_media.tmdb_info else None)
except Exception as err:
error_flag = True
print(str(err))
        # time spent indexing
seconds = round((datetime.datetime.now() - start_time).seconds, 1)
        # indexing statistics
self.dbhelper.insert_indexer_statistics(indexer=indexer.name,
itype=self.client_id,
seconds=seconds,
result='N' if error_flag else 'Y')
        # return results
if len(result_array) == 0:
log.warn(f"【{self.client_name}】{indexer.name} 未搜索到数据")
            # update progress
self.progress.update(ptype=ProgressKey.Search, text=f"{indexer.name} 未搜索到数据")
return []
else:
log.warn(f"【{self.client_name}】{indexer.name} 返回数据:{len(result_array)}")
            # update progress
self.progress.update(ptype=ProgressKey.Search, text=f"{indexer.name} 返回 {len(result_array)} 条数据")
            # filter the results
return self.filter_search_results(result_array=result_array,
order_seq=order_seq,
indexer=indexer,
filter_args=_filter_args,
match_media=match_media,
start_time=start_time)
def list(self, url, page=0, keyword=None):
"""
        Browse a site's homepage resources by site URL
"""
if not url:
return []
indexer = self.get_indexer(url)
if not indexer:
return []
        # time the request
start_time = datetime.datetime.now()
if indexer.parser == "RenderSpider":
error_flag, result_array = RenderSpider(indexer).search(keyword=keyword,
page=page)
elif indexer.parser == "TNodeSpider":
error_flag, result_array = TNodeSpider(indexer).search(keyword=keyword,
page=page)
elif indexer.parser == "TorrentLeech":
error_flag, result_array = TorrentLeech(indexer).search(keyword=keyword,
page=page)
elif 'm-team' in indexer.domain:
error_flag, result_array = MTorrentSpider(indexer).search(keyword=keyword,
page=page)
else:
if PluginsSpider().status(indexer=indexer):
error_flag, result_array = PluginsSpider().search(keyword=keyword,
indexer=indexer,
page=page)
else:
error_flag, result_array = self.__spider_search(indexer=indexer,
page=page,
keyword=keyword)
        # time spent indexing
seconds = round((datetime.datetime.now() - start_time).seconds, 1)
        # indexing statistics
self.dbhelper.insert_indexer_statistics(indexer=indexer.name,
itype=self.client_id,
seconds=seconds,
result='N' if error_flag else 'Y')
return result_array
@staticmethod
def __spider_search(indexer, keyword=None, page=None, mtype=None, timeout=30):
"""
        Search a single site by keyword.
        :param indexer: site configuration
        :param keyword: keyword
        :param page: page number
        :param mtype: media type
        :param timeout: timeout in seconds
        :return: whether an error occurred, torrent list
"""
spider = TorrentSpider()
spider.setparam(indexer=indexer,
keyword=keyword,
page=page,
mtype=mtype)
spider.start()
        # poll until the spider completes
sleep_count = 0
while not spider.is_complete:
sleep_count += 1
time.sleep(1)
if sleep_count > timeout:
break
        # whether an error occurred
result_flag = spider.is_error
        # torrent list
result_array = spider.torrents_info_array.copy()
        # reset spider state
spider.torrents_info_array.clear()
return result_flag, result_array
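
# --- Usage sketch (added for illustration, not part of the original module) ---
# Requires configured sites in the application config; output is environment-dependent.
if __name__ == "__main__":
    client = BuiltinIndexer()
    for site_indexer in client.get_indexers(check=False, public=False):
        print(site_indexer.name, site_indexer.domain)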
| 13,103 | Python | .py | 283 | 27.756184 | 109 | 0.50741 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,074 | _spider.py | demigody_nas-tools/app/indexer/client/_spider.py | import copy
import datetime
import re
from urllib.parse import quote
from jinja2 import Template
from pyquery import PyQuery
import feapder
import log
from app.helper import RedisHelper
from app.utils import StringUtils, SystemUtils, RequestUtils
from app.utils.exception_utils import ExceptionUtils
from app.utils.types import MediaType
from config import Config
from feapder.utils.tools import urlencode
class TorrentSpider(feapder.AirSpider):
_webdriver_path = SystemUtils.get_webdriver_path()
_redis_valid = RedisHelper.is_valid()
__custom_setting__ = dict(
SPIDER_THREAD_COUNT=1,
SPIDER_MAX_RETRY_TIMES=0,
REQUEST_LOST_TIMEOUT=10,
RETRY_FAILED_REQUESTS=False,
LOG_LEVEL="ERROR",
RANDOM_HEADERS=False,
REDISDB_IP_PORTS="127.0.0.1:6379",
REDISDB_USER_PASS="",
REDISDB_DB=0,
RESPONSE_CACHED_ENABLE=_redis_valid,
RESPONSE_CACHED_EXPIRE_TIME=300,
RESPONSE_CACHED_USED=_redis_valid,
WEBDRIVER=dict(
pool_size=1,
load_images=False,
proxy=None,
headless=True,
driver_type="CHROME",
timeout=20,
window_size=(1024, 800),
render_time=10,
custom_argument=["--ignore-certificate-errors"],
)
)
    # flag: search finished
    is_complete = False
    # flag: an error occurred
    is_error = False
    # indexer ID
    indexerid = None
    # indexer name
    indexername = None
    # site domain
    domain = None
    # site cookie
    cookie = None
    # site User-Agent
    ua = None
    # proxy
    proxies = None
    # whether to render with a browser
    render = False
    # Referer
    referer = None
    # search keyword
    keyword = None
    # media type
    mtype = None
    # search path/method configuration
    search = {}
    # batch search configuration
    batch = {}
    # browse configuration
    browse = {}
    # site category configuration
    category = {}
    # site torrent list configuration
    list = {}
    # site torrent field configuration
    fields = {}
    # page number
    page = 0
    # max number of search results
    result_num = 100
    # info of a single torrent
    torrents_info = {}
    # torrent list
    torrents_info_array = []
def setparam(self, indexer,
keyword: [str, list] = None,
page=None,
referer=None,
mtype: MediaType = None):
"""
        Set the query parameters.
        :param indexer: indexer
        :param keyword: search keyword; a list triggers a batch search
        :param page: page number
        :param referer: Referer
        :param mtype: media type
"""
if not indexer:
return
self.keyword = keyword
self.mtype = mtype
self.indexerid = indexer.id
self.indexername = indexer.name
self.search = indexer.search
self.batch = indexer.batch
self.browse = indexer.browse
self.category = indexer.category
self.list = indexer.torrents.get('list', {})
self.fields = indexer.torrents.get('fields')
self.render = indexer.render
self.domain = indexer.domain
self.page = page
if self.domain and not str(self.domain).endswith("/"):
self.domain = self.domain + "/"
if indexer.ua:
self.ua = indexer.ua
else:
self.ua = Config().get_ua()
if indexer.proxy:
self.proxies = Config().get_proxies()
if indexer.cookie:
self.cookie = indexer.cookie
if referer:
self.referer = referer
self.result_num = Config().get_config('pt').get('site_search_result_num') or 100
self.torrents_info_array = []
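    # Illustrative driver sketch: this mirrors how the indexer client earlier in
    # this package drives the spider -- configure, start, then poll
    # ``is_complete`` with a timeout. ``my_indexer`` is a hypothetical, fully
    # configured indexer object, and ``time`` would need to be imported:
    #     spider = TorrentSpider()
    #     spider.setparam(indexer=my_indexer, keyword="ubuntu", page=0)
    #     spider.start()
    #     for _ in range(30):
    #         if spider.is_complete:
    #             break
    #         time.sleep(1)
    #     error, torrents = spider.is_error, spider.torrents_info_array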
def start_requests(self):
"""
开始请求
"""
if not self.search or not self.domain:
self.is_complete = True
return
# 种子搜索相对路径
paths = self.search.get('paths', [])
torrentspath = ""
if len(paths) == 1:
torrentspath = paths[0].get('path', '')
else:
for path in paths:
if path.get("type") == "all" and not self.mtype:
torrentspath = path.get('path')
break
elif path.get("type") == "movie" and self.mtype == MediaType.MOVIE:
torrentspath = path.get('path')
break
elif path.get("type") == "tv" and self.mtype == MediaType.TV:
torrentspath = path.get('path')
break
elif path.get("type") == "anime" and self.mtype == MediaType.ANIME:
torrentspath = path.get('path')
break
# 关键字搜索
if self.keyword:
if isinstance(self.keyword, list):
# 批量查询
if self.batch:
delimiter = self.batch.get('delimiter') or ' '
space_replace = self.batch.get('space_replace') or ' '
search_word = delimiter.join([str(k).replace(' ', space_replace) for k in self.keyword])
else:
search_word = " ".join(self.keyword)
# 查询模式:或
search_mode = "1"
else:
# 单个查询
search_word = self.keyword
# 查询模式与
search_mode = "0"
# 搜索URL
if self.search.get("params"):
# 变量字典
inputs_dict = {
"keyword": search_word
}
# 查询参数
params = {
"search_mode": search_mode,
"page": self.page or 0,
"notnewword": 1
}
# 额外参数
for key, value in self.search.get("params").items():
params.update({
"%s" % key: str(value).format(**inputs_dict)
})
# 分类条件
if self.category:
if self.mtype == MediaType.MOVIE:
cats = self.category.get("movie") or []
elif self.mtype:
cats = self.category.get("tv") or []
else:
cats = (self.category.get("movie") or []) + (self.category.get("tv") or [])
for cat in cats:
if self.category.get("field"):
value = params.get(self.category.get("field"), "")
params.update({
"%s" % self.category.get("field"): value + self.category.get("delimiter",
' ') + cat.get("id")
})
else:
params.update({
"cat%s" % cat.get("id"): 1
})
searchurl = self.domain + torrentspath + "?" + urlencode(params)
else:
# 变量字典
inputs_dict = {
"keyword": quote(search_word),
"page": self.page or 0
}
# 无额外参数
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
# 列表浏览
else:
# 变量字典
inputs_dict = {
"page": self.page or 0,
"keyword": ""
}
# 有单独浏览路径
if self.browse:
torrentspath = self.browse.get("path")
if self.browse.get("start"):
start_page = int(self.browse.get("start")) + int(self.page or 0)
inputs_dict.update({
"page": start_page
})
elif self.page:
torrentspath = torrentspath + f"?page={self.page}"
# 搜索Url
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
log.info(f"【Spider】开始请求:{searchurl}")
yield feapder.Request(url=searchurl,
use_session=True,
render=False)
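    # Illustrative ``search`` configuration (hypothetical site definition; the
    # field names follow the lookups performed in start_requests above):
    #     search = {
    #         "paths": [{"path": "torrents.php", "type": "all"}],
    #         "params": {"search": "{keyword}", "notsubtitle": 1},
    #     }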
def download_midware(self, request):
request.headers = {
"User-Agent": self.ua
}
request.cookies = RequestUtils.cookie_parse(self.cookie)
if self.proxies:
request.proxies = self.proxies
return request
def Gettitle_default(self, torrent):
# title default
if 'title' not in self.fields:
return
selector = self.fields.get('title', {})
if 'selector' in selector:
title = torrent(selector.get('selector', '')).clone()
self.__remove(title, selector)
items = self.__attribute_or_text(title, selector)
self.torrents_info['title'] = self.__index(items, selector)
elif 'text' in selector:
render_dict = {}
if "title_default" in self.fields:
title_default_selector = self.fields.get('title_default', {})
title_default_item = torrent(title_default_selector.get('selector', '')).clone()
self.__remove(title_default_item, title_default_selector)
                items = self.__attribute_or_text(title_default_item, title_default_selector)
title_default = self.__index(items, title_default_selector)
render_dict.update({'title_default': title_default})
if "title_optional" in self.fields:
title_optional_selector = self.fields.get('title_optional', {})
title_optional_item = torrent(title_optional_selector.get('selector', '')).clone()
self.__remove(title_optional_item, title_optional_selector)
items = self.__attribute_or_text(title_optional_item, title_optional_selector)
title_optional = self.__index(items, title_optional_selector)
render_dict.update({'title_optional': title_optional})
self.torrents_info['title'] = Template(selector.get('text')).render(fields=render_dict)
self.torrents_info['title'] = self.__filter_text(self.torrents_info.get('title'),
selector.get('filters'))
def Gettitle_optional(self, torrent):
        # description (the site's secondary/optional title line)
if 'description' not in self.fields:
return
selector = self.fields.get('description', {})
if "selector" in selector \
or "selectors" in selector:
description = torrent(selector.get('selector', selector.get('selectors', ''))).clone()
if description:
self.__remove(description, selector)
items = self.__attribute_or_text(description, selector)
self.torrents_info['description'] = self.__index(items, selector)
elif "text" in selector:
render_dict = {}
if "tags" in self.fields:
tags_selector = self.fields.get('tags', {})
tags_item = torrent(tags_selector.get('selector', '')).clone()
self.__remove(tags_item, tags_selector)
items = self.__attribute_or_text(tags_item, tags_selector)
tag = self.__index(items, tags_selector)
render_dict.update({'tags': tag})
if "subject" in self.fields:
subject_selector = self.fields.get('subject', {})
subject_item = torrent(subject_selector.get('selector', '')).clone()
self.__remove(subject_item, subject_selector)
items = self.__attribute_or_text(subject_item, subject_selector)
subject = self.__index(items, subject_selector)
render_dict.update({'subject': subject})
if "description_free_forever" in self.fields:
description_free_forever_selector = self.fields.get("description_free_forever", {})
description_free_forever_item = torrent(description_free_forever_selector.get("selector", '')).clone()
self.__remove(description_free_forever_item, description_free_forever_selector)
items = self.__attribute_or_text(description_free_forever_item, description_free_forever_selector)
description_free_forever = self.__index(items, description_free_forever_selector)
render_dict.update({"description_free_forever": description_free_forever})
if "description_normal" in self.fields:
description_normal_selector = self.fields.get("description_normal", {})
description_normal_item = torrent(description_normal_selector.get("selector", '')).clone()
self.__remove(description_normal_item, description_normal_selector)
items = self.__attribute_or_text(description_normal_item, description_normal_selector)
description_normal = self.__index(items, description_normal_selector)
render_dict.update({"description_normal": description_normal})
self.torrents_info['description'] = Template(selector.get('text')).render(fields=render_dict)
self.torrents_info['description'] = self.__filter_text(self.torrents_info.get('description'),
selector.get('filters'))
def Getdetails(self, torrent):
# details
if 'details' not in self.fields:
return
selector = self.fields.get('details', {})
details = torrent(selector.get('selector', '')).clone()
self.__remove(details, selector)
items = self.__attribute_or_text(details, selector)
item = self.__index(items, selector)
detail_link = self.__filter_text(item, selector.get('filters'))
if detail_link:
if not detail_link.startswith("http"):
if detail_link.startswith("//"):
self.torrents_info['page_url'] = self.domain.split(":")[0] + ":" + detail_link
elif detail_link.startswith("/"):
self.torrents_info['page_url'] = self.domain + detail_link[1:]
else:
self.torrents_info['page_url'] = self.domain + detail_link
else:
self.torrents_info['page_url'] = detail_link
def Getdownload(self, torrent):
# download link
if 'download' not in self.fields:
return
selector = self.fields.get('download', {})
download = torrent(selector.get('selector', '')).clone()
self.__remove(download, selector)
items = self.__attribute_or_text(download, selector)
item = self.__index(items, selector)
download_link = self.__filter_text(item, selector.get('filters'))
if download_link:
if not download_link.startswith("http") and not download_link.startswith("magnet"):
self.torrents_info['enclosure'] = self.domain + download_link[1:] if download_link.startswith(
"/") else self.domain + download_link
else:
self.torrents_info['enclosure'] = download_link
def Getimdbid(self, torrent):
# imdbid
if "imdbid" not in self.fields:
return
selector = self.fields.get('imdbid', {})
imdbid = torrent(selector.get('selector', '')).clone()
self.__remove(imdbid, selector)
items = self.__attribute_or_text(imdbid, selector)
item = self.__index(items, selector)
self.torrents_info['imdbid'] = item
self.torrents_info['imdbid'] = self.__filter_text(self.torrents_info.get('imdbid'),
selector.get('filters'))
def Getsize(self, torrent):
# torrent size
if 'size' not in self.fields:
return
selector = self.fields.get('size', {})
size = torrent(selector.get('selector', selector.get("selectors", ''))).clone()
self.__remove(size, selector)
items = self.__attribute_or_text(size, selector)
item = self.__index(items, selector)
if item:
self.torrents_info['size'] = StringUtils.num_filesize(item.replace("\n", "").strip())
self.torrents_info['size'] = self.__filter_text(self.torrents_info.get('size'),
selector.get('filters'))
self.torrents_info['size'] = StringUtils.num_filesize(self.torrents_info.get('size'))
def Getleechers(self, torrent):
# torrent leechers
if 'leechers' not in self.fields:
return
selector = self.fields.get('leechers', {})
leechers = torrent(selector.get('selector', '')).clone()
self.__remove(leechers, selector)
items = self.__attribute_or_text(leechers, selector)
item = self.__index(items, selector)
if item:
self.torrents_info['peers'] = item.split("/")[0]
self.torrents_info['peers'] = self.__filter_text(self.torrents_info.get('peers'),
selector.get('filters'))
else:
self.torrents_info['peers'] = 0
def Getseeders(self, torrent):
        # torrent seeders
if 'seeders' not in self.fields:
return
selector = self.fields.get('seeders', {})
seeders = torrent(selector.get('selector', '')).clone()
self.__remove(seeders, selector)
items = self.__attribute_or_text(seeders, selector)
item = self.__index(items, selector)
if item:
self.torrents_info['seeders'] = item.split("/")[0]
self.torrents_info['seeders'] = self.__filter_text(self.torrents_info.get('seeders'),
selector.get('filters'))
else:
self.torrents_info['seeders'] = 0
def Getgrabs(self, torrent):
# torrent grabs
if 'grabs' not in self.fields:
return
selector = self.fields.get('grabs', {})
grabs = torrent(selector.get('selector', '')).clone()
self.__remove(grabs, selector)
items = self.__attribute_or_text(grabs, selector)
item = self.__index(items, selector)
if item:
self.torrents_info['grabs'] = item.split("/")[0]
self.torrents_info['grabs'] = self.__filter_text(self.torrents_info.get('grabs'),
selector.get('filters'))
else:
self.torrents_info['grabs'] = 0
def Getpubdate(self, torrent):
# torrent pubdate
if 'date_added' not in self.fields:
return
selector = self.fields.get('date_added', {})
pubdate = torrent(selector.get('selector', '')).clone()
self.__remove(pubdate, selector)
items = self.__attribute_or_text(pubdate, selector)
self.torrents_info['pubdate'] = self.__index(items, selector)
self.torrents_info['pubdate'] = self.__filter_text(self.torrents_info.get('pubdate'),
selector.get('filters'))
def Getelapsed_date(self, torrent):
        # torrent elapsed (relative) date
if 'date_elapsed' not in self.fields:
return
selector = self.fields.get('date_elapsed', {})
date_elapsed = torrent(selector.get('selector', '')).clone()
self.__remove(date_elapsed, selector)
items = self.__attribute_or_text(date_elapsed, selector)
self.torrents_info['date_elapsed'] = self.__index(items, selector)
self.torrents_info['date_elapsed'] = self.__filter_text(self.torrents_info.get('date_elapsed'),
selector.get('filters'))
def Getdownloadvolumefactor(self, torrent):
# downloadvolumefactor
selector = self.fields.get('downloadvolumefactor', {})
if not selector:
return
self.torrents_info['downloadvolumefactor'] = 1
if 'case' in selector:
for downloadvolumefactorselector in list(selector.get('case', {}).keys()):
downloadvolumefactor = torrent(downloadvolumefactorselector)
if len(downloadvolumefactor) > 0:
self.torrents_info['downloadvolumefactor'] = selector.get('case', {}).get(
downloadvolumefactorselector)
break
elif "selector" in selector:
downloadvolume = torrent(selector.get('selector', '')).clone()
self.__remove(downloadvolume, selector)
items = self.__attribute_or_text(downloadvolume, selector)
item = self.__index(items, selector)
if item:
downloadvolumefactor = re.search(r'(\d+\.?\d*)', item)
if downloadvolumefactor:
                    self.torrents_info['downloadvolumefactor'] = float(downloadvolumefactor.group(1))
def Getuploadvolumefactor(self, torrent):
# uploadvolumefactor
selector = self.fields.get('uploadvolumefactor', {})
if not selector:
return
self.torrents_info['uploadvolumefactor'] = 1
if 'case' in selector:
for uploadvolumefactorselector in list(selector.get('case', {}).keys()):
uploadvolumefactor = torrent(uploadvolumefactorselector)
if len(uploadvolumefactor) > 0:
self.torrents_info['uploadvolumefactor'] = selector.get('case', {}).get(
uploadvolumefactorselector)
break
elif "selector" in selector:
uploadvolume = torrent(selector.get('selector', '')).clone()
self.__remove(uploadvolume, selector)
items = self.__attribute_or_text(uploadvolume, selector)
item = self.__index(items, selector)
if item:
uploadvolumefactor = re.search(r'(\d+\.?\d*)', item)
if uploadvolumefactor:
                    self.torrents_info['uploadvolumefactor'] = float(uploadvolumefactor.group(1))
def Getlabels(self, torrent):
# labels
if 'labels' not in self.fields:
return
selector = self.fields.get('labels', {})
labels = torrent(selector.get("selector", "")).clone()
self.__remove(labels, selector)
items = self.__attribute_or_text(labels, selector)
if items:
self.torrents_info['labels'] = "|".join(items)
def Getinfo(self, torrent):
"""
        Parse a single torrent entry
"""
self.torrents_info = {'indexer': self.indexerid}
try:
self.Gettitle_default(torrent)
self.Gettitle_optional(torrent)
self.Getdetails(torrent)
self.Getdownload(torrent)
self.Getgrabs(torrent)
self.Getleechers(torrent)
self.Getseeders(torrent)
self.Getsize(torrent)
self.Getimdbid(torrent)
self.Getdownloadvolumefactor(torrent)
self.Getuploadvolumefactor(torrent)
self.Getpubdate(torrent)
self.Getelapsed_date(torrent)
self.Getlabels(torrent)
except Exception as err:
ExceptionUtils.exception_traceback(err)
log.error("【Spider】%s 搜索出现错误:%s" % (self.indexername, str(err)))
return self.torrents_info
@staticmethod
def __filter_text(text, filters):
"""
        Apply the configured filter chain to a text value
"""
if not text or not filters or not isinstance(filters, list):
return text
if not isinstance(text, str):
text = str(text)
for filter_item in filters:
if not text:
break
try:
method_name = filter_item.get("name")
args = filter_item.get("args")
if method_name == "re_search" and isinstance(args, list):
text = re.search(r"%s" % args[0], text).group(args[-1])
elif method_name == "split" and isinstance(args, list):
text = text.split(r"%s" % args[0])[args[-1]]
elif method_name == "replace" and isinstance(args, list):
text = text.replace(r"%s" % args[0], r"%s" % args[-1])
elif method_name == "dateparse" and isinstance(args, str):
text = datetime.datetime.strptime(text, r"%s" % args)
elif method_name == "strip":
text = text.strip()
elif method_name == "appendleft":
text = f"{args}{text}"
except Exception as err:
ExceptionUtils.exception_traceback(err)
return text.strip()
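    # Illustrative filter chain (hypothetical site definition): ``filters`` is a
    # list of {"name", "args"} steps applied in order by __filter_text, e.g.
    #     filters = [
    #         {"name": "replace", "args": [",", ""]},
    #         {"name": "re_search", "args": [r"[\d.]+ ?[KMGT]i?B", 0]},
    #         {"name": "strip"},
    #     ]
    # would turn "Size: 1,234.5 MB " into "1234.5 MB".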
@staticmethod
def __remove(item, selector):
"""
        Remove sub-elements named in the selector's "remove" list
"""
if selector and "remove" in selector:
removelist = selector.get('remove', '').split(', ')
for v in removelist:
item.remove(v)
@staticmethod
def __attribute_or_text(item, selector):
if not selector:
return item
if not item:
return []
if 'attribute' in selector:
items = [i.attr(selector.get('attribute')) for i in item.items() if i]
else:
items = [i.text() for i in item.items() if i]
return items
@staticmethod
def __index(items, selector):
if not selector:
return items
if not items:
return items
if "contents" in selector \
and len(items) > int(selector.get("contents")):
items = items[0].split("\n")[selector.get("contents")]
elif "index" in selector \
and len(items) > int(selector.get("index")):
items = items[int(selector.get("index"))]
elif isinstance(items, list):
items = items[0]
return items
def clean_all_sites_free(self, html):
        # Match the site-wide "全站 [Free] 生效中" banner inside an <h1>, case-insensitive
        pattern = re.compile(r'<h1.*?>.*?全站\s+\[Free\]\s+生效中.*?</h1>', re.IGNORECASE)
        # Strip the banner from the HTML with re.sub
        cleaned_html = re.sub(pattern, '', html)
return cleaned_html
def parse(self, request, response):
"""
        Parse the whole result page
"""
try:
# 获取站点文本
html_text = response.extract()
html_text = self.clean_all_sites_free(html_text)
if not html_text:
self.is_error = True
self.is_complete = True
return
# 解析站点文本对象
html_doc = PyQuery(html_text)
# 种子筛选器
torrents_selector = self.list.get('selector', '')
# 遍历种子html列表
for torn in html_doc(torrents_selector):
self.torrents_info_array.append(copy.deepcopy(self.Getinfo(PyQuery(torn))))
if len(self.torrents_info_array) >= int(self.result_num):
break
except Exception as err:
self.is_error = True
ExceptionUtils.exception_traceback(err)
log.warn(f"【Spider】错误:{self.indexername} {str(err)}")
finally:
self.is_complete = True
| 27,794 | Python | .py | 626 | 30.439297 | 118 | 0.54184 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,075 | _plugins.py | demigody_nas-tools/app/indexer/client/_plugins.py |
from app.plugins import PluginManager
from config import Config
class PluginsSpider(object):
    # Private attributes
    _level = 99
    _plugin = {}
    _proxy = None
    _indexer = None
    def __init__(self, indexer=None):
        self._indexer = indexer
        if indexer:
            if indexer.proxy:
                self._proxy = Config().get_proxies()
            self._plugin = PluginManager().get_plugin_apps(self._level).get(self._indexer.parser)
def status(self, indexer):
try:
plugin = PluginManager().get_plugin_apps(self._level).get(indexer.parser)
return True if plugin else False
except Exception as e:
return False
    def search(self, keyword, indexer, page=0):
        try:
            result_array = PluginManager().run_plugin_method(pid=indexer.parser, method='search', keyword=keyword, indexer=indexer, page=page)
            # The first element follows the (error_flag, torrents) convention of
            # the other spiders: True only when the search itself failed
            if not result_array:
                return False, []
            return False, result_array
        except Exception:
            return True, []
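    # Illustrative call flow (assumption, mirroring the indexer client earlier
    # in this package; ``my_indexer`` is a hypothetical indexer object):
    #     spider = PluginsSpider()
    #     if spider.status(indexer=my_indexer):        # parser plugin installed?
    #         err, torrents = spider.search("ubuntu", my_indexer, page=0)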
def sites(self):
result = []
try:
plugins = PluginManager().get_plugin_apps(self._level)
for key in plugins:
if plugins.get(key)['installed']:
result_array = PluginManager().run_plugin_method(pid=plugins.get(key)['id'], method='get_indexers')
if result_array:
result.extend(result_array)
except Exception as e:
pass
return result | 1,503 | Python | .py | 39 | 28.153846 | 142 | 0.587586 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,076 | _render_spider.py | demigody_nas-tools/app/indexer/client/_render_spider.py | # coding: utf-8
import copy
import time
from urllib.parse import quote
from pyquery import PyQuery
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as es
from selenium.webdriver.support.wait import WebDriverWait
from app.helper import ChromeHelper
from app.indexer.client._spider import TorrentSpider
from app.utils import ExceptionUtils
from config import Config
class RenderSpider(object):
torrentspider = None
torrents_info_array = []
result_num = 100
def __init__(self, indexer):
self.torrentspider = TorrentSpider()
self._indexer = indexer
self.init_config()
def init_config(self):
self.torrents_info_array = []
self.result_num = Config().get_config('pt').get('site_search_result_num') or 100
def search(self, keyword, page=None, mtype=None):
"""
开始搜索
:param: keyword: 搜索关键字
:param: indexer: 站点配置
:param: page: 页码
:param: mtype: 类型
:return: (是否发生错误,种子列表)
"""
if not keyword:
keyword = ""
if isinstance(keyword, list):
keyword = " ".join(keyword)
chrome = ChromeHelper()
if not chrome.get_status():
return True, []
# 请求路径
torrentspath = self._indexer.search.get('paths', [{}])[0].get('path', '') or ''
search_url = self._indexer.domain + torrentspath.replace("{keyword}", quote(keyword))
# 请求方式,支持GET和浏览仿真
method = self._indexer.search.get('paths', [{}])[0].get('method', '')
if method == "chrome":
# 请求参数
params = self._indexer.search.get('paths', [{}])[0].get('params', {})
# 搜索框
search_input = params.get('keyword')
# 搜索按钮
search_button = params.get('submit')
# 预执行脚本
pre_script = params.get('script')
# referer
if params.get('referer'):
referer = self._indexer.domain + params.get('referer').replace('{keyword}', quote(keyword))
else:
referer = self._indexer.domain
if not search_input or not search_button:
return True, []
# 使用浏览器打开页面
if not chrome.visit(url=search_url,
cookie=self._indexer.cookie,
ua=self._indexer.ua,
proxy=self._indexer.proxy):
return True, []
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
return True, []
# 模拟搜索操作
try:
# 执行脚本
if pre_script:
chrome.execute_script(pre_script)
# 等待可点击
submit_obj = WebDriverWait(driver=chrome.browser,
timeout=10).until(es.element_to_be_clickable((By.XPATH,
search_button)))
if submit_obj:
                    # Type the search keyword into the search box
                    chrome.browser.find_element(By.XPATH, search_input).send_keys(keyword)
# 提交搜索
submit_obj.click()
else:
return True, []
except Exception as e:
ExceptionUtils.exception_traceback(e)
return True, []
else:
# referer
referer = self._indexer.domain
# 使用浏览器获取HTML文本
if not chrome.visit(url=search_url,
cookie=self._indexer.cookie,
ua=self._indexer.ua,
proxy=self._indexer.proxy):
return True, []
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
return True, []
# 等待页面加载完成
time.sleep(5)
# 获取HTML文本
html_text = chrome.get_html()
if not html_text:
return True, []
# 重新获取Cookie和UA
self._indexer.cookie = chrome.get_cookies()
self._indexer.ua = chrome.get_ua()
# 设置抓虫参数
self.torrentspider.setparam(keyword=keyword,
indexer=self._indexer,
referer=referer,
page=page,
mtype=mtype)
# 种子筛选器
torrents_selector = self._indexer.torrents.get('list', {}).get('selector', '')
if not torrents_selector:
return False, []
# 解析HTML文本
html_doc = PyQuery(html_text)
for torn in html_doc(torrents_selector):
self.torrents_info_array.append(copy.deepcopy(self.torrentspider.Getinfo(PyQuery(torn))))
if len(self.torrents_info_array) >= int(self.result_num):
break
return False, self.torrents_info_array
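    # Illustrative usage (assumption): one RenderSpider is built per indexer and
    # returns the same (error flag, torrent list) tuple as the other spiders;
    # ``my_indexer`` is a hypothetical indexer object:
    #     if ChromeHelper().get_status():              # requires a local Chrome
    #         err, torrents = RenderSpider(my_indexer).search("ubuntu", page=0)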
| 5,260 | Python | .py | 126 | 26.02381 | 107 | 0.518625 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,077 | _base.py | demigody_nas-tools/app/indexer/client/_base.py | import datetime
from abc import ABCMeta, abstractmethod
import log
from app.filter import Filter
from app.helper import ProgressHelper
from app.media import Media
from app.media.meta import MetaInfo
from app.utils.types import MediaType, SearchType, ProgressKey
from config import Config
class _IIndexClient(metaclass=ABCMeta):
# 索引器ID
client_id = ""
# 索引器类型
client_type = ""
# 索引器名称
client_name = "Indexer"
media = None
progress = None
filter = None
recognize_enhance_enable = False
def __init__(self):
self.media = Media()
self.filter = Filter()
self.progress = ProgressHelper()
laboratory = Config().get_config("laboratory")
if laboratory:
self.recognize_enhance_enable = laboratory.get("simplify_library_notification", False) or False
@abstractmethod
def match(self, ctype):
"""
        Whether this client matches the given client type
"""
pass
@abstractmethod
def get_status(self):
"""
        Check connectivity
"""
pass
@abstractmethod
def get_type(self):
"""
        Get the client type
"""
pass
@abstractmethod
def get_indexers(self):
"""
        :return: indexer info [(indexerId, indexerName, url)]
"""
pass
@abstractmethod
def search(self, order_seq,
indexer,
key_word,
filter_args: dict,
match_media,
in_from: SearchType):
"""
        Multi-threaded search by keyword
"""
pass
def filter_search_results(self, result_array: list,
order_seq,
indexer,
filter_args: dict,
match_media,
start_time):
"""
        Filter search results down to entries that satisfy the resource criteria
"""
ret_array = []
        index_success = 0
index_rule_fail = 0
index_match_fail = 0
index_error = 0
for item in result_array:
try:
# 名称
torrent_name = item.get('title')
# 描述
description = item.get('description')
if not torrent_name:
index_error += 1
continue
enclosure = item.get('enclosure')
size = item.get('size')
seeders = item.get('seeders')
peers = item.get('peers')
page_url = item.get('page_url')
uploadvolumefactor = round(float(item.get('uploadvolumefactor')), 1) if item.get(
'uploadvolumefactor') is not None else 1.0
downloadvolumefactor = round(float(item.get('downloadvolumefactor')), 1) if item.get(
'downloadvolumefactor') is not None else 1.0
imdbid = item.get("imdbid")
labels = item.get("labels")
# 全匹配模式下,非公开站点,过滤掉做种数为0的
if filter_args.get("seeders") and not indexer.public and str(seeders) == "0":
log.info(f"【{self.client_name}】{torrent_name} 做种数为0")
index_rule_fail += 1
continue
# 识别种子名称
imdbid_match = False
name_match = False
year_match = False
if match_media:
description = description if description else ""
torrent_name = torrent_name if torrent_name else ""
imdbid_match = imdbid and match_media.imdb_id and str(imdbid) == str(match_media.imdb_id)
name_match = match_media.org_string in torrent_name or \
match_media.original_title in torrent_name or \
match_media.org_string in description or \
match_media.original_title in description
year_match = (not match_media.year) or match_media.year in torrent_name or \
match_media.year in description
if (imdbid_match or name_match) and year_match and self.recognize_enhance_enable:
meta_info = MetaInfo(title=torrent_name,
subtitle=f"{labels} {description}",
mtype=match_media.media_type,
cn_name=match_media.org_string,
en_name=match_media.original_title,
tmdb_id=match_media.tmdb_id,
imdb_id=match_media.imdb_id)
meta_info.set_tmdb_info(self.media.get_tmdb_info(mtype=match_media.media_type,
tmdbid=match_media.tmdb_id,
append_to_response="all"))
else:
meta_info = MetaInfo(title=torrent_name, subtitle=f"{labels} {description}")
if not meta_info.get_name():
log.info(f"【{self.client_name}】{torrent_name} 无法识别到名称")
index_match_fail += 1
continue
# 大小及促销等
meta_info.set_torrent_info(size=size,
imdbid=imdbid,
upload_volume_factor=uploadvolumefactor,
download_volume_factor=downloadvolumefactor,
labels=labels)
# 先过滤掉可以明确的类型
if meta_info.type == MediaType.TV and filter_args.get("type") == MediaType.MOVIE:
log.info(
f"【{self.client_name}】{torrent_name} 是 {meta_info.type.value},"
f"不匹配类型:{filter_args.get('type').value}")
index_rule_fail += 1
continue
# 检查订阅过滤规则匹配
match_flag, res_order, match_msg = self.filter.check_torrent_filter(
meta_info=meta_info,
filter_args=filter_args,
uploadvolumefactor=uploadvolumefactor,
downloadvolumefactor=downloadvolumefactor)
if not match_flag:
log.info(f"【{self.client_name}】{match_msg}")
index_rule_fail += 1
continue
# 识别媒体信息
if not match_media:
# 不过滤
media_info = meta_info
else:
# 0-识别并模糊匹配;1-识别并精确匹配
if meta_info.imdb_id \
and match_media.imdb_id \
and str(meta_info.imdb_id) == str(match_media.imdb_id):
# IMDBID匹配,合并媒体数据
media_info = self.media.merge_media_info(meta_info, match_media)
else:
# 查询缓存
cache_info = self.media.get_cache_info(meta_info)
if match_media \
and str(cache_info.get("id")) == str(match_media.tmdb_id):
# 缓存匹配,合并媒体数据
media_info = self.media.merge_media_info(meta_info, match_media)
else:
# 重新识别
media_info = self.media.get_media_info(title=torrent_name, subtitle=description, chinese=False)
if not media_info:
log.warn(f"【{self.client_name}】{torrent_name} 识别媒体信息出错!")
index_error += 1
continue
elif not media_info.tmdb_info:
log.info(
f"【{self.client_name}】{torrent_name} 识别为 {media_info.get_name()} 未匹配到媒体信息")
index_match_fail += 1
continue
# TMDBID是否匹配
if str(media_info.tmdb_id) != str(match_media.tmdb_id):
log.info(
f"【{self.client_name}】{torrent_name} 识别为 "
f"{media_info.type.value}/{media_info.get_title_string()}/{media_info.tmdb_id} "
f"与 {match_media.type.value}/{match_media.get_title_string()}/{match_media.tmdb_id} 不匹配")
index_match_fail += 1
continue
# 合并媒体数据
media_info = self.media.merge_media_info(media_info, match_media)
# 过滤类型
if filter_args.get("type"):
if (filter_args.get("type") == MediaType.TV and media_info.type == MediaType.MOVIE) \
or (filter_args.get("type") == MediaType.MOVIE and media_info.type == MediaType.TV):
log.info(
f"【{self.client_name}】{torrent_name} 是 {media_info.type.value}/"
f"{media_info.tmdb_id},不是 {filter_args.get('type').value}")
index_rule_fail += 1
continue
# 洗版
if match_media.over_edition:
# 季集不完整的资源不要
'''if media_info.type != MediaType.MOVIE \
and media_info.get_episode_list():
log.info(f"【{self.client_name}】"
f"{media_info.get_title_string()}{media_info.get_season_string()} "
f"正在洗版,过滤掉季集不完整的资源:{torrent_name} {description}")
continue'''
# 检查优先级是否更好
if match_media.res_order \
and int(res_order) <= int(match_media.res_order):
log.info(
f"【{self.client_name}】"
f"{media_info.get_title_string()}{media_info.get_season_string()} "
f"正在洗版,已洗版优先级:{100 - int(match_media.res_order)},"
f"当前资源优先级:{100 - int(res_order)},"
f"跳过低优先级或同优先级资源:{torrent_name}"
)
continue
# 检查标题是否匹配季、集、年
if not self.filter.is_torrent_match_sey(media_info,
filter_args.get("season"),
filter_args.get("episode"),
filter_args.get("year")):
log.info(
f"【{self.client_name}】{torrent_name} 识别为 {media_info.type.value}/"
f"{media_info.get_title_string()}/{media_info.get_season_episode_string()} 不匹配季/集/年份")
index_match_fail += 1
continue
# 匹配到了
log.info(
f"【{self.client_name}】{torrent_name} {description} 识别为 {media_info.get_title_string()} "
f"{media_info.get_season_episode_string()} 匹配成功")
media_info.set_torrent_info(site=indexer.name,
site_order=order_seq,
enclosure=enclosure,
res_order=res_order,
filter_rule=filter_args.get("rule"),
size=size,
seeders=seeders,
peers=peers,
description=description,
page_url=page_url,
upload_volume_factor=uploadvolumefactor,
download_volume_factor=downloadvolumefactor)
if media_info not in ret_array:
                    index_success += 1
ret_array.append(media_info)
else:
index_rule_fail += 1
except Exception as err:
print(str(err))
# 循环结束
# 计算耗时
end_time = datetime.datetime.now()
log.info(
f"【{self.client_name}】{indexer.name} {len(result_array)} 条数据中,"
f"过滤 {index_rule_fail},"
f"不匹配 {index_match_fail},"
f"错误 {index_error},"
f"有效 {index_sucess},"
f"耗时 {(end_time - start_time).seconds} 秒")
self.progress.update(ptype=ProgressKey.Search,
text=f"{indexer.name} {len(result_array)} 条数据中,"
f"过滤 {index_rule_fail},"
f"不匹配 {index_match_fail},"
f"错误 {index_error},"
f"有效 {index_sucess},"
f"耗时 {(end_time - start_time).seconds} 秒")
return ret_array
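    # Illustrative ``filter_args`` (assumption): the dict consulted above is
    # assembled by the caller and typically carries entries such as
    #     filter_args = {
    #         "type": MediaType.MOVIE,   # media-type restriction
    #         "season": 1,               # season / episode / year checks
    #         "episode": None,
    #         "year": "2023",
    #         "seeders": True,           # drop 0-seeder results on private sites
    #         "rule": 1,                 # filter-rule group id
    #     }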
| 14,281 | Python | .py | 275 | 27.472727 | 125 | 0.43825 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,078 | plugin_manager.py | demigody_nas-tools/app/plugins/plugin_manager.py | import os.path
import traceback
from threading import Thread
import log
from app.conf import SystemConfig
from app.helper import SubmoduleHelper
from app.plugins.event_manager import EventManager
from app.utils import SystemUtils, PathUtils, ImageUtils
from app.utils.commons import singleton
from app.utils.types import SystemConfigKey
from config import Config
@singleton
class PluginManager:
"""
    Plugin manager
"""
systemconfig = None
eventmanager = None
    # User plugin directory
    user_plugin_path = None
    # Built-in plugin directory
    system_plugin_path = None
    # Plugin classes
    _plugins = {}
    # Running plugin instances
    _running_plugins = {}
    # Config key template
    _config_key = "plugin.%s"
    # Event-handling thread
    _thread = None
    # Running flag
    _active = False
def __init__(self):
        # config/plugins holds the plugin .py files; config/plugins/xxx holds each plugin's data
self.user_plugin_path = Config().get_user_plugin_path()
if not os.path.exists(self.user_plugin_path):
os.makedirs(self.user_plugin_path, exist_ok=True)
self.system_plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "modules")
if os.path.exists(self.user_plugin_path):
for plugin_file in PathUtils.get_dir_level1_files(self.user_plugin_path, [".py"]):
SystemUtils.copy(plugin_file, self.system_plugin_path)
self.init_config()
def init_config(self):
self.systemconfig = SystemConfig()
self.eventmanager = EventManager()
# 停止已有插件
self.stop_service()
# 启动插件
self.start_service()
def __run(self):
"""
        Event-dispatch loop
"""
while self._active:
event, handlers = self.eventmanager.get_event()
if event:
log.info(f"处理事件:{event.event_type} - {handlers}")
for handler in handlers:
try:
names = handler.__qualname__.split(".")
self.run_plugin(names[0], names[1], event)
except Exception as e:
log.error(f"事件处理出错:{str(e)} - {traceback.format_exc()}")
def start_service(self):
"""
启动
"""
# 加载插件
self.__load_plugins()
# 将事件管理器设为启动
self._active = True
self._thread = Thread(target=self.__run)
# 启动事件处理线程
self._thread.start()
def stop_service(self):
"""
停止
"""
# 将事件管理器设为停止
self._active = False
# 等待事件处理线程退出
if self._thread:
self._thread.join()
# 停止所有插件
self.__stop_plugins()
def __load_plugins(self):
"""
加载所有插件
"""
# 扫描插件目录
plugins = SubmoduleHelper.import_submodules(
"app.plugins.modules",
filter_func=lambda _, obj: hasattr(obj, 'module_name')
)
# 排序
plugins.sort(key=lambda x: x.module_order if hasattr(x, "module_order") else 0)
# 用户已安装插件列表
user_plugins = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
self._running_plugins = {}
self._plugins = {}
for plugin in plugins:
module_id = plugin.__name__
self._plugins[module_id] = plugin
# 未安装的跳过加载
if module_id not in user_plugins:
continue
# 生成实例
self._running_plugins[module_id] = plugin()
# 初始化配置
self.reload_plugin(module_id)
log.info(f"加载插件:{plugin}")
def run_plugin(self, pid, method, *args, **kwargs):
"""
        Invoke a method on a running plugin
"""
if not self._running_plugins.get(pid):
return None
if not hasattr(self._running_plugins[pid], method):
return
try:
return getattr(self._running_plugins[pid], method)(*args, **kwargs)
except Exception as err:
print(str(err), traceback.format_exc())
def reload_plugin(self, pid):
"""
        Apply the plugin's stored configuration
"""
if not pid:
return
if not self._running_plugins.get(pid):
return
if hasattr(self._running_plugins[pid], "init_config"):
try:
self._running_plugins[pid].init_config(self.get_plugin_config(pid))
log.debug(f"生效插件配置:{pid}")
except Exception as err:
print(str(err))
def __stop_plugins(self):
"""
停止所有插件
"""
for plugin in self._running_plugins.values():
if hasattr(plugin, "stop_service"):
plugin.stop_service()
def get_plugin_config(self, pid):
"""
获取插件配置
"""
if not self._plugins.get(pid):
return {}
return self.systemconfig.get(self._config_key % pid) or {}
def get_plugin_page(self, pid):
"""
获取插件额外页面数据
:return: 标题,页面内容,确定按钮响应函数
"""
if not self._running_plugins.get(pid):
return None, None, None
if not hasattr(self._running_plugins[pid], "get_page"):
return None, None, None
return self._running_plugins[pid].get_page()
def get_plugin_script(self, pid):
"""
获取插件额外脚本
"""
if not self._running_plugins.get(pid):
return None
if not hasattr(self._running_plugins[pid], "get_script"):
return None
return self._running_plugins[pid].get_script()
def get_plugin_state(self, pid):
"""
获取插件状态
"""
if not self._running_plugins.get(pid):
return None
if not hasattr(self._running_plugins[pid], "get_state"):
return None
return self._running_plugins[pid].get_state()
def save_plugin_config(self, pid, conf):
"""
保存插件配置
"""
if not self._plugins.get(pid):
return False
return self.systemconfig.set(self._config_key % pid, conf)
@staticmethod
def __get_plugin_color(plugin):
"""
获取插件的主题色
"""
if hasattr(plugin, "module_color") and plugin.module_color:
return plugin.module_color
if hasattr(plugin, "module_icon"):
icon_path = os.path.join(Config().get_root_path(),
"web", "static", "img", "plugins",
plugin.module_icon)
return ImageUtils.calculate_theme_color(icon_path)
return ""
def get_plugins_conf(self, auth_level):
"""
获取所有插件配置
"""
all_confs = {}
for pid, plugin in self._running_plugins.items():
# 基本属性
conf = {}
# 权限
if hasattr(plugin, "auth_level") \
and plugin.auth_level > auth_level:
continue
# 名称
if hasattr(plugin, "module_name"):
conf.update({"name": plugin.module_name})
# 描述
if hasattr(plugin, "module_desc"):
conf.update({"desc": plugin.module_desc})
# 版本号
if hasattr(plugin, "module_version"):
conf.update({"version": plugin.module_version})
# 图标
if hasattr(plugin, "module_icon"):
conf.update({"icon": plugin.module_icon})
# ID前缀
if hasattr(plugin, "module_config_prefix"):
conf.update({"prefix": plugin.module_config_prefix})
# 插件额外的页面
if hasattr(plugin, "get_page"):
title, _, _ = plugin.get_page()
conf.update({"page": title})
# 插件额外的脚本
if hasattr(plugin, "get_script"):
conf.update({"script": plugin.get_script()})
# 主题色
conf.update({"color": self.__get_plugin_color(plugin)})
# 配置项
conf.update({"fields": plugin.get_fields() or {}})
# 配置值
conf.update({"config": self.get_plugin_config(pid)})
# 状态
conf.update({"state": plugin.get_state()})
# 汇总
all_confs[pid] = conf
return all_confs
def get_plugin_apps(self, auth_level):
"""
获取所有插件
"""
all_confs = {}
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
for pid, plugin in self._plugins.items():
# 基本属性
conf = {}
# 权限
if hasattr(plugin, "auth_level") \
and plugin.auth_level > auth_level:
continue
# ID
conf.update({"id": pid})
# 安装状态
if pid in installed_apps:
conf.update({"installed": True})
else:
conf.update({"installed": False})
# 名称
if hasattr(plugin, "module_name"):
conf.update({"name": plugin.module_name})
# 描述
if hasattr(plugin, "module_desc"):
conf.update({"desc": plugin.module_desc})
# 版本
if hasattr(plugin, "module_version"):
conf.update({"version": plugin.module_version})
# 图标
if hasattr(plugin, "module_icon"):
conf.update({"icon": plugin.module_icon})
# 主题色
conf.update({"color": self.__get_plugin_color(plugin)})
if hasattr(plugin, "module_author"):
conf.update({"author": plugin.module_author})
# 作者链接
if hasattr(plugin, "author_url"):
conf.update({"author_url": plugin.author_url})
# 汇总
all_confs[pid] = conf
return all_confs
def get_plugin_commands(self):
"""
获取插件命令
[{
"cmd": "/xx",
"event": EventType.xx,
"desc": "xxxx",
"data": {}
}]
"""
ret_commands = []
for _, plugin in self._running_plugins.items():
if hasattr(plugin, "get_command"):
ret_commands.append(plugin.get_command())
return ret_commands
def run_plugin_method(self, pid, method, *args, **kwargs):
"""
        Run a plugin method and return its result
"""
if not self._running_plugins.get(pid):
return None
if not hasattr(self._running_plugins[pid], method):
return None
return getattr(self._running_plugins[pid], method)(*args, **kwargs)
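    # Illustrative dynamic dispatch (assumption): this is the hook the
    # PluginsSpider indexer client relies on, e.g.
    #     PluginManager().run_plugin_method(pid="MyIndexerPlugin", method="search",
    #                                       keyword="ubuntu", indexer=my_indexer, page=0)
    # where "MyIndexerPlugin" and ``my_indexer`` are hypothetical.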
| 11,188 | Python | .py | 310 | 23.122581 | 102 | 0.528211 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,079 | event_manager.py | demigody_nas-tools/app/plugins/event_manager.py | from queue import Queue, Empty
import log
from app.utils.commons import singleton
from app.utils.types import EventType
@singleton
class EventManager:
"""
    Event manager
"""
    # Event queue
    _eventQueue = None
    # Map of event type -> registered handler functions
    _handlers = {}
    def __init__(self):
        # Event queue
        self._eventQueue = Queue()
        # Map of event type -> registered handler functions
        self._handlers = {}
def get_event(self):
"""
        Pop one event and return it with its registered handlers
"""
try:
event = self._eventQueue.get(block=True, timeout=1)
handlerList = self._handlers.get(event.event_type)
return event, handlerList or []
except Empty:
return None, []
def add_event_listener(self, etype: EventType, handler):
"""
        Register a handler for an event type
"""
try:
handlerList = self._handlers[etype.value]
except KeyError:
handlerList = []
self._handlers[etype.value] = handlerList
if handler not in handlerList:
handlerList.append(handler)
log.debug(f"已注册事件:{etype.value}{handler}")
def remove_event_listener(self, etype: EventType, handler):
"""
        Remove a handler from an event type's listener list
"""
try:
handlerList = self._handlers[etype.value]
            if handler in handlerList:
handlerList.remove(handler)
if not handlerList:
del self._handlers[etype.value]
except KeyError:
pass
def send_event(self, etype: EventType, data: dict = None):
"""
        Dispatch an event
"""
if etype not in EventType:
return
event = Event(etype.value)
event.event_data = data or {}
log.debug(f"发送事件:{etype.value} - {event.event_data}")
self._eventQueue.put(event)
def register(self, etype: [EventType, list]):
"""
        Decorator that registers the wrapped function for the given event type(s)
        :param etype: event type, list of types, or the EventType enum itself
"""
def decorator(f):
if isinstance(etype, list):
for et in etype:
self.add_event_listener(et, f)
elif type(etype) == type(EventType):
for et in etype.__members__.values():
self.add_event_listener(et, f)
else:
self.add_event_listener(etype, f)
return f
return decorator
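    # Illustrative decorator usage (the sign-in plugin later in this dump does
    # exactly this); handlers receive the dispatched Event object:
    #     @EventHandler.register(EventType.SiteSignin)
    #     def sign_in(self, event=None):
    #         ...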
class Event(object):
"""
事件对象
"""
    def __init__(self, event_type=None):
        # Event type
        self.event_type = event_type
        # Dict carrying the event payload
        self.event_data = {}
# Shared instance used to register event handlers
EventHandler = EventManager()
| 2,771 | Python | .py | 89 | 19.651685 | 63 | 0.543346 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,080 | __init__.py | demigody_nas-tools/app/plugins/__init__.py | from .event_manager import EventManager, EventHandler, Event
from .plugin_manager import PluginManager
| 103 | Python | .py | 2 | 50.5 | 60 | 0.861386 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,081 | autosignin.py | demigody_nas-tools/app/plugins/modules/autosignin.py | import re
import time
from datetime import datetime, timedelta
from multiprocessing.pool import ThreadPool
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.date import DateTrigger
from lxml import etree
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as es
from selenium.webdriver.support.wait import WebDriverWait
from app.helper import ChromeHelper, SubmoduleHelper, SiteHelper
from app.helper.cloudflare_helper import under_challenge
from app.message import Message
from app.plugins import EventHandler, EventManager
from app.plugins.modules._base import _IPluginModule
from app.sites.mt import MtFunc
from app.sites.siteconf import SiteConf
from app.sites.sites import Sites
from app.utils import RequestUtils, ExceptionUtils, StringUtils, SchedulerUtils
from app.utils.types import EventType
from config import Config
from jinja2 import Template
import random
class AutoSignIn(_IPluginModule):
    # Plugin name
    module_name = "站点自动签到"
    # Plugin description
    module_desc = "站点自动签到保号,支持重试。"
    # Plugin icon
    module_icon = "signin.png"
    # Theme color
    module_color = "#4179F4"
    # Plugin version
    module_version = "1.0"
    # Plugin author
    module_author = "thsrite"
    # Author homepage
    author_url = "https://github.com/thsrite"
    # Prefix for the plugin's config item IDs
    module_config_prefix = "autosignin_"
    # Load order
    module_order = 0
    # Minimum user level allowed to use the plugin
    auth_level = 2
    # Results of the last run
    _last_run_results_list = []
    # Private attributes
    eventmanager = None
    siteconf = None
    _scheduler = None
    # Enabled switch
    _enabled = False
    # Task scheduling settings
    _site_schema = []
    _cron = None
    _sign_sites = None
    _queue_cnt = None
    _retry_keyword = None
    _special_sites = None
    _onlyonce = False
    _notify = False
    _clean = False
    _auto_cf = None
    _missed_detection = False
    _missed_schedule = None
    # Exit event
    _event = Event()
@staticmethod
def get_fields():
sites = {site.get("id"): site for site in Sites().get_site_dict()}
return [
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启定时签到',
'required': "",
'tooltip': '开启后会根据周期定时签到指定站点。',
'type': 'switch',
'id': 'enabled',
},
{
'title': '漏签检测',
'required': "",
'tooltip': '开启后会在指定时段内对未签到站点进行补签(每小时一次,时间随机)。',
'type': 'switch',
'id': 'missed_detection',
},
{
'title': '运行时通知',
'required': "",
'tooltip': '运行签到任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次',
'type': 'switch',
'id': 'onlyonce',
},
{
'title': '清理缓存',
'required': "",
'tooltip': '清理本日已签到(开启后全部站点将会签到一次)',
'type': 'switch',
'id': 'clean',
}
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '签到周期',
'required': "",
'tooltip': '自动签到时间,四种配置方法:1、配置间隔,单位小时,比如23.5;2、配置固定时间,如08:00;3、配置时间范围,如08:00-09:00,表示在该时间范围内随机执行一次;4、配置5位cron表达式,如:0 */6 * * *;配置为空则不启用自动签到功能。',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
},
{
'title': '漏签检测时段',
'required': "",
'tooltip': '配置时间范围,如08:00-23:59(每小时执行一次,执行时间随机)',
'type': 'text',
'content': [
{
'id': 'missed_schedule',
'placeholder': '08:00-23:59',
'default': '08:00-23:59'
}
]
},
{
'title': '签到队列',
'required': "",
'tooltip': '同时并行签到的站点数量,默认10(根据机器性能,缩小队列数量会延长签到时间,但可以提升成功率)',
'type': 'text',
'content': [
{
'id': 'queue_cnt',
'placeholder': '10',
}
]
},
{
'title': '重试关键词',
'required': "",
'tooltip': '重新签到关键词,支持正则表达式;每天首次全签,后续如果设置了重试词则只签到命中重试词的站点,否则全签。',
'type': 'text',
'content': [
{
'id': 'retry_keyword',
'placeholder': '失败|错误',
}
]
},
{
'title': '自动优选',
'required': "",
'tooltip': '命中重试词数量达到设置数量后,自动优化IP(0为不开启,需要正确配置自定义Hosts插件和优选IP插件)',
'type': 'text',
'content': [
{
'id': 'auto_cf',
'placeholder': '0',
}
]
},
]
]
},
{
'type': 'details',
'summary': '签到站点',
'tooltip': '只有选中的站点才会执行签到任务,不选则默认为全选',
'content': [
# 同一行
[
{
'id': 'sign_sites',
'type': 'form-selectgroup',
'content': sites
},
]
]
},
{
'type': 'details',
'summary': '特殊站点',
'tooltip': '选中的站点无论是否匹配重试关键词都会进行重签(如无需要可不设置)',
'content': [
# 同一行
[
{
'id': 'special_sites',
'type': 'form-selectgroup',
'content': sites
},
]
]
},
]
def get_page(self):
"""
        Extra plugin page: returns the page title and content
        :return: title, page content, confirm-button callback
"""
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
{% if ResultsCount > 0 %}
<tr>
<th>签到时间</th>
<th>签到站点</th>
<th>站点地址</th>
<th>签到结果</th>
</tr>
{% endif %}
</thead>
<tbody>
{% if ResultsCount > 0 %}
{% for Item in Results %}
<tr id="indexer_{{ Item["id"] }}">
<td>{{ Item["date"] }}</td>
<td>{{ Item["name"] }}</td>
<td>{{ Item["signurl"] }}</td>
<td>{{ Item["result"] }}</td>
</tr>
{% endfor %}
{% endif %}
</tbody>
</table>
</div>
"""
return "签到记录", Template(template).render(ResultsCount=len(self._last_run_results_list),
Results=self._last_run_results_list), None
def init_config(self, config=None):
self.siteconf = SiteConf()
self.eventmanager = EventManager()
# 读取配置
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._retry_keyword = config.get("retry_keyword")
self._sign_sites = config.get("sign_sites")
self._special_sites = config.get("special_sites") or []
self._notify = config.get("notify")
self._queue_cnt = config.get("queue_cnt")
self._onlyonce = config.get("onlyonce")
self._clean = config.get("clean")
self._auto_cf = config.get("auto_cf")
self._missed_detection = config.get("missed_detection")
self._missed_schedule = config.get("missed_schedule")
if self.is_valid_time_range(self._missed_schedule):
self._missed_schedule = re.sub(r'\s', '', str(self._missed_schedule)).replace('24:00', '23:59')
else:
self._missed_detection = False
self._missed_schedule = None
# 遍历列表并删除日期超过7天的字典项
today = datetime.now()
seven_days_ago = today - timedelta(days=7)
for item in self._last_run_results_list[:]:
date_str = item.get("date")
if date_str:
date_obj = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
if date_obj < seven_days_ago:
self._last_run_results_list.remove(item)
# 停止现有任务
self.stop_service()
# 启动服务
if self._enabled or self._onlyonce:
# 加载模块
self._site_schema = SubmoduleHelper.import_submodules('app.plugins.modules._autosignin',
filter_func=lambda _, obj: hasattr(obj, 'match'))
self.debug(f"加载站点签到:{self._site_schema}")
# 定时服务
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
# 清理缓存即今日历史
if self._clean:
self.delete_history(key=datetime.today().strftime('%Y-%m-%d'))
# 运行一次
if self._onlyonce:
self.info(f"签到服务启动,立即运行一次")
self._scheduler.add_job(self.sign_in, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 漏签检测服务
if self._missed_detection and self.is_valid_time_range(self._missed_schedule):
self.info(f"漏签检测服务启动,检测时段:{self._missed_schedule}")
self.check_missed_signs()
if self._onlyonce or self._clean:
# 关闭一次性开关|清理缓存开关
self._clean = False
self._onlyonce = False
self.update_config({
"enabled": self._enabled,
"cron": self._cron,
"retry_keyword": self._retry_keyword,
"sign_sites": self._sign_sites,
"special_sites": self._special_sites,
"notify": self._notify,
"onlyonce": self._onlyonce,
"queue_cnt": self._queue_cnt,
"clean": self._clean,
"auto_cf": self._auto_cf,
"missed_detection": self._missed_detection,
"missed_schedule": self._missed_schedule,
})
# 周期运行
if self._cron:
self.info(f"定时签到服务启动,周期:{self._cron}")
SchedulerUtils.start_job(scheduler=self._scheduler,
func=self.sign_in,
func_desc="自动签到",
cron=str(self._cron))
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
@staticmethod
def get_command():
"""
        Define the remote-control command
        :return: command keyword, event, description, payload
"""
return {
"cmd": "/pts",
"event": EventType.SiteSignin,
"desc": "站点签到",
"data": {}
}
@staticmethod
def is_valid_time_range(input_str):
        # Guard against missing configuration
        if not input_str:
            return False
        input_str = re.sub(r'\s', '', input_str).replace('24:00', '23:59')
        pattern = r'^\d{2}:\d{2}-\d{2}:\d{2}$'
        # Validate that the range is well-formed and the start precedes the end
        if re.match(pattern, input_str):
start_time, end_time = input_str.split('-')
start_hour, start_minute = map(int, start_time.split(':'))
end_hour, end_minute = map(int, end_time.split(':'))
if (0 <= start_hour <= 23 and 0 <= start_minute <= 59 and
0 <= end_hour <= 23 and 0 <= end_minute <= 59 and
(start_hour < end_hour or (start_hour == end_hour and start_minute < end_minute))):
return True
return False
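    # Illustrative checks (hypothetical values):
    #     is_valid_time_range("08:00-23:59")  # True
    #     is_valid_time_range("08:00-24:00")  # True ("24:00" normalizes to "23:59")
    #     is_valid_time_range("23:00-08:00")  # False (start must precede end)
    #     is_valid_time_range(None)           # False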
@staticmethod
def calculate_time_range(time_range, current_time):
        # Parse the "HH:MM-HH:MM" range string
        start_str, end_str = time_range.split('-')
        start_str = start_str.strip()
        end_str = end_str.strip()
        # Parse the start and end times
        start_hour, start_minute = map(int, start_str.split(':'))
        end_hour, end_minute = map(int, end_str.split(':'))
        # Normalize current_time before it is used to build the window
        if not isinstance(current_time, datetime):
            current_time = datetime.now()
        start_time = datetime(current_time.year, current_time.month, current_time.day, start_hour, start_minute, 0)
        end_time = datetime(current_time.year, current_time.month, current_time.day, end_hour, end_minute, 59)
        # Next calendar day; timedelta avoids day-of-month overflow at month end
        next_day = current_time + timedelta(days=1)
        # Work out where the current time falls relative to the window
        if start_time <= current_time < end_time:  # inside the window
            start_time = current_time.replace(minute=0, second=0) + timedelta(hours=1)
            if start_time > end_time:
                start_time = datetime(next_day.year, next_day.month, next_day.day, start_hour,
                                      start_minute, 0)
                end_time = datetime(next_day.year, next_day.month, next_day.day, start_hour, 59, 59)
                return '时段内', start_time, end_time
            if start_time + timedelta(minutes=59, seconds=59) < end_time:
                end_time = start_time + timedelta(minutes=59, seconds=59)
            return '时段内', start_time, end_time
        elif current_time >= end_time:  # after the window
            start_time = datetime(next_day.year, next_day.month, next_day.day, start_hour, start_minute,
                                  0)
            end_time = datetime(next_day.year, next_day.month, next_day.day, start_hour, 59, 59)
            return '时段后', start_time, end_time
        elif current_time < start_time:  # before the window
            start_time = datetime(current_time.year, current_time.month, current_time.day, start_hour, start_minute, 0)
            end_time = datetime(current_time.year, current_time.month, current_time.day, start_hour, 59, 59)
            return '时段前', start_time, end_time
else:
return None, None, None
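    # Worked example (hypothetical clock): with time_range "08:00-23:59" and
    # current_time 2024-01-31 10:20, the method returns
    # ('时段内', 2024-01-31 11:00:00, 2024-01-31 11:59:59), i.e. the next run
    # slot is inside the following hour; once past 23:59:59 it rolls over to
    # ('时段后', 2024-02-01 08:00:00, 2024-02-01 08:59:59).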
    # Missed-sign detection service (scheduled internally; unlike sign_in below
    # it takes no event argument, so it is not registered as an event handler)
    def check_missed_signs(self):
        # Today's date key
        today = datetime.today()
        # Check whether there is a sign-in history entry for today
        today = today.strftime('%Y-%m-%d')
        today_history = self.get_history(key=today)
        # No record yet today
        if not today_history:
            sign_sites = self._sign_sites
        else:
            # Sites signed in today that still need a retry
            retry_sites = today_history['retry']
            # Sites already signed in today
            already_sign_sites = today_history['sign']
            # Sites not signed in yet today
            no_sign_sites = [site_id for site_id in self._sign_sites if site_id not in already_sign_sites]
            # Sites to sign = retries + not-yet-signed
            sign_sites = list(set(retry_sites + no_sign_sites))
if len(sign_sites) > 0:
status, start_time, end_time = self.calculate_time_range(self._missed_schedule, datetime.now())
if status == '时段内' and not self._onlyonce:
self.info(f"漏签检测服务启动,即将进行补签!")
self._scheduler.add_job(self.sign_in, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
random_minute = random.randint(start_time.minute, end_time.minute)
random_second = random.randint(0, 59)
run_time = start_time.replace(minute=random_minute, second=random_second)
self.info(f"下一次检测时间:{run_time.strftime('%H:%M:%S')}")
self._scheduler.add_job(self.check_missed_signs, DateTrigger(run_date=run_time))
else:
status, start_time, end_time = self.calculate_time_range(self._missed_schedule,
datetime.now().replace(hour=0, minute=0, second=0)
+ timedelta(days=1))
random_minute = random.randint(start_time.minute, end_time.minute)
random_second = random.randint(0, 59)
run_time = start_time.replace(minute=random_minute, second=random_second)
self.info(f"下一次检测时间:{run_time.strftime('%H:%M:%S')}")
self._scheduler.add_job(self.check_missed_signs, DateTrigger(run_date=run_time))
@EventHandler.register(EventType.SiteSignin)
def sign_in(self, event=None):
"""
        Run the automatic sign-in task
"""
# 日期
today = datetime.today()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime('%Y-%m-%d')
# 删除昨天历史
self.delete_history(yesterday_str)
# 查看今天有没有签到历史
today = today.strftime('%Y-%m-%d')
today_history = self.get_history(key=today)
# 今日没数据
if not today_history:
sign_sites = self._sign_sites
self.info(f"今日 {today} 未签到,开始签到已选站点")
else:
# 今天已签到需要重签站点
retry_sites = today_history['retry']
# 今天已签到站点
already_sign_sites = today_history['sign']
# 今日未签站点
no_sign_sites = [site_id for site_id in self._sign_sites if site_id not in already_sign_sites]
# 签到站点 = 需要重签+今日未签+特殊站点
sign_sites = list(set(retry_sites + no_sign_sites + self._special_sites))
if sign_sites:
self.info(f"今日 {today} 已签到,开始重签重试站点、特殊站点、未签站点")
else:
self.info(f"今日 {today} 已签到,无重新签到站点,本次任务结束")
return
# 查询签到站点
sign_sites = Sites().get_sites(siteids=sign_sites)
if not sign_sites:
self.info("没有可签到站点,停止运行")
return
# 执行签到
self.info("开始执行签到任务")
with ThreadPool(min(len(sign_sites), int(self._queue_cnt) if self._queue_cnt else 10)) as p:
status = p.map(self.signin_site, sign_sites)
if status:
self.info("站点签到任务完成!")
# 命中重试词的站点id
retry_sites = []
# 命中重试词的站点签到msg
retry_msg = []
# 登录成功
login_success_msg = []
# 签到成功
sign_success_msg = []
# 已签到
already_sign_msg = []
# 仿真签到成功
fz_sign_msg = []
# 失败|错误
failed_msg = []
sites = {site.get('name'): site.get("id") for site in Sites().get_site_dict()}
for s in status:
site_names = re.findall(r'【(.*?)】', s[0])
site_id = sites.get(site_names[0], None) if site_names else None
# 记录本次命中重试关键词的站点
if self._retry_keyword:
match = re.search(self._retry_keyword, s[0])
if match and site_id:
self.debug(f"站点 {site_names[0]} 命中重试关键词 {self._retry_keyword}")
retry_sites.append(str(site_id))
# 命中的站点
retry_msg.append(s[0])
continue
if "登录成功" in s[0]:
login_success_msg.append(s[0])
elif "仿真签到成功" in s[0]:
fz_sign_msg.append(s[0])
elif "签到成功" in s[0]:
sign_success_msg.append(s[0])
elif "已签到" in s[0]:
already_sign_msg.append(s[0])
else:
failed_msg.append(s[0])
retry_sites.append(str(site_id))
if site_id:
status = re.search(r'【.*】(.*)', s[0]).group(1) or None
_result = {'id': site_id, 'date': s[1], 'name': site_names[0], 'signurl': s[2], 'result': status}
self._last_run_results_list.insert(0, _result)
if not self._retry_keyword:
# 没设置重试关键词则重试已选站点
retry_sites = self._sign_sites
self.debug(f"下次签到重试站点 {retry_sites}")
# 存入历史
if not today_history:
self.history(key=today,
value={
"sign": self._sign_sites,
"retry": retry_sites
})
else:
self.update_history(key=today,
value={
"sign": self._sign_sites,
"retry": retry_sites
})
# 触发CF优选
if self._auto_cf and len(retry_sites) >= (int(self._auto_cf) or 0) > 0:
# 获取自定义Hosts插件、CF优选插件,判断是否触发优选
customHosts = self.get_config("CustomHosts")
cloudflarespeedtest = self.get_config("CloudflareSpeedTest")
if customHosts and customHosts.get("enable") and cloudflarespeedtest and cloudflarespeedtest.get(
"cf_ip"):
self.info(f"命中重试数量 {len(retry_sites)},开始触发优选IP插件")
self.eventmanager.send_event(EventType.PluginReload,
{
"plugin_id": "CloudflareSpeedTest"
})
else:
self.info(f"命中重试数量 {len(retry_sites)},优选IP插件未正确配置,停止触发优选IP")
# 发送通知
if self._notify:
# 签到详细信息 登录成功、签到成功、已签到、仿真签到成功、失败--命中重试
signin_message = login_success_msg + sign_success_msg + already_sign_msg + fz_sign_msg + failed_msg
if len(retry_msg) > 0:
signin_message.append("——————命中重试—————")
signin_message += retry_msg
Message().send_site_signin_message(signin_message)
                jobs = self._scheduler.get_jobs()
                next_run_time = jobs[0].next_run_time.strftime('%Y-%m-%d %H:%M:%S') if jobs else '未知'
# 签到汇总信息
self.send_message(title="【自动签到任务完成】",
text=f"本次签到数量: {len(sign_sites)} \n"
f"命中重试数量: {len(retry_sites) if self._retry_keyword else 0} \n"
f"强制签到数量: {len(self._special_sites)} \n"
f"下次签到数量: {len(set(retry_sites + self._special_sites))} \n"
f"下次签到时间: {next_run_time} \n"
f"详见签到消息")
else:
self.error("站点签到任务失败!")
def __build_class(self, url):
for site_schema in self._site_schema:
try:
if site_schema.match(url):
return site_schema
except Exception as e:
ExceptionUtils.exception_traceback(e)
return None
def signin_site(self, site_info):
"""
        Sign in to a single site
"""
signurl = site_info.get("signurl")
site_module = self.__build_class(signurl)
home_url = StringUtils.get_base_url(signurl)
signinTime = datetime.now(tz=pytz.timezone(Config().get_timezone())).strftime('%Y-%m-%d %H:%M:%S')
if site_module and hasattr(site_module, "signin"):
try:
status, msg = site_module().signin(site_info)
# 特殊站点直接返回签到信息,防止仿真签到、模拟登陆有歧义
return msg, signinTime, home_url
except Exception as e:
return f"【{site_info.get('name')}】签到失败:{str(e)}", signinTime, home_url
else:
return self.signin_base(site_info), signinTime, home_url
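    # Each signin_site() call returns a (message, time, home_url) triple, e.g.
    # (values are illustrative):
    #   ("【SiteA】签到成功", "2024-01-01 08:00:00", "https://sitea.example")
    # __signin() later recovers the site name from the 【...】 prefix.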
    def signin_base(self, site_info):
"""
通用签到处理
:param site_info: 站点信息
:return: 签到结果信息
"""
if not site_info:
return ""
site = site_info.get("name")
try:
site_url = site_info.get("signurl")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua") or Config().get_ua()
if not site_url or not site_cookie:
self.warn("未配置 %s 的站点地址或Cookie,无法签到" % str(site))
return ""
chrome = ChromeHelper()
if site_info.get("chrome") and chrome.get_status():
# 首页
self.info("开始站点仿真签到:%s" % site)
home_url = StringUtils.get_base_url(site_url)
if "1ptba" in home_url:
home_url = f"{home_url}/index.php"
if not chrome.visit(url=home_url, ua=ua, cookie=site_cookie, proxy=site_info.get("proxy")):
self.warn("%s 无法打开网站" % site)
return f"【{site}】仿真签到失败,无法打开网站!"
# 循环检测是否过cf
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
self.warn("%s 跳转站点失败" % site)
return f"【{site}】仿真签到失败,跳转站点失败!"
# 判断是否已签到
html_text = chrome.get_html()
if not html_text:
self.warn("%s 获取站点源码失败" % site)
return f"【{site}】仿真签到失败,获取站点源码失败!"
# 查找签到按钮
html = etree.HTML(html_text)
xpath_str = None
for xpath in self.siteconf.get_checkin_conf():
if html.xpath(xpath):
xpath_str = xpath
break
if re.search(r'已签|签到已得', html_text, re.IGNORECASE):
self.info("%s 今日已签到" % site)
return f"【{site}】今日已签到"
if not xpath_str:
if SiteHelper.is_logged_in(html_text):
self.warn("%s 未找到签到按钮,模拟登录成功" % site)
return f"【{site}】模拟登录成功,已签到或无需签到"
else:
self.info("%s 未找到签到按钮,且模拟登录失败" % site)
return f"【{site}】模拟登录失败!"
# 开始仿真
try:
checkin_obj = WebDriverWait(driver=chrome.browser, timeout=6).until(
es.element_to_be_clickable((By.XPATH, xpath_str)))
if checkin_obj:
checkin_obj.click()
# 检测是否过cf
time.sleep(3)
if under_challenge(chrome.get_html()):
cloudflare = chrome.pass_cloudflare()
if not cloudflare:
self.info("%s 仿真签到失败,无法通过Cloudflare" % site)
return f"【{site}】仿真签到失败,无法通过Cloudflare!"
# 判断是否已签到 [签到已得125, 补签卡: 0]
if re.search(r'已签|签到已得', chrome.get_html(), re.IGNORECASE):
return f"【{site}】签到成功"
self.info("%s 仿真签到成功" % site)
return f"【{site}】仿真签到成功"
except Exception as e:
ExceptionUtils.exception_traceback(e)
self.warn("%s 仿真签到失败:%s" % (site, str(e)))
return f"【{site}】签到失败!"
# 模拟登录
else:
if site_url.find("attendance.php") != -1:
checkin_text = "签到"
else:
checkin_text = "模拟登录"
self.info(f"开始站点{checkin_text}:{site}")
# 访问链接
if "m-team" in site_url:
mt = MtFunc(site_info)
if mt.signin():
return f"【{site}】签到成功"
else:
return f"【{site}】签到失败"
else:
res = RequestUtils(headers=ua, cookies=site_cookie, proxies=Config().get_proxies() if site_info.get(
"proxy") else None).get_res(url=site_url)
if res and res.status_code in [200, 500, 403]:
if not SiteHelper.is_logged_in(res.text):
if under_challenge(res.text):
msg = "站点被Cloudflare防护,请开启浏览器仿真"
elif res.status_code == 200:
msg = "Cookie已失效"
else:
msg = f"状态码:{res.status_code}"
self.warn(f"{site} {checkin_text}失败,{msg}")
return f"【{site}】{checkin_text}失败,{msg}!"
else:
self.info(f"{site} {checkin_text}成功")
return f"【{site}】{checkin_text}成功"
elif res is not None:
self.warn(f"{site} {checkin_text}失败,状态码:{res.status_code}")
return f"【{site}】{checkin_text}失败,状态码:{res.status_code}!"
else:
self.warn(f"{site} {checkin_text}失败,无法打开网站")
return f"【{site}】{checkin_text}失败,无法打开网站!"
except Exception as e:
ExceptionUtils.exception_traceback(e)
self.warn("%s 签到失败:%s" % (site, str(e)))
return f"【{site}】签到失败:{str(e)}!"
    def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
    def get_state(self):
return self._enabled and self._cron
| 35,365 | Python | .py | 728 | 26.175824 | 172 | 0.450065 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,082 | torrenttransfer.py | demigody_nas-tools/app/plugins/modules/torrenttransfer.py | import os.path
from copy import deepcopy
from datetime import datetime, timedelta
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from bencode import bdecode, bencode
from app.downloader import Downloader
from app.media.meta import MetaInfo
from app.plugins.modules._base import _IPluginModule
from app.utils import Torrent
from app.utils.types import DownloaderType
from config import Config
class TorrentTransfer(_IPluginModule):
# 插件名称
module_name = "自动转移做种"
# 插件描述
module_desc = "定期转移下载器中的做种任务到另一个下载器。"
# 插件图标
module_icon = "torrenttransfer.jpg"
# 主题色
module_color = "#272636"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "torrenttransfer_"
# 加载顺序
module_order = 20
# 可使用的用户级别
user_level = 2
# 私有属性
_scheduler = None
downloader = None
sites = None
# 限速开关
_enable = False
_cron = None
_onlyonce = False
_fromdownloader = None
_todownloader = None
_frompath = None
_topath = None
_notify = False
_nolabels = None
_nopaths = None
_deletesource = False
_fromtorrentpath = None
_autostart = False
# 退出事件
_event = Event()
# 待检查种子清单
_recheck_torrents = {}
_is_recheck_running = False
# 任务标签
_torrent_tags = ["已整理", "转移做种"]
    @staticmethod
    def get_fields():
downloaders = {k: v for k, v in Downloader().get_downloader_conf_simple().items()
if v.get("type") in ["qbittorrent", "transmission"] and v.get("enabled")}
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启自动转移做种',
'required': "",
'tooltip': '开启后,定期将源下载器中已完成的种子任务迁移至目的下载器,任务转移后会自动暂停,校验通过且完整后才开始做种。',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '执行周期',
'required': "required",
'tooltip': '设置移转做种任务执行的时间周期,支持5位cron表达式;应避免任务执行过于频繁',
'type': 'text',
'content': [
{
'id': 'cron',
                                'placeholder': '0 0 * * *',
}
]
},
{
'title': '不转移种子标签',
'required': "",
'tooltip': '下载器中的种子有以下标签时不进行移转做种,多个标签使用英文,分隔',
'type': 'text',
'content': [
{
'id': 'nolabels',
'placeholder': '使用,分隔多个标签',
}
]
}
]
]
},
{
'type': 'details',
'summary': '源下载器',
'tooltip': '只有选中的下载器才会执行转移任务,只能选择一个',
'content': [
# 同一行
[
{
'id': 'fromdownloader',
'type': 'form-selectgroup',
'radio': True,
'onclick': 'torrenttransfer_check(this);',
'content': downloaders
},
],
[
{
'title': '种子文件路径',
'required': "required",
'tooltip': '源下载器保存种子文件的路径,需要是NAStool可访问的路径,QB一般为BT_backup,TR一般为torrents',
'type': 'text',
'content': [
{
'id': 'fromtorrentpath',
'placeholder': 'xxx/BT_backup、xxx/torrents',
}
]
},
{
'title': '数据文件根路径',
'required': "required",
'tooltip': '源下载器中的种子数据文件保存根目录路径,必须是下载器能访问的路径,用于转移时转换种子数据文件路径使用;留空不进行路径转换,使用种子的数据文件保存目录',
'type': 'text',
'content': [
{
'id': 'frompath',
'placeholder': '根路径,留空不进行路径转换',
}
]
}
]
]
},
{
'type': 'details',
'summary': '目的下载器',
'tooltip': '将做种任务转移到这个下载器,只能选择一个',
'content': [
# 同一行
[
{
'id': 'todownloader',
'type': 'form-selectgroup',
'radio': True,
'onclick': 'torrenttransfer_check(this);',
'content': downloaders
},
],
[
{
'title': '数据文件根路径',
'required': "required",
'tooltip': '目的下载器的种子数据文件保存目录根路径,必须是下载器能访问的路径,将会使用该路径替换源下载器中种子数据文件保存路径中的源目录根路径,替换后的新路径做为目的下载器种子数据文件的保存路径,需要准确填写,否则可能导致移转做种后找不到数据文件无法做种;留空不进行路径转换,使用种子的数据文件保存路径',
'type': 'text',
'content': [
{
'id': 'topath',
'placeholder': '根路径,留空不进行路径转换',
}
]
}
]
]
},
{
'type': 'div',
'content': [
[
{
'title': '不转移数据文件目录',
'required': "",
'tooltip': '以下数据文件目录的任务不进行转移,指下载器可访问的目录,每一行一个目录',
'type': 'textarea',
'content': {
'id': 'nopaths',
'placeholder': '每一行一个目录',
'rows': 3
}
}
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '校验完成后自动开始',
'required': "",
'tooltip': '自动开始目的下载器中校验完成且100%完整的种子,校验不完整的不会处理',
'type': 'switch',
'default': True,
'id': 'autostart',
},
{
'title': '删除源种子',
'required': "",
'tooltip': '转移成功后删除源下载器中的种子,首次运行请不要打开,避免种子丢失',
'type': 'switch',
'id': 'deletesource',
}
],
[
{
'title': '运行时通知',
'required': "",
'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
}
]
]
}
]
    @staticmethod
    def get_script():
"""
返回插件额外的JS代码
"""
return """
function torrenttransfer_check(obj) {
let val = $(obj).val();
let name = $(obj).attr("name") === "torrenttransfer_fromdownloader" ? "torrenttransfer_todownloader" : "torrenttransfer_fromdownloader";
if ($(obj).prop("checked")) {
$(`input[name^=${name}][type=checkbox]`).each(function () {
if ($(this).val() === val) {
$(this).prop('checked',false).prop('disabled', true);
} else {
$(this).prop('disabled', false);
}
});
} else {
$(`input[name^=${name}][type=checkbox]`).each(function () {
if ($(this).val() === val) {
$(this).prop('disabled', false);
}
});
}
}
"""
    def init_config(self, config=None):
self.downloader = Downloader()
# 读取配置
if config:
self._enable = config.get("enable")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._notify = config.get("notify")
self._nolabels = config.get("nolabels")
self._frompath = config.get("frompath")
self._topath = config.get("topath")
self._fromdownloader = config.get("fromdownloader")
self._todownloader = config.get("todownloader")
self._deletesource = config.get("deletesource")
self._fromtorrentpath = config.get("fromtorrentpath")
self._nopaths = config.get("nopaths")
self._autostart = config.get("autostart")
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self.get_state() or self._onlyonce:
# 检查配置
if self._fromtorrentpath and not os.path.exists(self._fromtorrentpath):
self.error(f"源下载器种子文件保存路径不存在:{self._fromtorrentpath}")
return
if isinstance(self._fromdownloader, list) and len(self._fromdownloader) > 1:
self.error(f"源下载器只能选择一个")
return
if isinstance(self._todownloader, list) and len(self._todownloader) > 1:
self.error(f"目的下载器只能选择一个")
return
if self._fromdownloader == self._todownloader:
self.error(f"源下载器和目的下载器不能相同")
return
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._cron:
self.info(f"移转做种服务启动,周期:{self._cron}")
self._scheduler.add_job(self.transfer,
CronTrigger.from_crontab(self._cron))
if self._onlyonce:
self.info(f"移转做种服务启动,立即运行一次")
self._scheduler.add_job(self.transfer, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"enable": self._enable,
"onlyonce": self._onlyonce,
"cron": self._cron,
"notify": self._notify,
"nolabels": self._nolabels,
"frompath": self._frompath,
"topath": self._topath,
"fromdownloader": self._fromdownloader,
"todownloader": self._todownloader,
"deletesource": self._deletesource,
"fromtorrentpath": self._fromtorrentpath,
"nopaths": self._nopaths,
"autostart": self._autostart
})
if self._scheduler.get_jobs():
if self._autostart:
# 追加种子校验服务
self._scheduler.add_job(self.check_recheck, 'interval', minutes=3)
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
    def get_state(self):
return True if self._enable \
and self._cron \
and self._fromdownloader \
and self._todownloader \
and self._fromtorrentpath else False
    def transfer(self):
"""
开始移转做种
"""
if not self._enable \
or not self._fromdownloader \
or not self._todownloader \
or not self._fromtorrentpath:
self.warn("移转做种服务未启用或未配置")
return
self.info("开始移转做种任务 ...")
# 源下载器
downloader = self._fromdownloader[0]
# 源下载器类型
downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
# 目的下载器
todownloader = self._todownloader[0]
# 目的下载器类型
to_downloader_type = self.downloader.get_downloader_type(downloader_id=todownloader)
# 获取下载器中已完成的种子
torrents = self.downloader.get_completed_torrents(downloader_id=downloader)
if torrents:
self.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}")
else:
self.info(f"下载器 {downloader} 没有已完成种子")
return
# 过滤种子,记录保存目录
hash_strs = []
for torrent in torrents:
if self._event.is_set():
self.info(f"移转服务停止")
return
# 获取种子hash
hash_str = self.__get_hash(torrent, downloader_type)
# 获取保存路径
save_path = self.__get_save_path(torrent, downloader_type)
if self._nopaths and save_path:
# 过滤不需要移转的路径
nopath_skip = False
for nopath in self._nopaths.split('\n'):
if os.path.normpath(save_path).startswith(os.path.normpath(nopath)):
self.info(f"种子 {hash_str} 保存路径 {save_path} 不需要移转,跳过 ...")
nopath_skip = True
break
if nopath_skip:
continue
# 获取种子标签
torrent_labels = self.__get_label(torrent, downloader_type)
if torrent_labels and self._nolabels:
is_skip = False
for label in self._nolabels.split(','):
if label in torrent_labels:
self.info(f"种子 {hash_str} 含有不转移标签 {label},跳过 ...")
is_skip = True
break
if is_skip:
continue
hash_strs.append({
"hash": hash_str,
"save_path": save_path
})
# 开始转移任务
if hash_strs:
self.info(f"需要移转的种子数:{len(hash_strs)}")
# 记数
total = len(hash_strs)
success = 0
fail = 0
for hash_item in hash_strs:
# 检查种子文件是否存在
torrent_file = os.path.join(self._fromtorrentpath,
f"{hash_item.get('hash')}.torrent")
if not os.path.exists(torrent_file):
self.error(f"种子文件不存在:{torrent_file}")
fail += 1
continue
# 查询hash值是否已经在目的下载器中
torrent_info = self.downloader.get_torrents(downloader_id=todownloader,
ids=[hash_item.get('hash')])
if torrent_info:
self.debug(f"{hash_item.get('hash')} 已在目的下载器中,跳过 ...")
continue
# 转换保存路径
download_dir = self.__convert_save_path(hash_item.get('save_path'),
self._frompath,
self._topath)
if not download_dir:
self.error(f"转换保存路径失败:{hash_item.get('save_path')}")
fail += 1
continue
# 如果是QB检查是否有Tracker,没有的话补充解析
if downloader_type == DownloaderType.QB:
# 读取种子内容、解析种子文件
content, _, _, retmsg = Torrent().read_torrent_content(torrent_file)
if not content:
self.error(f"读取种子文件失败:{retmsg}")
fail += 1
continue
# 读取trackers
try:
torrent_main = bdecode(content)
main_announce = torrent_main.get('announce')
except Exception as err:
self.error(f"解析种子文件 {torrent_file} 失败:{err}")
fail += 1
continue
if not main_announce:
self.info(f"{hash_item.get('hash')} 未发现tracker信息,尝试补充tracker信息...")
# 读取fastresume文件
fastresume_file = os.path.join(self._fromtorrentpath,
f"{hash_item.get('hash')}.fastresume")
if not os.path.exists(fastresume_file):
self.error(f"fastresume文件不存在:{fastresume_file}")
fail += 1
continue
# 尝试补充trackers
try:
with open(fastresume_file, 'rb') as f:
fastresume = f.read()
# 解析fastresume文件
torrent_fastresume = bdecode(fastresume)
# 读取trackers
fastresume_trackers = torrent_fastresume.get('trackers')
if isinstance(fastresume_trackers, list) \
and len(fastresume_trackers) > 0 \
and fastresume_trackers[0]:
# 重新赋值
torrent_main['announce'] = fastresume_trackers[0][0]
# 替换种子文件路径
torrent_file = os.path.join(Config().get_temp_path(),
f"{hash_item.get('hash')}.torrent")
# 编码并保存到临时文件
with open(torrent_file, 'wb') as f:
f.write(bencode(torrent_main))
except Exception as err:
self.error(f"解析fastresume文件 {fastresume_file} 失败:{err}")
fail += 1
continue
# 发送到另一个下载器中下载:默认暂停、传输下载路径、关闭自动管理模式
_, download_id, retmsg = self.downloader.download(
media_info=MetaInfo("自动转移做种"),
torrent_file=torrent_file,
is_paused=True,
tag=deepcopy(self._torrent_tags),
downloader_id=todownloader,
download_dir=download_dir,
download_setting="-2",
)
if not download_id:
# 下载失败
self.warn(f"添加转移任务出错,"
f"错误原因:{retmsg or '下载器添加任务失败'},"
f"种子文件:{torrent_file}")
fail += 1
continue
else:
# 追加校验任务
self.info(f"添加校验检查任务:{download_id} ...")
if not self._recheck_torrents.get(todownloader):
self._recheck_torrents[todownloader] = []
self._recheck_torrents[todownloader].append(download_id)
# 下载成功
self.info(f"成功添加转移做种任务,种子文件:{torrent_file}")
# TR会自动校验
if to_downloader_type == DownloaderType.QB:
# 开始校验种子
self.downloader.recheck_torrents(downloader_id=todownloader, ids=[download_id])
# 删除源种子,不能删除文件!
if self._deletesource:
self.downloader.delete_torrents(downloader_id=downloader,
                                                        ids=[hash_item.get('hash')],
delete_file=False)
success += 1
# 插入转种记录
history_key = "%s-%s" % (int(self._fromdownloader[0]), hash_item.get('hash'))
self.history(key=history_key,
value={
"to_download": int(self._todownloader[0]),
"to_download_id": download_id,
"delete_source": self._deletesource,
})
# 触发校验任务
if success > 0 and self._autostart:
self.check_recheck()
# 发送通知
if self._notify:
self.send_message(
title="【移转做种任务执行完成】",
text=f"总数:{total},成功:{success},失败:{fail}"
)
else:
self.info(f"没有需要移转的种子")
self.info("移转做种任务执行完成")
    def check_recheck(self):
"""
定时检查下载器中种子是否校验完成,校验完成且完整的自动开始辅种
"""
if not self._recheck_torrents:
return
if not self._todownloader:
return
if self._is_recheck_running:
return
downloader = self._todownloader[0]
# 需要检查的种子
recheck_torrents = self._recheck_torrents.get(downloader, [])
if not recheck_torrents:
return
self.info(f"开始检查下载器 {downloader} 的校验任务 ...")
self._is_recheck_running = True
# 下载器类型
downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
# 获取下载器中的种子
torrents = self.downloader.get_torrents(downloader_id=downloader,
ids=recheck_torrents)
if torrents:
can_seeding_torrents = []
for torrent in torrents:
# 获取种子hash
hash_str = self.__get_hash(torrent, downloader_type)
if self.__can_seeding(torrent, downloader_type):
can_seeding_torrents.append(hash_str)
if can_seeding_torrents:
self.info(f"共 {len(can_seeding_torrents)} 个任务校验完成,开始辅种 ...")
self.downloader.start_torrents(downloader_id=downloader, ids=can_seeding_torrents)
# 去除已经处理过的种子
self._recheck_torrents[downloader] = list(
set(recheck_torrents).difference(set(can_seeding_torrents)))
elif torrents is None:
self.info(f"下载器 {downloader} 查询校验任务失败,将在下次继续查询 ...")
else:
self.info(f"下载器 {downloader} 中没有需要检查的校验任务,清空待处理列表 ...")
self._recheck_torrents[downloader] = []
self._is_recheck_running = False
    @staticmethod
    def __get_hash(torrent, dl_type):
"""
获取种子hash
"""
try:
return torrent.get("hash") if dl_type == DownloaderType.QB else torrent.hashString
except Exception as e:
print(str(e))
return ""
    @staticmethod
    def __get_label(torrent, dl_type):
"""
获取种子标签
"""
try:
return torrent.get("tags") or [] if dl_type == DownloaderType.QB else torrent.labels or []
except Exception as e:
print(str(e))
return []
    @staticmethod
    def __get_save_path(torrent, dl_type):
"""
获取种子保存路径
"""
try:
return torrent.get("save_path") if dl_type == DownloaderType.QB else torrent.download_dir
except Exception as e:
print(str(e))
return ""
    @staticmethod
    def __can_seeding(torrent, dl_type):
"""
判断种子是否可以做种并处于暂停状态
"""
try:
return torrent.get("state") == "pausedUP" and torrent.get("tracker") if dl_type == DownloaderType.QB \
else (torrent.status.stopped and torrent.percent_done == 1 and torrent.trackers)
except Exception as e:
print(str(e))
return False
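    # What "can seed" means per client, per the checks above (sketch):
    #   qBittorrent:  state == "pausedUP" and a tracker is present
    #   Transmission: stopped, percent_done == 1.0 and trackers configured
    # i.e. the torrent finished rechecking at 100% and is merely paused.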
    @staticmethod
    def __convert_save_path(save_path, from_root, to_root):
"""
转换保存路径
"""
try:
# 没有保存目录,以目的根目录为准
if not save_path:
return to_root
# 没有设置根目录时返回save_path
if not to_root or not from_root:
return save_path
# 统一目录格式
save_path = os.path.normpath(save_path).replace("\\", "/")
from_root = os.path.normpath(from_root).replace("\\", "/")
to_root = os.path.normpath(to_root).replace("\\", "/")
# 替换根目录
if save_path.startswith(from_root):
return save_path.replace(from_root, to_root, 1)
except Exception as e:
print(str(e))
return None
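    # Usage sketch (hypothetical paths):
    #   __convert_save_path("/downloads/movies/A", "/downloads", "/volume1/data")
    #   -> "/volume1/data/movies/A"
    # Only the first occurrence of the source root is replaced; a save path
    # outside the source root falls through and yields None, which the caller
    # counts as a conversion failure.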
    def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
| 29,855 | Python | .py | 650 | 22.34 | 187 | 0.423361 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,083 | webhook.py | demigody_nas-tools/app/plugins/modules/webhook.py | from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import RequestUtils
from app.utils.types import EventType
class Webhook(_IPluginModule):
# 插件名称
module_name = "Webhook"
# 插件描述
module_desc = "事件发生时向第三方地址发送请求。"
# 插件图标
module_icon = "webhook.png"
# 主题色
module_color = "#C73A63"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "webhook_"
# 加载顺序
module_order = 4
# 可使用的用户级别
user_level = 2
# 私有属性
_save_tmp_path = None
_webhook_url = None
_method = None
    def init_config(self, config: dict = None):
if config:
self._webhook_url = config.get("webhook_url")
self._method = config.get('method')
    def get_state(self):
return self._webhook_url and self._method
    @staticmethod
    def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '请求方式',
'required': "required",
'tooltip': 'GET方式通过URL传递数据,POST方式通过JSON报文传递数据',
'type': 'select',
'content': [
{
'id': 'method',
'default': 'post',
'options': {
"get": "GET",
"post": "POST"
},
}
]
},
{
'title': 'Webhook地址',
'required': "required",
'type': 'text',
'content': [
{
'id': 'webhook_url',
'placeholder': 'http://127.0.0.1/webhook'
}
]
}
]
]
}
]
    def stop_service(self):
pass
    @EventHandler.register(EventType)
    def send(self, event):
"""
向第三方Webhook发送请求
"""
if not self._webhook_url:
return
event_info = {
"type": event.event_type,
"data": event.event_data
}
if self._method == 'post':
ret = RequestUtils(content_type="application/json").post_res(self._webhook_url, json=event_info)
else:
ret = RequestUtils().get_res(self._webhook_url, params=event_info)
if ret:
self.info(f"发送成功:{self._webhook_url}")
elif ret is not None:
self.error(f"发送失败,状态码:{ret.status_code},返回信息:{ret.text} {ret.reason}")
else:
self.error(f"发送失败,未获取到返回信息")
| 3,469 | Python | .py | 98 | 17.744898 | 108 | 0.410315 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,084 | doubansync.py | demigody_nas-tools/app/plugins/modules/doubansync.py | import random
from datetime import datetime, timedelta
from threading import Event, Lock
from time import sleep
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from jinja2 import Template
from app.downloader import Downloader
from app.media import DouBan
from app.media.meta import MetaInfo
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.searcher import Searcher
from app.subscribe import Subscribe
from app.utils import ExceptionUtils
from app.utils.types import SearchType, RssType, EventType, MediaType
from config import Config
from web.backend.web_utils import WebUtils
lock = Lock()
class DoubanSync(_IPluginModule):
# 插件名称
module_name = "豆瓣同步"
# 插件描述
module_desc = "同步豆瓣在看、想看、看过记录,自动添加订阅或搜索下载。"
# 插件图标
module_icon = "douban.png"
# 主题色
module_color = "#05B711"
# 插件版本
module_version = "1.2"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "doubansync_"
# 加载顺序
module_order = 17
# 可使用的用户级别
auth_level = 2
# 退出事件
_event = Event()
# 私有属性
douban = None
searcher = None
downloader = None
subscribe = None
_enable = False
_onlyonce = False
_sync_type = False
_rss_interval = 0
_interval = 0
_auto_search = False
_auto_rss = False
_users = []
_days = 0
_types = []
_cookie = None
_scheduler = None
    def init_config(self, config: dict = None):
self.douban = DouBan()
self.searcher = Searcher()
self.downloader = Downloader()
self.subscribe = Subscribe()
if config:
self._enable = config.get("enable")
self._onlyonce = config.get("onlyonce")
self._sync_type = config.get("sync_type")
if self._sync_type == '1':
self._interval = 0
rss_interval = config.get("rss_interval")
if rss_interval and str(rss_interval).isdigit():
self._rss_interval = int(rss_interval)
if self._rss_interval < 300:
self._rss_interval = 300
else:
self._rss_interval = 0
else:
self._rss_interval = 0
interval = config.get("interval")
if interval and str(interval).isdigit():
self._interval = int(interval)
else:
self._interval = 0
self._auto_search = config.get("auto_search")
self._auto_rss = config.get("auto_rss")
self._cookie = config.get("cookie")
self._users = config.get("users") or []
if self._users:
if isinstance(self._users, str):
self._users = self._users.split(',')
self._days = config.get("days")
if self._days and str(self._days).isdigit():
self._days = int(self._days)
else:
self._days = 0
self._types = config.get("types") or []
if self._types:
if isinstance(self._types, str):
self._types = self._types.split(',')
# 停止现有任务
self.stop_service()
# 启动服务
if self.get_state() or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._interval:
self.info(f"豆瓣全量同步服务启动,周期:{self._interval} 小时,类型:{self._types},用户:{self._users}")
self._scheduler.add_job(self.sync, 'interval',
hours=self._interval)
if self._rss_interval:
self.info(
f"豆瓣近期动态同步服务启动,周期:{self._rss_interval} 秒,类型:{self._types},用户:{self._users}")
self._scheduler.add_job(self.sync, 'interval',
seconds=self._rss_interval)
if self._onlyonce:
self.info("豆瓣同步服务启动,立即运行一次")
self._scheduler.add_job(self.sync, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": self._onlyonce,
"enable": self._enable,
"sync_type": self._sync_type,
"interval": self._interval,
"rss_interval": self._rss_interval,
"auto_search": self._auto_search,
"auto_rss": self._auto_rss,
"cookie": self._cookie,
"users": self._users,
"days": self._days,
"types": self._types
})
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
    def get_state(self):
return self._enable \
and self._users \
and self._types \
and ((self._sync_type == '1' and self._rss_interval)
or (self._sync_type != '1' and self._interval))
    @staticmethod
    def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启豆瓣同步',
'required': "",
'tooltip': '开启后,定时同步豆瓣在看、想看、看过记录,有新内容时自动添加订阅或者搜索下载,支持全量同步及近期动态两种模式,分别设置同步间隔',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照刮削周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
}
],
[
{
'title': '豆瓣用户ID',
'required': "required",
'tooltip': '需要同步数据的豆瓣用户ID,在豆瓣个人主页地址栏/people/后面的数字;如有多个豆瓣用户ID,使用英文逗号,分隔',
'type': 'text',
'content': [
{
'id': 'users',
'placeholder': '用户1,用户2,用户3',
}
]
},
{
'title': '同步内容',
'required': "required",
'tooltip': '同步哪些类型的收藏数据:do 在看,wish 想看,collect 看过,用英文逗号,分隔配置',
'type': 'text',
'content': [
{
'id': 'types',
'placeholder': 'do,wish,collect',
}
]
},
{
'title': '同步方式',
'required': "required",
'tooltip': '选择使用哪种方式同步豆瓣数据:全量同步(根据同步范围全量同步所有数据)、近期动态(同步用户近期的10条动态数据)',
'type': 'select',
'content': [
{
'id': 'sync_type',
'options': {
'0': '全量同步',
'1': '近期动态'
},
'default': '0',
'onchange': 'DoubanSync_sync_rss_change(this)'
}
]
}
],
[
{
'title': '全量同步范围(天)',
'required': "required",
'tooltip': '同步多少天内的记录,0表示同步全部,仅适用于全量同步',
'type': 'text',
'content': [
{
'id': 'days',
'placeholder': '30',
}
]
},
{
'title': '全量同步间隔(小时)',
'required': "required",
'tooltip': '间隔多久同步一次时间范围内的用户标记的数据,为了避免被豆瓣封禁IP,应尽可能拉长间隔时间',
'type': 'text',
'content': [
{
'id': 'interval',
'placeholder': '6',
}
]
},
{
'title': '近期动态同步间隔(秒)',
'required': "required",
'tooltip': '豆瓣近期动态的同步时间间隔,最小300秒,可设置较小的间隔同步用户近期动态数据,但无法同步全部标记数据',
'type': 'text',
'content': [
{
'id': 'rss_interval',
'placeholder': '300',
}
]
}
],
[
{
'title': '豆瓣Cookie',
'required': '',
'tooltip': '受豆瓣限制,部分电影需要配置Cookie才能同步到数据;通过浏览器抓取',
'type': 'textarea',
'content':
{
'id': 'cookie',
'placeholder': '',
'rows': 5
}
}
],
[
{
'title': '自动搜索下载',
'required': "",
'tooltip': '开启后豆瓣同步的数据会自动进行站点聚合搜索下载',
'type': 'switch',
'id': 'auto_search',
},
{
'title': '自动添加订阅',
'required': "",
'tooltip': '开启后未进行搜索下载的或搜索下载不完整的将加入订阅',
'type': 'switch',
'id': 'auto_rss',
}
],
]
}
]
    def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
results = self.get_history()
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
<tr>
<th></th>
<th>标题</th>
<th>类型</th>
<th>状态</th>
<th>添加时间</th>
<th></th>
</tr>
</thead>
<tbody>
{% if HistoryCount > 0 %}
{% for Item in DoubanHistory %}
<tr id="douban_history_{{ Item.id }}">
<td class="w-5">
<img class="rounded w-5" src="{{ Item.image }}"
onerror="this.src='../static/img/no-image.png'" alt=""
style="min-width: 50px"/>
</td>
<td>
<div>{{ Item.name }} ({{ Item.year }})</div>
{% if Item.rating %}
<div class="text-muted text-nowrap">
                                    评分:{{ Item.rating }}
</div>
{% endif %}
</td>
<td>
{{ Item.type }}
</td>
<td>
{% if Item.state == 'DOWNLOADED' %}
<span class="badge bg-green">已下载</span>
{% elif Item.state == 'RSS' %}
<span class="badge bg-blue">已订阅</span>
{% elif Item.state == 'NEW' %}
<span class="badge bg-blue">新增</span>
{% else %}
<span class="badge bg-orange">处理中</span>
{% endif %}
</td>
<td>
<small>{{ Item.add_time or '' }}</small>
</td>
<td>
<div class="dropdown">
<a href="#" class="btn-action" data-bs-toggle="dropdown"
aria-expanded="false">
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-dots-vertical {{ class }}"
width="24" height="24" viewBox="0 0 24 24"
stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<circle cx="12" cy="12" r="1"></circle>
<circle cx="12" cy="19" r="1"></circle>
<circle cx="12" cy="5" r="1"></circle>
</svg>
</a>
<div class="dropdown-menu dropdown-menu-end">
<a class="dropdown-item text-danger"
href='javascript:DoubanSync_delete_douban_history("{{ Item.id }}")'>
删除
</a>
</div>
</div>
</td>
</tr>
{% endfor %}
{% else %}
<tr>
<td colspan="6" align="center">没有数据</td>
</tr>
{% endif %}
</tbody>
</table>
</div>
"""
return "同步历史", Template(template).render(HistoryCount=len(results),
DoubanHistory=results), None
    @staticmethod
    def get_script():
"""
删除豆瓣历史记录的JS脚本
"""
return """
// 删除豆瓣历史记录
function DoubanSync_delete_douban_history(id){
ajax_post("run_plugin_method", {"plugin_id": 'DoubanSync', 'method': 'delete_sync_history', 'douban_id': id}, function (ret) {
$("#douban_history_" + id).remove();
});
}
// 同步方式切换
function DoubanSync_sync_rss_change(obj){
if ($(obj).val() == '1') {
$('#doubansync_rss_interval').parent().parent().show();
$('#doubansync_interval').parent().parent().hide();
$('#doubansync_days').parent().parent().hide();
}else{
$('#doubansync_rss_interval').parent().parent().hide();
$('#doubansync_interval').parent().parent().show();
$('#doubansync_days').parent().parent().show();
}
}
// 初始化完成后执行的方法
function DoubanSync_PluginInit(){
DoubanSync_sync_rss_change('#doubansync_sync_type');
}
"""
    @staticmethod
    def get_command():
"""
定义远程控制命令
:return: 命令关键字、事件、描述、附带数据
"""
return {
"cmd": "/db",
"event": EventType.DoubanSync,
"desc": "豆瓣同步",
"data": {}
}
    def stop_service(self):
"""
停止服务
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
    def delete_sync_history(self, douban_id):
"""
删除同步历史
"""
return self.delete_history(key=douban_id)
    @EventHandler.register(EventType.DoubanSync)
    def sync(self, event=None):
"""
同步豆瓣数据
"""
if not self._interval and not self._rss_interval:
self.info("豆瓣配置:同步间隔未配置或配置不正确")
return
with lock:
# 拉取豆瓣数据
medias = self.__get_all_douban_movies()
# 开始搜索
for media in medias:
if not media or not media.get_name():
continue
try:
# 查询数据库状态
history = self.get_history(media.douban_id)
if not history or history.get("state") == "NEW":
if self._auto_search:
# 需要搜索
media_info = WebUtils.get_mediainfo_from_id(mtype=media.type,
mediaid=f"DB:{media.douban_id}",
wait=True)
# 不需要自动加订阅,则直接搜索
if not media_info or not media_info.tmdb_info:
self.warn("%s 未查询到媒体信息" % media.get_name())
continue
# 检查是否存在,电视剧返回不存在的集清单
exist_flag, no_exists, _ = self.downloader.check_exists_medias(meta_info=media_info)
# 已经存在
if exist_flag:
# 更新为已下载状态
self.info("%s 已存在" % media_info.title)
self.__update_history(media=media_info, state="DOWNLOADED")
continue
if not self._auto_rss:
# 开始搜索
search_result, no_exists, search_count, download_count = self.searcher.search_one_media(
media_info=media_info,
in_from=SearchType.DB,
no_exists=no_exists,
user_name=media_info.user_name)
if search_result:
# 下载全了更新为已下载,没下载全的下次同步再次搜索
self.__update_history(media=media_info, state="DOWNLOADED")
else:
# 需要加订阅,则由订阅去搜索
self.info(
"%s %s 更新到%s订阅中..." % (media_info.title,
media_info.year,
media_info.type.value))
code, msg, _ = self.subscribe.add_rss_subscribe(mtype=media_info.type,
name=media_info.title,
year=media_info.year,
channel=RssType.Auto,
mediaid=f"DB:{media_info.douban_id}",
in_from=SearchType.DB)
if code != 0:
self.error("%s 添加订阅失败:%s" % (media_info.title, msg))
# 订阅已存在
if code == 9:
self.__update_history(media=media_info, state="RSS")
else:
# 插入为已RSS状态
self.__update_history(media=media_info, state="RSS")
else:
# 不需要搜索
if self._auto_rss:
# 加入订阅,使状态为R
self.info("%s %s 更新到%s订阅中..." % (
media.get_name(), media.year, media.type.value))
code, msg, _ = self.subscribe.add_rss_subscribe(mtype=media.type,
name=media.get_name(),
year=media.year,
mediaid=f"DB:{media.douban_id}",
channel=RssType.Auto,
state="R",
in_from=SearchType.DB)
if code != 0:
self.error("%s 添加订阅失败:%s" % (media.get_name(), msg))
# 订阅已存在
if code == 9:
self.__update_history(media=media, state="RSS")
else:
# 插入为已RSS状态
self.__update_history(media=media, state="RSS")
elif not history:
self.info("%s %s 更新到%s列表中..." % (
media.get_name(), media.year, media.type.value))
self.__update_history(media=media, state="NEW")
else:
self.info(f"{media.douban_id} {media.get_name()} {media.year} 已处理过")
except Exception as err:
self.error(f"{media.douban_id} {media.get_name()} {media.year} 处理失败:{str(err)}")
ExceptionUtils.exception_traceback(err)
continue
self.info("豆瓣数据同步完成")
    def __update_history(self, media, state):
"""
插入历史记录
"""
value = {
"id": media.douban_id,
"name": media.title or media.get_name(),
"year": media.year,
"type": media.type.value,
"rating": media.vote_average,
"image": media.get_poster_image(),
"state": state,
"add_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
if self.get_history(key=media.douban_id):
self.update_history(key=media.douban_id, value=value)
else:
self.history(key=media.douban_id, value=value)
    def __get_all_douban_movies(self):
"""
获取每一个用户的每一个类型的豆瓣标记
:return: 搜索到的媒体信息列表(不含TMDB信息)
"""
self.info(f"同步方式:{'近期动态' if str({self._sync_type}) == '1' else '全量同步'}")
# 返回媒体列表
media_list = []
# 豆瓣ID列表
douban_ids = {}
# 每一个用户
for user in self._users:
if not user:
continue
# 查询用户名称
user_name = ""
userinfo = self.douban.get_user_info(userid=user)
if userinfo:
user_name = userinfo.get("name")
if self._sync_type == '0':
# 每页条数
perpage_number = 15
# 所有类型成功数量
user_succnum = 0
for mtype in self._types:
if not mtype:
continue
self.info(f"开始获取 {user_name or user} 的 {mtype} 数据...")
# 开始序号
start_number = 0
# 类型成功数量
user_type_succnum = 0
# 每一页
while True:
# 页数
page_number = int(start_number / perpage_number + 1)
# 当前页成功数量
sucess_urlnum = 0
# 是否继续下一页
continue_next_page = True
self.debug(f"开始解析第 {page_number} 页数据...")
try:
items = self.douban.get_douban_wish(dtype=mtype, userid=user, start=start_number, wait=True)
if not items:
self.warn(f"第 {page_number} 页未获取到数据")
break
# 解析豆瓣ID
for item in items:
# 时间范围
date = item.get("date")
if not date:
continue_next_page = False
break
else:
mark_date = datetime.strptime(date, '%Y-%m-%d')
if self._days and not (datetime.now() - mark_date).days < int(self._days):
continue_next_page = False
break
doubanid = item.get("id")
if str(doubanid).isdigit():
self.info("解析到媒体:%s" % doubanid)
if doubanid not in douban_ids:
douban_ids[doubanid] = {
"user_name": user_name
}
sucess_urlnum += 1
user_type_succnum += 1
user_succnum += 1
self.debug(
f"{user_name or user} 第 {page_number} 页解析完成,共获取到 {sucess_urlnum} 个媒体")
except Exception as err:
ExceptionUtils.exception_traceback(err)
self.error(f"{user_name or user} 第 {page_number} 页解析出错:%s" % str(err))
break
# 继续下一页
if continue_next_page:
start_number += perpage_number
else:
break
# 当前类型解析结束
self.debug(f"用户 {user_name or user} 的 {mtype} 解析完成,共获取到 {user_type_succnum} 个媒体")
self.info(f"用户 {user_name or user} 解析完成,共获取到 {user_succnum} 个媒体")
else:
all_items = self.douban.get_latest_douban_interests(dtype='all', userid=user, wait=True)
self.debug(f"开始解析 {user_name or user} 的数据...")
self.debug(f"共获取到 {len(all_items)} 条数据")
# 所有类型成功数量
user_succnum = 0
for mtype in self._types:
# 类型成功数量
user_type_succnum = 0
items = list(filter(lambda x: x.get("type") == mtype, all_items))
for item in items:
# 时间范围
date = item.get("date")
if not date:
continue
else:
mark_date = datetime.strptime(date, '%Y-%m-%d')
if self._days and not (datetime.now() - mark_date).days < int(self._days):
continue
doubanid = item.get("id")
if str(doubanid).isdigit():
self.info("解析到媒体:%s" % doubanid)
if doubanid not in douban_ids:
douban_ids[doubanid] = {
"user_name": user_name
}
user_type_succnum += 1
user_succnum += 1
self.debug(f"用户 {user_name or user} 的 {mtype} 解析完成,共获取到 {user_type_succnum} 个媒体")
self.debug(f"用户 {user_name or user} 解析完成,共获取到 {user_succnum} 个媒体")
self.info(f"所有用户解析完成,共获取到 {len(douban_ids)} 个媒体")
# 查询豆瓣详情
for doubanid, info in douban_ids.items():
douban_info = self.douban.get_douban_detail(doubanid=doubanid, wait=True)
# 组装媒体信息
if not douban_info:
self.warn("%s 未正确获取豆瓣详细信息,尝试使用网页获取" % doubanid)
douban_info = self.douban.get_media_detail_from_web(doubanid)
if not douban_info:
self.warn("%s 无权限访问,需要配置豆瓣Cookie" % doubanid)
# 随机休眠
sleep(round(random.uniform(1, 5), 1))
continue
media_type = MediaType.TV if douban_info.get("episodes_count") else MediaType.MOVIE
self.info("%s:%s %s".strip() % (media_type.value, douban_info.get("title"), douban_info.get("year")))
meta_info = MetaInfo(title="%s %s" % (douban_info.get("title"), douban_info.get("year") or ""))
meta_info.douban_id = doubanid
meta_info.type = media_type
meta_info.overview = douban_info.get("intro")
meta_info.poster_path = douban_info.get("cover_url")
rating = douban_info.get("rating", {}) or {}
meta_info.vote_average = rating.get("value") or ""
meta_info.imdb_id = douban_info.get("imdbid")
meta_info.user_name = info.get("user_name")
if meta_info not in media_list:
media_list.append(meta_info)
# 随机休眠
sleep(round(random.uniform(1, 5), 1))
        return media_list | 33,938 | Python | .py | 702 | 22.7849 | 138 | 0.379621 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,085 | synctimer.py | demigody_nas-tools/app/plugins/modules/synctimer.py | from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.plugins.modules._base import _IPluginModule
from app.sync import Sync
from config import Config
class SyncTimer(_IPluginModule):
# 插件名称
module_name = "定时目录同步"
# 插件描述
module_desc = "定时对同步目录进行整理。"
# 插件图标
module_icon = "synctimer.png"
# 主题色
module_color = "#53BA48"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "synctimer_"
# 加载顺序
module_order = 5
# 可使用的用户级别
user_level = 1
# 私有属性
_sync = None
_scheduler = None
# 限速开关
_cron = None
    @staticmethod
    def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '同步周期',
'required': "required",
'tooltip': '支持5位cron表达式;仅适用于挂载网盘或网络共享等目录同步监控无法正常工作的场景下使用,正常挂载本地目录无法同步的,应优先查看日志解决问题,留空则不启动',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 */2 * *',
}
]
}
]
]
}
]
    def init_config(self, config=None):
self._sync = Sync()
# 读取配置
if config:
self._cron = config.get("cron")
# 停止现有任务
self.stop_service()
# 启动定时任务
if self._cron:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
self._scheduler.add_job(func=self.__timersync,
trigger=CronTrigger.from_crontab(self._cron))
self._scheduler.print_jobs()
self._scheduler.start()
self.info(f"目录定时同步服务启动,周期:{self._cron}")
    def get_state(self):
return True if self._cron else False
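    # Examples of accepted 5-field crontab expressions (illustrative):
    #   "0 0 */2 * *" -> 00:00 every second day
    #   "30 3 * * 1"  -> 03:30 every Monday
    # Note that CronTrigger.from_crontab() does not accept Quartz-style "?" fields.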
    def __timersync(self):
"""
开始同步
"""
self.info("开始定时同步 ...")
self._sync.transfer_sync()
self.info("定时同步完成")
    def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._scheduler.shutdown()
self._scheduler = None
except Exception as e:
print(str(e))
| 3,168 | Python | .py | 92 | 18.108696 | 119 | 0.472494 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,086 | autobackup.py | demigody_nas-tools/app/plugins/modules/autobackup.py | import glob
import os
import time
from datetime import datetime, timedelta
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from threading import Event
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
from config import Config
from web.action import WebAction
class AutoBackup(_IPluginModule):
# 插件名称
module_name = "自动备份"
# 插件描述
module_desc = "自动备份NAStool数据和配置文件。"
# 插件图标
module_icon = "backup.png"
# 主题色
module_color = "bg-green"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
module_config_prefix = "autobackup_"
# 加载顺序
module_order = 22
# 可使用的用户级别
auth_level = 1
# 私有属性
_scheduler = None
# 设置开关
_enabled = False
# 任务执行间隔
_cron = None
_cnt = None
_full = None
_bk_path = None
_onlyonce = False
_notify = False
# 退出事件
_event = Event()
    @staticmethod
    def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启定时备份',
'required': "",
'tooltip': '开启后会根据周期定时备份NAStool',
'type': 'switch',
'id': 'enabled',
},
{
'title': '是否完整版备份',
'required': "",
'tooltip': '开启后会备份完整数据库,保留有历史记录',
'type': 'switch',
'id': 'full',
},
{
'title': '运行时通知',
'required': "",
'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次',
'type': 'switch',
'id': 'onlyonce',
},
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '备份周期',
'required': "",
'tooltip': '设置自动备份时间周期,支持5位cron表达式',
'type': 'text',
'content': [
{
'id': 'cron',
                                'placeholder': '0 0 * * *',
}
]
},
{
'title': '最大保留备份数',
'required': "",
'tooltip': '最大保留备份数量,优先删除较早备份',
'type': 'text',
'content': [
{
'id': 'cnt',
'placeholder': '10',
}
]
},
{
'title': '自定义备份路径',
'required': "",
'tooltip': '自定义备份路径(默认备份路径/config/backup_file/)',
'type': 'text',
'content': [
{
'id': 'bk_path',
'placeholder': '/config/backup_file',
}
]
} if not SystemUtils.is_docker() else {}
]
]
}
]
    def init_config(self, config=None):
# 读取配置
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._cnt = config.get("cnt")
self._full = config.get("full")
self._bk_path = config.get("bk_path")
self._notify = config.get("notify")
self._onlyonce = config.get("onlyonce")
# 停止现有任务
self.stop_service()
# 启动服务
if self._enabled or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
# 运行一次
if self._onlyonce:
self.info(f"备份服务启动,立即运行一次")
self._scheduler.add_job(self.__backup, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"enabled": self._enabled,
"cron": self._cron,
"cnt": self._cnt,
"full": self._full,
"bk_path": self._bk_path,
"notify": self._notify,
"onlyonce": self._onlyonce,
})
# 周期运行
if self._cron:
self.info(f"定时备份服务启动,周期:{self._cron}")
self._scheduler.add_job(self.__backup,
CronTrigger.from_crontab(self._cron))
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
    def __backup(self):
"""
自动备份、删除备份
"""
self.info(f"当前时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))} 开始备份")
# docker用默认路径
if SystemUtils.is_docker():
bk_path = os.path.join(Config().get_config_path(), "backup_file")
else:
# 无自定义路径则用默认
bk_path = self._bk_path or os.path.join(Config().get_config_path(), "backup_file")
# 备份
zip_file = WebAction().backup(bk_path=bk_path,
full_backup=self._full)
if zip_file:
self.info(f"备份完成 备份文件 {zip_file} ")
else:
self.error("创建备份失败")
# 清理备份
bk_cnt = 0
del_cnt = 0
if self._cnt:
# 获取指定路径下所有以"bk"开头的文件,按照创建时间从旧到新排序
files = sorted(glob.glob(bk_path + "/bk**"), key=os.path.getctime)
bk_cnt = len(files)
# 计算需要删除的文件数
del_cnt = bk_cnt - int(self._cnt)
if del_cnt > 0:
self.info(
f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 需要删除备份文件数量 {del_cnt}")
# 遍历并删除最旧的几个备份
for i in range(del_cnt):
os.remove(files[i])
self.debug(f"删除备份文件 {files[i]} 成功")
else:
self.info(
f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 无需删除")
# 发送通知
if self._notify:
            jobs = self._scheduler.get_jobs()
            next_run_time = jobs[0].next_run_time.strftime('%Y-%m-%d %H:%M:%S') if jobs else '未设置'
self.send_message(title="【自动备份任务完成】",
text=f"创建备份{'成功' if zip_file else '失败'}\n"
f"清理备份数量 {del_cnt}\n"
f"剩余备份数量 {bk_cnt - del_cnt} \n"
f"下次备份时间: {next_run_time}")
    def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
    def get_state(self):
return self._enabled and self._cron
| 9,382 | Python | .py | 233 | 19.004292 | 117 | 0.385846 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,087 | media_library_archive.py | demigody_nas-tools/app/plugins/modules/media_library_archive.py | import os
from app.plugins.modules._base import _IPluginModule
from threading import Event
from app.utils import SystemUtils
from config import Config
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import pytz
from datetime import datetime, timedelta
from app.mediaserver import MediaServer
import functools
from jinja2 import Template
from app.utils import StringUtils
import re
def ResponseBody(func):
"""
rest api 结果包装
"""
def wrapper(*args, **kwargs):
try:
data = func(*args, **kwargs)
return {'code': 0, 'data': data}
except Exception as e:
return {'code': 1, 'msg': str(e)}
return wrapper
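# Result shape produced by the decorator (illustrative):
#   success -> {'code': 0, 'data': <return value>}
#   failure -> {'code': 1, 'msg': '<exception text>'}
# The front-end script below checks `ret.result.code === 0` accordingly.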
class MediaLibraryArchive(_IPluginModule):
# 插件名称
module_name = "媒体库归档"
# 插件描述
module_desc = "定期归档媒体库,留存记录以备查验。"
# 插件图标
module_icon = "'); background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXEAAAFxCAYAAACFh5ikAAAN3UlEQVR4nO3da4wd5XnA8cd7X99v2IC9JrZxCCbBESIhDhAiotI0gUgJipQWWiVpVPWLXZR8aKoKKVGqqvmQNMVfilrFVUWiFKVpJJKSW9M6IRcKdQotBmLjhfUFbIyv2Lvr3fWp5mC3Bu9u7d312fPM/H7S0SIwSO/7jv8e5szMO6NWq9UCgJRaLBtAXiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkJiIAyQm4gCJiThAYiIOkFibxWMq7NjRFw8//NPo69sbzz+/N44cOWZeJ2HBgrmxalVPLF9+adx003Wxbt1VacfCxTWjVqvVzDGTsXXrtvjsZ79sDi+iDRvujjvueG9px8fEORNnUr7xjYfjq1/9R5N4kW3a9EC0trbEBz7wnlKPkwvnmjgT9uMfPyrgDfSVr/x9PPXUjsqMl/Mj4kzI4OBQbN78Tyavwb70pb+LwcGTlRoz4xNxJmTPnn2xb98Bk9dgu3e/FLt2vVSpMTM+EWdC9u7db+KmSRFyOEPEmZCdO3eZuGnS1/diJcfN6EScCSnuYWZ6rFhxmZnnf4k4E3L55UtM3DQpHgCCM0ScCVm2bGksXbrY5DVYEfCeHhHn/4g4E9LZ2R6f+MSHTV6DfeYzH4/Ozo5KjZnxiTgTduutN8QnP3mnCWyQe+75vbjmmisrMVbOn3enMGnenXLxeXcKYxFxpoS3GE4tbzHkfIk4QGKuiQMkJuIAiYk4QGIiDpCYnX0qyt0k1eJul/Jyd0oFua8b952XhzPxirEnJmHPzlJxTbxC7InJ2ezZWQ4iXhH2xGQ09uzMT8Qrwp6YjMaenfmJeEXYE5Ox2LMzNxGvCHtiMhZ7duYm4hVhT0zGYs/O3ES8IuyJyVjs2ZmbiFeEPTEZjT078xPxirAnJqOxZ2d+Il4h9sTkbPbsLAfvTqkg707Bu1PKQ8QrylsMq8VbDMtLxAESc00cIDERB0hMxAESE3GAxOzsM42GhoZjy5bHord3d+zcubv+kqpDh45Wdj4on0WL5seaNVfU74wp3tGyfv266O7ustJTyN0p02TPnv31LbKKe7ahKlav7okNG+6KtWs9ZDRVRHwaPProk3HvvfdVbtxwxsaNd8ftt3vYaCq4Jt5g3/nOvwk4lXfffQ/UT2aYPGfiDbRt2464556/qMx44f+zefOfx7JlXpM8Gc7EG6S/fyA2bfpaJcYK56v4Xqj4gp+JE/EG+cUvnojnnrNFGpyt+GK/uEOLiRPxBrGPIYyuuMWWiRPxBrFRMYyueEaCiRPxBtm+/YVKjBMulBOcyRHxBnnllcOVGCdcKE8pT46IAyQm4gCJiThAYiIOkJhX0Sbwgx/8bdWn4KJ7fPDF+Hb/r+O/hl4u+Ugvnv67vlfWoTU1EafSBmsj8Tev/mf8cKC36lNBUiJOZb186kRsOvp4PDG030FAWiJOJT03fDj+6thj8cLwEQcAqYk4lfOrky/F5448YuEpBRGnUrYM9MWXj/27Rac0RJzKePD40/G1E09ZcEpFxKmELx79Zfx80NvyKB8Rp/T+8OD34sWRVy00pSTilNqdB74Vw7VTFpnS8tg9pfX7B78r4JSeiFNKnz70L3FgpN/iUnoiTul8/sgj8dzwIQtLJYg4pbLp2OOx9eRLFpXKEHFK4x9OPB0/GnjeglIpIk4p/OvAC/H14x7koXpEnPS2DR2I+1/9lYWkkkSc1AZqw/X3gffXhi0klSTipPatE8/GzuHDFpHKEnHS2j58ML554hkLSKWJOGl98/gzMRI1C0iliTgp/WigN355cq/Fo/JEnJT+uX+nhaPyQsTJ6PsDOz1WD6eJOOl8v7/XosFpIk4qzsLh9UScVH7o3SjwOiJOGo+e3Bvbhw5aMDiLiJPGTwb6LBa8gYiTQu/wkXjEbvVwDhEnhZ8OOguH0Yg4KfzHoN16YDQiTtM7fGownh85YqFgFCJO0+v1qlkYk4jT9LwvHMYm4jS9Z4ZesUgwBhGn6bkeDmMTcZpasYfm/pHjFgnGIOI0tV0jxywQjEPEaWp9wy6lwHhEnKbmUgqMT8QBEhNxgMREHCAxEQdITMRparNaOiwQjEPEaWqLWrotEIxDxGlqIg7jE3Ga2iWtMy0QjEPEaWrOxGF8Ik7TW9u+2CLBGEScpne1iMOYRJymt7ptvkWCMYg4TW+ViMOYRJymd1nr7Hhb+yUWCkYh4qRwU2ePhYJRiDgp3Ny53O2GMAoRJ4XiHSo3OxuHc4g4abynqyfaHLLwOn5HkMbqtgXxoZlrLBicRcRJ5UPda+KSFu9TaTafmr2u6lMwbUScVBa0dDkbbzK3dr0p7ui2JtNFxEmnOBt333hzeFfnsvijOddXfRqmlYiT0l2z3hptMxy+0+ljM6+OP5m7vroT0CT8LiClq9sXxe/MvMbiTZMi3r89y/w3AxEnrTtnXhXv6LjMAjZQ8QqE+xe+v34ZheYg4qT2u7PeGktaZ1nEBnhn5+Xx1wvfH5e2zi79WDMRcVK7om1ebJx9vYeALrIPd785/nTuu0s9xqwc+aT3to5L4nPzbraQF8HMGe2xcc718fHZ15ZubGUh4pRCEfI/m3eLxZxCb+9YWv/D8X1dbyrNmMpIxCmNIuT3L/ytaHVYT8rilu74g9lvj8/Puzmual+YeCTV4GinVC5tnRWbF30wrm1fYmEn4IPdV8YXF9xa/0kObdaJspnX0hn3zr8x/vLoY/Hzwd3W9zwUl06KLy+Ln+Qi4pRSR7TGH899V3y3f0c8eOKZOHxqwEKPorh08pGZVznzTkzEKbUiTuval8aD/U/HloE+i32WYm6KgC+2Y1JqIk7pLW+bE5+e8856zL994tnoGzla6UW/sXN53Na10qWTkhBxKuN9XVfEjZ3L4qH+HfFQ//Y4cmqwMmNf0Ta3Hu/i09M695x/Tl4iTqV0zWiLj858S32rt4dOvBbzMjsT7uJDOYk4lbS0ZVZ9N5rf7F4ZPxvcXf/0DZfjMktx1v3ujmVxU1ePs+4KEHEqrYjcx2aurX/OxPxnSW9LdNZdTSIOp50J4L5Tx+PJk/vjyaGX44mT+5ry2nl
xWWht++K4sm1B/bOmfWEsbOk659dRfiIOb1BcavmNrpX1z0BtuB70/x46ENuHD8azQ6/ESNTO+XcutuIBpmvaF8fVp8NdRLvdA9eVFyIO4yvOeIv3aBefQhH1Z4cOxq+HX/u8PHI8Dp8arJ+tn5qCuHfOaIu5MzpifktX/dbIItxvaVsUPW2ubTM6EYcLUER9XceS+ueNjtVOxuGRgdeiXivCPnD652AcGhmIwRipn1EXgS5C/fq/7or5LZ31/z5cCEcMTJE5MzpiTltH9JhQGshFNYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDE2ixe87vttk9VfQqAMTgTB0hMxAESE3GAxEQcIDERb5BFi+ZXYpxwoRYsmGvOJkHEG2TNmisqMU64UKtW9ZizSRDxBnGgwuhWrVo+6t/n/Ih4g6xYcVklxgkXauVKEZ8MEW+Q9evXxerVzsbhbNddtzZuueUd5mQSRLxBuru7YsOGuyoxVjhfGzbcHe3tHhyfDBFvoLVrr4yNG++uzHhhPF/4wsZYtmzJOL+C8yHiDXb77e+tH7xQZcXJzA03XOsYmAIzarVaLf0oEtqzZ39s2vRAbN26repTQYUU3wsVlxWL/ytlaoj4NBoaGo4tWx6L3t7dsXNn8dkVhw4drex8UD7FQ27FMxLFLbbFHVrFF/zF90NMHREHSMw1cYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYDERBwgMREHSEzEARITcYCsIuJ/ABNC/3gAaOENAAAAAElFTkSuQmCC"
# 主题色
module_color = "#1592a6"
# 插件版本
module_version = "1.1"
# 插件作者
module_author = "hotlcc"
# 作者主页
author_url = "https://gitee.com/hotlcc"
# 插件配置项ID前缀
module_config_prefix = "com.hotlcc.media-library-archive."
# 加载顺序
module_order = 22
# 可使用的用户级别
user_level = 2
# 私有属性
__timezone = None
# 调度器
__scheduler = None
# 退出事件
__exit_event = Event()
# 任务运行中状态
__running_state = Event()
# 配置对象
__config_obj = None
    @classmethod
    def get_fields(cls):
default_archive_path = cls.__get_default_archive_path()
fields = [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'id': 'enable',
'type': 'switch',
'title': '启动插件',
'tooltip': '插件总开关',
},
{
'id': 'enable_notify',
'type': 'switch',
'title': '运行时通知',
'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
},
{
'id': 'run_once',
'type': 'switch',
'title': '立即运行一次',
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照周期运行(同时上次触发运行的任务如果在运行中也会停止)',
}
]
]
},
{
'type': 'div',
'content': [
[
{
'type': 'text',
'title': '归档周期',
'required': 'required',
'tooltip': '设置自动归档执行周期,支持5位cron表达式;应避免任务执行过于频繁',
'content': [
{
'id': 'cron',
'placeholder': '0 0 * * *',
}
]
},
{
'type': 'text',
'title': '最大保留归档数',
'tooltip': '最大保留归档数量,优先删除较早归档(缺省时为10)。',
'content': [
{
'id': 'max_count',
'placeholder': '10',
}
]
},
{
'title': '自定义归档路径',
'tooltip': f'自定义归档路径(缺省时为:{default_archive_path})',
'type': 'text',
'content': [
{
'id': 'archive_path',
'placeholder': default_archive_path,
}
]
} if not SystemUtils.is_docker() else {}
]
]
}
]
return fields
    @classmethod
    def __get_default_archive_path(cls):
"""
获取默认归档路径
"""
return os.path.join(Config().get_config_path(), 'archive_file', 'media_library')
    def __init_scheduler(self, timezone = None):
"""
初始化调度器
"""
if (self.__scheduler):
return
if (not timezone):
timezone = Config().get_timezone()
self.__scheduler = BackgroundScheduler(timezone = timezone)
self.debug(f"服务调度器初始化完成")
    def __check_config(self, config):
"""
检查配置
"""
if (not config):
return True
max_count = config.get('max_count')
if (max_count):
try:
int(max_count)
except Exception as e:
self.error('最大保留归档数必须为数字')
return False
return True
    def init_config(self, config = None):
self.debug(f"初始化配置")
self.__timezone = Config().get_timezone()
self.__config_obj = config or {}
# 停止现有任务
self.stop_service()
self.debug(f"停止现有服务成功")
# 检查配置
check_result = self.__check_config(config)
if (not check_result):
self.error('插件配置有误')
return
# 启动插件服务
if (self.get_state()):
self.__init_scheduler(self.__timezone)
cron = self.__config_obj.get('cron')
self.__scheduler.add_job(self.__do_task, CronTrigger.from_crontab(cron))
self.info(f"定时任务已启动,周期: cron = {cron}")
else:
self.warn(f"插件配置无效,服务未启动")
# 如果需要立即运行一次
if (self.__config_obj.get('run_once')):
self.__init_scheduler(self.__timezone)
self.__scheduler.add_job(self.__do_task, 'date', run_date = datetime.now(tz = pytz.timezone(self.__timezone)) + timedelta(seconds = 3))
self.info(f"立即运行一次成功")
# 关闭一次性开关
self.__config_obj['run_once'] = False
self.update_config(self.__config_obj)
# 启动服务调度器
if (self.__scheduler):
self.__scheduler.print_jobs()
self.__scheduler.start()
self.debug(f"服务调度器初启动成功")
    def get_state(self):
"""
插件生效状态
"""
state = True if self.__config_obj \
and self.__config_obj.get('enable') \
and self.__config_obj.get('cron') else False
self.debug(f"插件状态: {state}")
return state
    def stop_service(self):
"""
退出插件
"""
try:
if self.__scheduler:
self.__scheduler.remove_all_jobs()
if self.__scheduler.running:
self.__exit_event.set()
self.__scheduler.shutdown()
self.__exit_event.clear()
self.__scheduler = None
self.debug(f"插件服务停止成功")
except Exception as e:
self.error(f"插件服务停止异常: {str(e)}")
    def __do_task(self):
"""
执行任务
"""
if (self.__running_state.is_set()):
self.debug('已有进行中的任务,本次不执行')
return
try:
self.info('执行任务开始')
self.__running_state.set()
archive_file = self.__do_archive()
if (not archive_file):
return
clear_count = self.__do_clear()
self.__send_notify(archive_file, clear_count)
finally:
self.__running_state.clear()
self.info('执行任务结束')
    def __do_archive(self):
"""
执行归档
:return 归档文件路径
"""
media_server = MediaServer()
if (not media_server or not media_server.server):
self.warn('媒体服务器不存在,请配置')
return None
media_trees = self.__build_media_trees(media_server)
if (not media_trees):
self.warn('媒体服务器中不存在媒体库,无需归档')
return None
self.info('从媒体服务器获取数据完成')
archive_file = self.__save_archive_file(media_trees)
self.info(f'归档文件生成完成: {archive_file}')
return archive_file
    def __build_media_trees(self, media_server: MediaServer):
"""
构造媒体树
"""
if (not media_server or not media_server.server):
return None
libraries = media_server.get_libraries()
if (not libraries):
return None
media_trees = []
for library in libraries:
if (not library):
continue
id = library.get('id')
name = library.get('name')
if (not id or not name):
continue
media_trees.append({
'name': name,
'children': self.__get_media_items(parent = id, media_server = media_server)
})
return media_trees
    def __cmp_obj(self, obj1, obj2):
"""
比较对象
"""
if (not obj1):
return 1
if (not obj2):
return -1
return 0
    def __get_media_items(self, parent, media_server: MediaServer):
"""
级联获取媒体库中全部items
"""
if (not parent or not media_server):
return None
items = media_server.get_items(parent)
        items = list(items or [])
# 按照年份排序
def cmp_item(item1, item2):
cmp = self.__cmp_obj(item1, item2)
if (cmp == 0):
year1 = item1.get('year')
year2 = item2.get('year')
cmp = self.__cmp_obj(year1, year2)
if (cmp == 0 and year1 != year2):
cmp = 1 if year1 > year2 else -1
if (cmp == 0):
title1 = item1.get('title')
title2 = item2.get('title')
cmp = self.__cmp_obj(title1, title2)
if (cmp == 0 and title1 != title2):
cmp = 1 if title1 > title2 else -1
return cmp
items.sort(key = functools.cmp_to_key(cmp_item))
media_items = []
for item in items:
if (not item):
continue
id = item.get('id')
name = self.__build_item_name(item)
if (not id or not name):
continue
"""
media_items.append({
'name': name,
'children': self.__get_media_items(parent = id, media_server = media_server)
})
"""
media_items.append({
'name': name
})
return media_items
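    # Sorting sketch for the comparator above (illustrative titles):
    #   [("B", 2020), ("A", 2020), ("C", 2019)] -> C (2019), A (2020), B (2020)
    # functools.cmp_to_key() adapts the pairwise comparator for list.sort().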
    def __build_item_name(self, item = None):
"""
构造item名称
"""
if (not item):
return None
title = item.get('title')
if (not title):
return None
name = title
year = item.get('year')
if (year):
name += f' ({year})'
tmdbid = item.get('tmdbid')
if (tmdbid):
name += f' [TMDB:{tmdbid}]'
imdbid = item.get('imdbid')
if (imdbid):
name += f' [IMDB:{imdbid}]'
return name
def __get_archive_path(self):
"""
获取归档目录
"""
if (SystemUtils.is_docker()):
return self.__get_default_archive_path()
else:
archive_path = self.__config_obj.get('archive_path')
if (archive_path):
return archive_path
else:
return self.__get_default_archive_path()
def __get_or_create_archive_path(self):
"""
获取归档目录,不存在时创建
"""
archive_path = self.__get_archive_path()
if (not os.path.exists(archive_path)):
os.makedirs(archive_path, exist_ok=True)
return archive_path
def __save_archive_file(self, media_trees = None):
"""
保存归档文件
:return archive_file
"""
markdown_content = self.__build_markdown_content(media_trees)
if (not markdown_content):
return None
archive_path = self.__get_or_create_archive_path()
datetime_str = datetime.now().strftime('%Y%m%d%H%M%S')
file_name = f"归档_{datetime_str}.md"
file_path = os.path.join(archive_path, file_name)
with open(file_path, 'w', encoding = 'utf8') as file:
file.write(markdown_content)
return file_path
def __build_markdown_content(self, media_trees, prefix = None):
"""
构建md内容
"""
if (not media_trees):
return None
if (not prefix):
prefix = '- '
content = ''
for tree in media_trees:
if (not tree):
continue
name = tree.get('name')
if (not name):
continue
content += prefix + name + '\n'
children = tree.get('children')
if (not children):
continue
content += self.__build_markdown_content(children, ' ' + prefix)
return content
def __send_notify(self, archive_file, clear_count):
"""
发送通知
"""
if (self.__config_obj.get('enable_notify')):
text = f'归档文件: {archive_file}\n' \
+ f'清理数量: {clear_count}'
self.send_message(
title = f"{self.module_name}任务执行完成",
text = text
)
def get_page(self):
"""
归档记录页面
"""
archive_files = self.__get_archive_files() or []
template = """
<div class="modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
<tr>
<th>归档名称</th>
<th>归档大小</th>
<th>归档时间</th>
<th>操作</th>
</tr>
</thead>
<tbody>
{% for archive_file in archive_files %}
<tr title="{{ archive_file.path }}">
<td>{{ archive_file.name }}</td>
<td>{{ archive_file.size }}</td>
<td>{{ archive_file.createTime }}</td>
<td>
<a data-name="{{ archive_file.name }}" href="javascript:void(0);" onclick="MediaLibraryArchive.remove(this)">删除</a>
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
"""
return '归档记录', Template(template).render(archive_files = archive_files), "MediaLibraryArchive.goBack()"
@staticmethod
def get_script():
"""
页面JS脚本
"""
return """
(function() {
var MediaLibraryArchive = {
id: "MediaLibraryArchive"
}
window.MediaLibraryArchive = MediaLibraryArchive;
var goBack = function() {
$("#modal-plugin-page").modal('hide');
$("#modal-plugin-" + MediaLibraryArchive.id).modal('show');
};
MediaLibraryArchive.goBack = goBack;
var remove = function(elem) {
var $elem = $(elem),
file_name = $elem.attr("data-name"),
$tr = $elem.parent().parent();
ajax_post("run_plugin_method", {"plugin_id": MediaLibraryArchive.id, 'method': 'remove_archive_file_for_api', "file_name": file_name}, function (ret) {
if (ret.result.code === 0) {
$tr.remove();
show_success_modal("归档删除成功!");
} else {
show_fail_modal(ret.result.msg);
}
});
};
MediaLibraryArchive.remove = remove;
})();
"""
def __get_archive_files(self):
"""
获取归档的文件信息
"""
archive_path = self.__get_or_create_archive_path()
file_names = os.listdir(archive_path)
if (not file_names):
return None
archive_files = []
for file_name in file_names:
if (not file_name):
continue
file_path = os.path.join(archive_path, file_name)
            if (os.path.exists(file_path) and os.path.isfile(file_path) and re.fullmatch(r'归档_\d{14}\.md', file_name)):
size = os.path.getsize(file_path)
datetime_str = file_name.replace('归档_', '').replace('.md', '')
archive_files.append({
'name': file_name,
'path': file_path,
'size': StringUtils.str_filesize(size),
'cmp_value': int(datetime_str),
'createTime': datetime.strftime(datetime.strptime(datetime_str,'%Y%m%d%H%M%S'), '%Y-%m-%d %H:%M:%S')
})
archive_files.sort(key = lambda archive_file: archive_file.get('cmp_value'), reverse = True)
return archive_files
def __remove_archive_file(self, file_name):
"""
移除归档文件
:param file_name: 归档文件名
"""
if (not file_name):
return False
archive_path = self.__get_or_create_archive_path()
file_path = os.path.join(archive_path, file_name)
        if (os.path.exists(file_path) and os.path.isfile(file_path) and re.fullmatch(r'归档_\d{14}\.md', file_name)):
os.remove(file_path)
self.info(f'归档文件[{file_name}]移除成功')
return True
else:
self.warn(f'归档文件[{file_name}]不存在,无需移除')
return False
@ResponseBody
def remove_archive_file_for_api(self, file_name):
"""
提供给接口 移除归档文件
"""
return self.__remove_archive_file(file_name)
def __get_max_count(self):
"""
获取最大归档限制数量
"""
max_count = self.__config_obj.get('max_count')
if (not max_count):
return 10
if (type(max_count) != int):
max_count = int(max_count)
return max_count
def __do_clear(self):
"""
清理超出数量的归档文件
"""
max_count = self.__get_max_count()
archive_files = self.__get_archive_files()
if (not archive_files):
return 0
archive_path = self.__get_or_create_archive_path()
index = 0
clear_count = 0
for archive_file in archive_files:
index += 1
if (index <= max_count):
continue
file_path = os.path.join(archive_path, archive_file.get('name'))
try:
os.remove(file_path)
clear_count += 1
except Exception as e:
self.error(f'清理超出数量的归档文件发生异常: {str(e)}')
self.info(f'清理超出数量的归档记录完成,清理数量:{clear_count}')
return clear_count
| 25,692 | Python | .py | 557 | 30.039497 | 4,877 | 0.564892 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,088 | cookiecloud.py | demigody_nas-tools/app/plugins/modules/cookiecloud.py | from collections import defaultdict
from datetime import datetime, timedelta
from threading import Event
from datetime import datetime
from jinja2 import Template
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.plugins.modules._base import _IPluginModule
from app.sites import Sites
from app.utils import RequestUtils, StringUtils
from config import Config
from web.backend.pro_user import ProUser
from app.indexer.indexerConf import IndexerConf
import re
class CookieCloudRunResult:
def __init__(self, date=None, flag=False, msg=""):
self.date = date
self.flag = flag
self.msg = msg
def __str__(self):
return f"CookieCloudRunResult(date={self.date}, flag={self.flag}, msg={self.msg})"
class CookieCloud(_IPluginModule):
# 插件名称
module_name = "CookieCloud同步"
# 插件描述
module_desc = "从CookieCloud云端同步数据,自动新增站点或更新已有站点Cookie。"
# 插件图标
module_icon = "cloud.png"
# 主题色
module_color = "#77B3D4"
# 插件版本
module_version = "1.2"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "cookiecloud_"
# 加载顺序
module_order = 21
# 可使用的用户级别
auth_level = 2
# 私有属性
sites = None
_scheduler = None
# 当前用户
_user = None
# 上次运行结果属性
_last_run_results_list = None
# 设置开关
_req = None
_server = None
_key = None
_password = None
_enabled = False
# 任务执行间隔
_cron = None
_onlyonce = False
# 通知
_notify = False
# 退出事件
_event = Event()
# 需要忽略的Cookie
_ignore_cookies = ['CookieAutoDeleteBrowsingDataCleanup']
# 黑白名单
_synchronousMode = 'all_mode'
_black_list = None
_white_list = None
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '服务器地址',
'required': "required",
'tooltip': '参考https://github.com/easychen/CookieCloud搭建私有CookieCloud服务器;也可使用默认的公共服务器,公共服务器不会存储任何非加密用户数据,也不会存储用户KEY、端对端加密密码,但要注意千万不要对外泄露加密信息,否则Cookie数据也会被泄露!',
'type': 'text',
'content': [
{
'id': 'server',
'placeholder': 'http://127.0.0.1/cookiecloud'
}
]
},
{
'title': '执行周期',
'required': "",
'tooltip': '设置自动同步时间周期,支持5位cron表达式',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
},
{
'title': '同步模式',
'required': "",
'tooltip': '选择Cookie同步模式',
'type': 'select',
'content': [
{
'id': 'synchronousMode',
'options': {
'all_mode':'全部',
'black_mode': '黑名单',
'white_mode': '白名单'
},
'default': 'all_mode'
}
]
},
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '用户KEY',
'required': 'required',
'tooltip': '浏览器CookieCloud插件中获取,使用公共服务器时注意不要泄露该信息',
'type': 'text',
'content': [
{
'id': 'key',
'placeholder': '',
}
]
},
{
'title': '端对端加密密码',
'required': "",
'tooltip': '浏览器CookieCloud插件中获取,使用公共服务器时注意不要泄露该信息',
'type': 'text',
'content': [
{
'id': 'password',
'placeholder': ''
}
]
}
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '运行时通知',
'required': "",
'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照定时周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
},
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '黑名单列表',
'required': "",
'tooltip': '黑名单列表(需开启黑名单模式,以","或换行分隔)',
'type': 'textarea',
'content':
{
'id': 'black_list',
'placeholder': '',
'rows': 5
}
},
{
'title': '白名单列表',
'required': "",
'tooltip': '白名单列表(需开启白名单模式,以","或换行分隔)',
'type': 'textarea',
'content':
{
'id': 'white_list',
'placeholder': '',
'rows': 5
}
}
]
]
}
]
def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
if not isinstance(self._last_run_results_list, list) or len(self._last_run_results_list) <= 0:
self.info("未获取到上次运行结果")
return None, None, None
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
{% if ResultsCount > 0 %}
<tr>
<th>运行开始时间</th>
<th>运行消息</th>
<th>是否连通</th>
<th></th>
</tr>
{% endif %}
</thead>
<tbody>
{% if ResultsCount > 0 %}
{% for Item in Results %}
<tr id="indexer_{{ Item.id }}">
<td>{{ Item.date }}</td>
<td>{{ Item.msg }}</td>
<td>{{ Item.flag }}</td>
</tr>
{% endfor %}
{% endif %}
</tbody>
</table>
</div>
"""
return "同步记录", Template(template).render(ResultsCount=len(self._last_run_results_list), Results=self._last_run_results_list), None
def init_config(self, config=None):
self.sites = Sites()
self._last_run_results_list = []
self._user = ProUser()
# 读取配置
if config:
self._server = config.get("server")
self._cron = config.get("cron")
self._key = config.get("key")
self._password = config.get("password")
self._notify = config.get("notify")
self._onlyonce = config.get("onlyonce")
self._synchronousMode = config.get("synchronousMode", "all_mode") or "all_mode"
self._black_list = config.get("black_list", "") or ""
self._white_list = config.get("white_list", "") or ""
self._req = RequestUtils(content_type="application/json")
if self._server:
if not self._server.startswith("http"):
self._server = "http://%s" % self._server
if self._server.endswith("/"):
self._server = self._server[:-1]
# 测试
_, msg, flag = self.__download_data()
_last_run_date = self.__get_current_date_str()
_last_run_msg = msg if StringUtils.is_string_and_not_empty(msg) else "测试连通性成功"
_result = CookieCloudRunResult(date=_last_run_date, flag=flag, msg=_last_run_msg)
self._last_run_results_list.append(_result)
if flag:
self._enabled = True
else:
self._enabled = False
self.info(msg)
# 停止现有任务
self.stop_service()
# 启动服务
if self._enabled:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
# 运行一次
if self._onlyonce:
self.info(f"同步服务启动,立即运行一次")
self._scheduler.add_job(self.__cookie_sync, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"server": self._server,
"cron": self._cron,
"key": self._key,
"password": self._password,
"notify": self._notify,
"onlyonce": self._onlyonce,
"synchronousMode": self._synchronousMode,
"black_list": self._black_list,
"white_list": self._white_list,
})
# 周期运行
if self._cron:
self.info(f"同步服务启动,周期:{self._cron}")
self._scheduler.add_job(self.__cookie_sync,
CronTrigger.from_crontab(self._cron))
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self):
return self._enabled and self._cron
def __get_current_date_str(self):
"""
获取当前日期字符串,格式为:2023-08-03 19:00:00
"""
# 获取当前时间并添加 1 秒
new_time = datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(seconds=1)
# 将时间格式化为指定格式
return new_time.strftime('%Y-%m-%d %H:%M:%S')
def __download_data(self) -> [dict, str, bool]:
"""
从CookieCloud下载数据
"""
if not self._server or not self._key or not self._password:
return {}, "CookieCloud参数不正确", False
req_url = "%s/get/%s" % (self._server, self._key)
ret = self._req.post_res(url=req_url, json={"password": self._password})
if ret and ret.status_code == 200:
result = ret.json()
if not result:
return {}, "", True
if result.get("cookie_data"):
return result.get("cookie_data"), "", True
return result, "", True
elif ret:
return {}, "同步CookieCloud失败,错误码:%s" % ret.status_code, False
else:
return {}, "CookieCloud请求失败,请检查服务器地址、用户KEY及加密密码是否正确", False
def __cookie_sync(self):
"""
同步站点Cookie
"""
# 同步数据
self.info(f"同步服务开始 ...")
# 最多显示50条同步数据
if len(self._last_run_results_list) > 50:
self._last_run_results_list = []
_last_run_date = self.__get_current_date_str()
contents, msg, flag = self.__download_data()
if not flag:
self.error(msg)
self.__send_message(msg)
_result = CookieCloudRunResult(date=_last_run_date, flag=flag, msg=msg)
self._last_run_results_list.append(_result)
return
if not contents:
self.info(f"未从CookieCloud获取到数据")
self.__send_message(msg)
_result = CookieCloudRunResult(date=_last_run_date, flag=flag, msg=msg)
self._last_run_results_list.append(_result)
return
# 整理数据,使用domain域名的最后两级作为分组依据
domain_groups = defaultdict(list)
domain_black_list = [".".join(re.search(r"(https?://)?(?P<domain>[a-zA-Z0-9.-]+)", _url).group("domain").split(".")[-2:]) \
for _url in re.split(",|\n|,|\t| ", self._black_list) if _url != "" and re.search(r"(https?://)?(?P<domain>[a-zA-Z0-9.-]+)", _url)]
domain_white_list = [".".join(re.search(r"(https?://)?(?P<domain>[a-zA-Z0-9.-]+)", _url).group("domain").split(".")[-2:]) \
for _url in re.split(",|\n|,|\t| ", self._white_list) if _url != "" and re.search(r"(https?://)?(?P<domain>[a-zA-Z0-9.-]+)", _url)]
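        # 示例(假设的域名):黑/白名单中的 "https://tracker.example.com/xx" 会被归一化为 "example.com"(取域名最后两级)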
for site, cookies in contents.items():
for cookie in cookies:
domain_parts = cookie["domain"].split(".")[-2:]
if self._synchronousMode and self._synchronousMode == "black_mode" and ".".join(domain_parts) in domain_black_list:
continue
elif self._synchronousMode and self._synchronousMode == "white_mode" and ".".join(domain_parts) not in domain_white_list:
continue
domain_key = tuple(domain_parts)
domain_groups[domain_key].append(cookie)
# 计数
update_count = 0
add_count = 0
# 索引器
for domain, content_list in domain_groups.items():
if self._event.is_set():
self.info(f"同步服务停止")
_result = CookieCloudRunResult(date=_last_run_date, flag=flag, msg=msg)
self._last_run_results_list.append(_result)
return
if not content_list:
continue
# 域名
domain_url = ".".join(domain)
# 只有cf的cookie过滤掉
cloudflare_cookie = True
for content in content_list:
if content["name"] != "cf_clearance":
cloudflare_cookie = False
break
if cloudflare_cookie:
continue
# Cookie
cookie_str = ";".join(
[f"{content.get('name')}={content.get('value')}"
for content in content_list
if content.get("name") and content.get("name") not in self._ignore_cookies]
)
# 查询站点
site_info = self.sites.get_sites_by_suffix(domain_url)
if site_info:
# 检查站点连通性
success, _, _ = self.sites.test_connection(site_id=site_info.get("id"))
if not success:
# 已存在且连通失败的站点更新Cookie
self.sites.update_site_cookie(siteid=site_info.get("id"), cookie=cookie_str)
update_count += 1
else:
# 查询是否在索引器范围
indexer_conf = self._user.get_indexer(url=domain_url)
indexer_info = None
if isinstance(indexer_conf, IndexerConf):
indexer_info = indexer_conf.to_dict()
if indexer_info:
# 支持则新增站点
site_pri = self.sites.get_max_site_pri() + 1
self.sites.add_site(
name=indexer_info.get("name"),
site_pri=site_pri,
signurl=indexer_info.get("domain"),
cookie=cookie_str,
rss_uses='T'
)
add_count += 1
# 发送消息
if update_count or add_count:
msg = f"更新了 {update_count} 个站点的Cookie数据,新增了 {add_count} 个站点"
else:
msg = f"同步完成,但未更新任何站点数据!"
self.info(msg)
_result = CookieCloudRunResult(date=_last_run_date, flag=flag, msg=msg)
self._last_run_results_list.append(_result)
# 发送消息
if self._notify:
self.__send_message(msg)
def __send_message(self, msg):
"""
发送通知
"""
self.send_message(
title="【CookieCloud同步任务执行完成】",
text=f"{msg}"
)
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
| 19,754 | Python | .py | 464 | 22.390086 | 186 | 0.424386 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,089 | customreleasegroups.py | demigody_nas-tools/app/plugins/modules/customreleasegroups.py | from app.media.meta.release_groups import ReleaseGroupsMatcher
from app.plugins.modules._base import _IPluginModule
class CustomReleaseGroups(_IPluginModule):
# 插件名称
module_name = "自定义制作组/字幕组"
# 插件描述
module_desc = "添加无法识别的制作组/字幕组,自定义多个组间分隔符"
# 插件图标
module_icon = "teamwork.png"
# 主题色
module_color = "#00ADEF"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "Shurelol"
# 作者主页
author_url = "https://github.com/Shurelol"
# 插件配置项ID前缀
module_config_prefix = "customreleasegroups_"
# 加载顺序
module_order = 6
# 可使用的用户级别
auth_level = 1
# 私有属性
_custom_release_groups = None
_custom_separator = None
_release_groups_matcher = None
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '',
'required': '',
'tooltip': '',
'type': 'textarea',
'content':
{
'id': 'release_groups',
'placeholder': '多个制作组/字幕组请用;或换行分隔,支持正则表达式,特殊字符注意转义',
'rows': 5
}
},
],
[
{
'title': '自定义分隔符',
'required': "",
'tooltip': '当匹配到多个制作组/字幕组时,使用此分隔符进行分隔,留空使用@;如名称中识别出A组和B组,分隔符为@,则结果为A@B',
'type': 'text',
'content': [
{
'id': 'separator',
'placeholder': '请不要使用文件名中禁止使用的符号!',
}
]
},
]
]
}
]
def init_config(self, config=None):
self._release_groups_matcher = ReleaseGroupsMatcher()
# 读取配置
if config:
custom_release_groups = config.get('release_groups')
custom_separator = config.get('separator')
if custom_release_groups:
if custom_release_groups.startswith(';'):
custom_release_groups = custom_release_groups[1:]
if custom_release_groups.endswith(';'):
custom_release_groups = custom_release_groups[:-1]
custom_release_groups = custom_release_groups.replace(";", "|").replace("\n", "|")
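                # 示例:输入 "A组;B组\nC组" 规整后得到正则分支 "A组|B组|C组"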
if custom_release_groups or custom_separator:
if custom_release_groups:
self.info("自定义制作组/字幕组已加载")
if custom_separator:
self.info(f"自定义分隔符 {custom_separator} 已加载")
self._release_groups_matcher.update_custom(custom_release_groups, custom_separator)
self._custom_release_groups = custom_release_groups
self._custom_separator = custom_separator
def get_state(self):
return True if self._custom_release_groups or self._custom_separator else False
def stop_service(self):
"""
退出插件
"""
pass
| 3,882 | Python | .py | 93 | 21.344086 | 100 | 0.449445 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,090 | diskspacesaver.py | demigody_nas-tools/app/plugins/modules/diskspacesaver.py | import copy
import datetime
import hashlib
import json
import os
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
class DiskSpaceSaver(_IPluginModule):
# 插件名称
module_name = "磁盘空间释放"
# 插件描述
module_desc = "计算文件SHA1,同磁盘下相同SHA1的文件只保留一个,其他的用硬链接替换。"
# 插件图标
module_icon = "diskusage.jpg"
# 主题色
module_color = "#FE9003"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "link2fun"
# 作者主页
author_url = "https://github.com/link2fun"
# 插件配置项ID前缀
module_config_prefix = "diskspace_saver_"
# 加载顺序
module_order = 13
# 可使用的用户级别
auth_level = 1
# 私有属性
_path = ''
_size = 100
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 文件后缀
[
{
'title': '文件后缀',
'required': "required",
'tooltip': '只识别这些后缀的文件,多个后缀用英文逗号隔开,如:.mkv,.mp4',
'type': 'text',
'content':
[{
'id': 'ext_list',
'placeholder': '文件后缀, 多个后缀用英文逗号隔开'
}]
},
{
'title': '文件大小(MB)',
'required': "required",
'tooltip': '单位 MB, 大于该大小的文件才会进行SHA1计算',
'type': 'text',
'content':
[{
'id': 'file_size',
'placeholder': '文件大小, 单位MB'
}]
}
],
[
{
'title': '磁盘目录(目录下的文件应均属于同一个分区)',
'required': '',
'tooltip': '要进行SHA1计算的文件路径,每行一个路径,请确保路径正确 且路径下均属于同一个磁盘分区',
'type': 'textarea',
'content':
{
'id': 'path_list',
'placeholder': '每行一个路径',
'rows': 5
}
}
],
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '目前不支持定时, 只有勾选了才会运行一次',
'type': 'switch',
'id': 'run_now',
},
{
'title': '仅查重',
'required': "",
'tooltip': '仅查重,不进行删除和硬链接替换',
'type': 'switch',
'id': 'dry_run',
},
{
'title': '快速模式',
'required': "",
'tooltip': '快速模式,不计算文件整体SHA1,只计算文件头部/中间/尾部的SHA1,速度快,但有可能会误判,请谨慎使用',
'type': 'switch',
'id': 'fast',
}
]
]
}
]
def init_config(self, config=None):
# 如果没有配置信息, 则不处理
if not config:
return
# config.get('path_list') 用 \n 分割为 list 并去除重复值和空值
path_list = list(set(config.get('path_list').split('\n')))
# file_size 转成数字
file_size = config.get('file_size')
# config.get('ext_list') 用 , 分割为 list 并去除重复值
ext_list = list(set(config.get('ext_list').split(',')))
result_path = os.path.join(self.get_data_path(), "sha1.json")
# 兼容旧配置
old_result_path = config.get("result_path")
if old_result_path:
del config["result_path"]
if os.path.exists(old_result_path) and not os.path.exists(result_path):
SystemUtils.move(old_result_path, result_path)
dry_run = config.get('dry_run', False)
fast = config.get('fast', False)
run_now = config.get('run_now')
if not run_now:
return
config['run_now'] = False
self.update_config(config)
# 如果没有配置信息, 则不处理
if not path_list or not file_size or not ext_list or not result_path:
self.info(f"磁盘空间释放配置信息不完整,不进行处理")
return
self.info(f"磁盘空间释放配置信息:{config}")
# 依次处理每个目录
for path in path_list:
self.info(f"开始处理目录:{path}")
# 如果目录不存在, 则不处理
if not os.path.exists(path):
self.info(f"目录不存在,不进行处理")
continue
# 如果目录不是文件夹, 则不处理
if not os.path.isdir(path):
self.info(f"目录不是文件夹,不进行处理")
continue
# 如果目录不是绝对路径, 则不处理
if not os.path.isabs(path):
self.info(f"目录不是绝对路径,不进行处理")
continue
_last_result = self.load_last_result(result_path)
self.info(f"加载上次处理结果,共有 {len(_last_result['file_info'])} 个文件。")
_duplicates = self.find_duplicates(path, ext_list, int(file_size), _last_result, fast)
self.info(f"找到 {len(_duplicates)} 个重复文件。")
self.process_duplicates(_duplicates, dry_run)
self.info(f"处理完毕。")
self.save_last_result(result_path, _last_result)
self.info(f"保存处理结果。")
def get_state(self):
return False
def stop_service(self):
"""
退出插件
"""
pass
@staticmethod
def get_sha1(file_path, buffer_size=1024 * 1024, fast=False):
"""
计算文件的 SHA1 值, fast 为 True 时读取文件前中后buffer_size大小的数据计算SHA1值
"""
h = hashlib.sha1()
buffer = bytearray(buffer_size)
# using a memoryview so that we can slice the buffer without copying it
buffer_view = memoryview(buffer)
with open(file_path, 'rb', buffering=0) as f:
if fast:
# 获取文件大小
file_size = os.path.getsize(file_path)
# 读取文件前buffer_size大小的数据计算SHA1值
n = f.readinto(buffer)
h.update(buffer_view[:n])
# 读取文件中间buffer_size大小的数据计算SHA1值
if file_size > buffer_size * 2:
f.seek(file_size // 2)
n = f.readinto(buffer)
h.update(buffer_view[:n])
# 读取文件后buffer_size大小的数据计算SHA1值
f.seek(-buffer_size, os.SEEK_END)
n = f.readinto(buffer)
h.update(buffer_view[:n])
else:
# 读取文件所有数据计算SHA1值
for n in iter(lambda: f.readinto(buffer), 0):
h.update(buffer_view[:n])
return h.hexdigest()
def find_duplicates(self, folder_path, _ext_list, _file_size, last_result, fast=False):
"""
查找重复的文件,返回字典,key 为文件的 SHA1 值,value 为文件路径的列表
"""
duplicates = {}
file_group_by_size = {}
# 先进行依次过滤
for dirpath, dirnames, filenames in os.walk(folder_path):
for filename in filenames:
file_path = os.path.join(dirpath, filename)
file_ext = os.path.splitext(file_path)[1]
file_stat = os.stat(file_path)
file_size = file_stat.st_size
file_ino = f"{file_stat.st_dev}_{file_stat.st_ino}"
if file_ext.lower() not in _ext_list:
continue
if file_size < _file_size * 1024 * 1024:
continue
file_mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
if file_group_by_size.get(file_size) is None:
file_group_by_size[file_size] = []
file_group_by_size[file_size].append(
{'filePath': file_path, 'fileExt': file_ext, 'fileSize': file_size,
'fileModifyTime': str(file_mtime), 'fileIno': file_ino, 'sameIno': False})
# 循环 file_group_by_size
for file_size, file_list in file_group_by_size.items():
# 如果文件数量大于1,进行sha1计算
if len(file_list) <= 1:
# 没有大小一样的 不需要处理
self.debug(f'{file_list[0]["filePath"]} 大小相同的文件数量为1,无需计算sha1')
continue
file_list = copy.deepcopy(file_list)
# file_list中fileIno相同的文件,只保留一个
for file_info in file_list:
if file_info['sameIno']:
continue
file_ino = file_info['fileIno']
for info in file_list:
if file_ino == info['fileIno'] and file_info['filePath'] != info['filePath']:
info['sameIno'] = True
file_list = [file_info for file_info in file_list if not file_info['sameIno']]
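            # 至此同一 inode(fileIno 形如 "st_dev_st_ino")的硬链接在 file_list 中只保留一个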
# 如果文件数量大于1,进行sha1计算
if len(file_list) <= 1:
# 没有大小一样的 不需要处理
self.debug(f'{file_list[0]["filePath"]} 排除硬链接后大小相同的文件数量为1,无需计算sha1')
continue
for file_info in file_list:
file_path = file_info['filePath']
file_mtime = file_info['fileModifyTime']
file_size = file_info['fileSize']
sha1 = None
# 查找是否存在相同路径的文件
for info in last_result['file_info']:
if file_path == info['filePath']:
# 如果文件大小和修改时间都一致,则直接使用之前计算的 sha1 值
if file_size == info['fileSize'] and str(file_mtime) == info['fileModifyTime']:
self.info(
f'文件 {file_path} 的大小和修改时间与上次处理结果一致,直接使用上次处理结果')
sha1 = info['fileSha1']
break
if sha1 is None:
self.info(f'计算文件 {file_path} 的 SHA1 值')
sha1 = self.get_sha1(file_path, fast=fast)
file_info = {'filePath': file_path,
'fileSize': file_size,
'fileModifyTime': str(file_mtime),
'fileSha1': sha1}
last_result['file_info'].append(file_info)
if sha1 in duplicates:
duplicates[sha1].append(file_path)
else:
duplicates[sha1] = [file_path]
return duplicates
def process_duplicates(self, duplicates, dry_run=False):
"""
处理重复的文件,保留一个文件,其他的用硬链接替换
"""
for sha1, files in duplicates.items():
if len(files) > 1:
for file_path in files[1:]:
stat_first = os.stat(files[0])
stat_compare = os.stat(file_path)
if stat_first.st_dev == stat_compare.st_dev:
if stat_first.st_ino == stat_compare.st_ino:
self.info(f'文件 {files[0]} 和 {file_path} 是同一个文件,无需处理')
else:
if dry_run:
self.info(f'文件 {files[0]} 和 {file_path} 是重复文件,dry_run中,不做处理')
continue
# 使用try catch
try:
# 先备份原文件
os.rename(file_path, file_path + '.bak')
# 用硬链接替换原文件
os.link(files[0], file_path)
# 删除备份文件
os.remove(file_path + '.bak')
self.info(f'文件 {files[0]} 和 {file_path} 是重复文件,已用硬链接替换')
except Exception as err:
print(str(err))
# 如果硬链接失败,则将备份文件改回原文件名
os.rename(file_path + '.bak', file_path)
self.info(f'文件 {files[0]} 和 {file_path} 是重复文件,'
'硬链接替换失败,已恢复原文件')
else:
self.info(f'文件 {files[0]} 和 {file_path} 不在同一个磁盘,无法用硬链接替换')
continue
@staticmethod
def load_last_result(last_result_path):
"""
加载上次处理的结果
"""
if os.path.exists(last_result_path):
with open(last_result_path, 'r') as f:
return json.load(f)
else:
return {'file_info': [], 'inode_info': []}
@staticmethod
def save_last_result(last_result_path, last_result):
"""
保存处理结果到文件
"""
with open(last_result_path, 'w') as f:
json.dump(last_result, f)
| 15,051 | Python | .py | 316 | 24.009494 | 103 | 0.438784 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,091 | cloudflarespeedtest.py | demigody_nas-tools/app/plugins/modules/cloudflarespeedtest.py | import os
from datetime import datetime, timedelta
from pathlib import Path
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.plugins import EventManager, EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils, RequestUtils, IpUtils
from app.utils.types import EventType
from config import Config
class CloudflareSpeedTest(_IPluginModule):
    # 插件名称
    module_name = "Cloudflare IP优选"
    # 插件描述
    module_desc = "🌩 测试 Cloudflare CDN 延迟和速度,自动优选IP。"
    # 插件图标
    module_icon = "cloudflare.jpg"
    # 主题色
    module_color = "#F6821F"
    # 插件版本
    module_version = "1.0"
    # 插件作者
    module_author = "thsrite"
    # 作者主页
    author_url = "https://github.com/thsrite"
    # 插件配置项ID前缀
    module_config_prefix = "cloudflarespeedtest_"
    # 加载顺序
    module_order = 12
    # 可使用的用户级别
    auth_level = 1
    # 私有属性
eventmanager = None
_customhosts = False
_cf_ip = None
_scheduler = None
_cron = None
_onlyonce = False
_ipv4 = False
_ipv6 = False
_version = None
_additional_args = None
_re_install = False
_notify = False
_check = False
_cf_path = None
_cf_ipv4 = None
_cf_ipv6 = None
_result_file = None
_release_prefix = 'https://github.com/XIU2/CloudflareSpeedTest/releases/download'
_binary_name = 'CloudflareST'
# 退出事件
_event = Event()
@staticmethod
def get_fields():
return [
            # 同一板块
{
'type': 'div',
'content': [
                    # 同一行
[
{
'title': '优选IP',
'required': "required",
                        'tooltip': '第一次使用,请先将 自定义Hosts插件 中所有 Cloudflare CDN IP 统一改为一个 IP。后续会自动变更。需搭配[自定义Hosts]插件使用',
'type': 'text',
'content': [
{
'id': 'cf_ip',
'placeholder': '121.121.121.121',
}
]
},
{
'title': '优选周期',
'required': "required",
                        'tooltip': 'Cloudflare CDN优选周期,支持5位cron表达式',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
},
{
'title': 'CloudflareSpeedTest版本',
'required': "",
                        'tooltip': '如当前版本与CloudflareSpeedTest最新版本不一致,可开启重装后运行获取新版本',
'type': 'text',
'content': [
{
'id': 'version',
'placeholder': '暂未安装',
}
]
}
],
[
{
'title': 'IPv4',
'required': "",
                        'tooltip': '优选测速ipv4;v4和v6必须其一,都不选择则默认ipv4',
'type': 'switch',
'id': 'ipv4',
},
{
'title': 'IPv6',
'required': "",
                        'tooltip': '优选测速ipv6;v4和v6必须其一,都不选择则默认ipv4。选择ipv6会大大加长测速时间。',
'type': 'switch',
'id': 'ipv6',
},
{
                        'title': '自动校准',
                        'required': "",
                        'tooltip': '开启后,会自动查询自定义hosts插件中出现次数最多的ip替换到优选IP。(如果出现次数最多的ip不止一个,则不做兼容处理)',
'type': 'switch',
'id': 'check',
},
],
[
{
                        'title': '立即运行一次',
                        'required': "",
                        'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照优选周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
},
{
                        'title': '重装后运行',
                        'required': "",
                        'tooltip': '开启后,每次会重新下载CloudflareSpeedTest,网络不好慎选',
'type': 'switch',
'id': 're_install',
},
{
                        'title': '运行时通知',
                        'required': "",
                        'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
]
]
},
{
'type': 'details',
                'summary': '高级参数',
                'tooltip': 'CloudflareSpeedTest的高级参数,请勿随意修改(请勿新增-f -o参数)',
'content': [
[
{
'required': "",
'type': 'text',
'content': [
{
'id': 'additional_args',
'placeholder': '-dd'
}
]
}
]
]
}
]
@staticmethod
def get_script():
"""
        返回插件额外的JS代码
"""
return """
$(document).ready(function () {
$('#cloudflarespeedtest_version').prop('disabled', true);
});
"""
def init_config(self, config=None):
self.eventmanager = EventManager()
        # 读取配置
if config:
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._cf_ip = config.get("cf_ip")
self._version = config.get("version")
self._ipv4 = config.get("ipv4")
self._ipv6 = config.get("ipv6")
self._re_install = config.get("re_install")
self._additional_args = config.get("additional_args")
self._notify = config.get("notify")
self._check = config.get("check")
        # 停止现有任务
        self.stop_service()
        # 启动定时任务 & 立即运行一次
if self.get_state() or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._cron:
self.info(f"Cloudflare CDN优选�务�动,周期:{self._cron}")
self._scheduler.add_job(self.__cloudflareSpeedTest, CronTrigger.from_crontab(self._cron))
if self._onlyonce:
self.info(f"Cloudflare CDN优选�务�动,立��行一次")
self._scheduler.add_job(self.__cloudflareSpeedTest, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
                # 关闭一次性开关
self._onlyonce = False
self.__update_config()
if self._cron or self._onlyonce:
            # 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def __cloudflareSpeedTest(self):
"""
CloudflareSpeedTest优选
"""
self._cf_path = self.get_data_path()
self._cf_ipv4 = os.path.join(self._cf_path, "ip.txt")
self._cf_ipv6 = os.path.join(self._cf_path, "ipv6.txt")
self._result_file = os.path.join(self._cf_path, "result_hosts.txt")
        # 获取自定义Hosts插件,若无设置则停止
customHosts = self.get_config("CustomHosts")
self._customhosts = customHosts and customHosts.get("enable")
        if not customHosts or not customHosts.get("hosts"):
            self.error(f"Cloudflare CDN优选依赖于自定义Hosts,请先维护hosts")
return
if not self._cf_ip:
self.error("CloudflareSpeedTeståŠ è½½æˆ�功,首次è¿�行,需è¦�é…�置优选ip")
return
# ipv4和ipv6必须其一
if not self._ipv4 and not self._ipv6:
self._ipv4 = True
self.__update_config()
self.warn(f"Cloudflare CDN优选未指定ip类�,默认ipv4")
err_flag, release_version = self.__check_envirment()
if err_flag and release_version:
# 更新版本
self._version = release_version
self.__update_config()
hosts = customHosts.get("hosts")
if isinstance(hosts, str):
hosts = str(hosts).split('\n')
        # 校正优选ip
if self._check:
self.__check_cf_if(hosts=hosts)
# 开始优选
if err_flag:
self.info("æ£åœ¨è¿›è¡ŒCLoudflare CDN优选,请è€�心ç‰å¾…")
# 执行优选命令,-dd�测速
cf_command = f'cd {self._cf_path} && ./{self._binary_name} {self._additional_args} -o {self._result_file}' + (
f' -f {self._cf_ipv4}' if self._ipv4 else '') + (f' -f {self._cf_ipv6}' if self._ipv6 else '')
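            # 命令示例(假设 additional_args 为 "-dd" 且仅勾选 ipv4):
            # cd <插件数据目录> && ./CloudflareST -dd -o result_hosts.txt -f ip.txt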
            self.info(f'正在执行优选命令 {cf_command}')
os.system(cf_command)
            # 获取优选后最优ip
best_ip = SystemUtils.execute("sed -n '2,1p' " + self._result_file + " | awk -F, '{print $1}'")
self.info(f"\n��到最优ip==>[{best_ip}]")
# 替�自定义Hosts�件数�库hosts
if IpUtils.is_ipv4(best_ip) or IpUtils.is_ipv6(best_ip):
if best_ip == self._cf_ip:
self.info(f"CloudflareSpeedTest CDN优选ip未�,��处�")
else:
                    # 替换优选ip
err_hosts = customHosts.get("err_hosts")
enable = customHosts.get("enable")
                    # 处理ip
new_hosts = []
for host in hosts:
if host and host != '\n':
host_arr = str(host).split()
if host_arr[0] == self._cf_ip:
new_hosts.append(host.replace(self._cf_ip, best_ip))
else:
new_hosts.append(host)
# 更新自定义Hosts
self.update_config({
"hosts": new_hosts,
"err_hosts": err_hosts,
"enable": enable
}, "CustomHosts")
# 更新优选ip
old_ip = self._cf_ip
self._cf_ip = best_ip
self.__update_config()
self.info(f"Cloudflare CDN优选ip [{best_ip}] 已替�自定义Hosts�件")
# 解�自定义hosts�件�载
self.info("通知CustomHosts�件�载 ...")
self.eventmanager.send_event(EventType.PluginReload,
{
"plugin_id": "CustomHosts"
})
if self._notify:
self.send_message(
title="�Cloudflare优选任务完�】",
text=f"�ip:{old_ip}\n"
f"新ip:{best_ip}"
)
else:
self.error("è�·å�–到最优ipæ ¼å¼�错误,请é‡�试")
self._onlyonce = False
self.__update_config()
self.stop_service()
def __check_cf_if(self, hosts):
"""
        校正cf优选ip
        防止特殊情况下cf优选ip和自定义hosts插件中ip不一致
        """
        # 统计每个IP地址出现的次数
ip_count = {}
for host in hosts:
ip = host.split()[0]
if ip in ip_count:
ip_count[ip] += 1
else:
ip_count[ip] = 1
        # 找出出现次数最多的IP地址
        max_ips = []  # 保存最多出现的IP地址
max_count = 0
for ip, count in ip_count.items():
if count > max_count:
                max_ips = [ip]  # 更新最多的IP地址
max_count = count
elif count == max_count:
max_ips.append(ip)
        # 如果出现次数最多的ip不止一个,则不做兼容处理
if len(max_ips) != 1:
return
if max_ips[0] != self._cf_ip:
self._cf_ip = max_ips[0]
self.info(f"è�·å�–到自定义hostsæ�’ä»¶ä¸ip {max_ips[0]} 出ç�°æ¬¡æ•°æœ€å¤šï¼Œå·²è‡ªåŠ¨æ ¡æ£ä¼˜é€‰ip")
def __check_envirment(self):
"""
        环境检查
        """
        # 是否安装标识
        install_flag = False
        # 是否重新安装
if self._re_install:
install_flag = True
os.system(f'rm -rf {self._cf_path}')
            self.info(f'删除CloudflareSpeedTest目录 {self._cf_path},开始重新安装')
        # 判断目录是否存在
cf_path = Path(self._cf_path)
if not cf_path.exists():
os.mkdir(self._cf_path)
        # 获取CloudflareSpeedTest最新版本
release_version = self.__get_release_version()
if not release_version:
            # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists():
self.warn(f"è�·å�–CloudflareSpeedTest版本失败,å˜åœ¨å�¯æ‰§è¡Œç‰ˆæœ¬ï¼Œç»§ç»è¿�行")
return True, None
elif self._version:
self.error(f"��CloudflareSpeedTest版本失败,��上次�行版本{self._version},开始安装")
install_flag = True
else:
release_version = "v2.2.2"
self._version = release_version
self.error(f"��CloudflareSpeedTest版本失败,��默认版本{release_version},开始安装")
install_flag = True
# 有更新
if not install_flag and release_version != self._version:
self.info(f"检测到CloudflareSpeedTest有版本[{release_version}]更新,开始安装")
install_flag = True
        # 重装后数据库有版本数据,但是本地没有则安装
if not install_flag and release_version == self._version and not Path(
f'{self._cf_path}/{self._binary_name}').exists():
self.warn(f"未检测到CloudflareSpeedTest本地版本,�新安装")
install_flag = True
if not install_flag:
self.info(f"CloudflareSpeedTestæ— æ–°ç‰ˆæœ¬ï¼Œå˜åœ¨å�¯æ‰§è¡Œç‰ˆæœ¬ï¼Œç»§ç»è¿�行")
return True, None
        # 检查环境并安装
if SystemUtils.is_windows():
# todo
self.error(f"CloudflareSpeedTest暂�支�windows平�")
return False, None
elif SystemUtils.is_macos():
# mac
uname = SystemUtils.execute('uname -m')
arch = 'amd64' if uname == 'x86_64' else 'arm64'
cf_file_name = f'CloudflareST_darwin_{arch}.zip'
download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}'
return self.__os_install(download_url, cf_file_name, release_version,
f"ditto -V -x -k --sequesterRsrc {self._cf_path}/{cf_file_name} {self._cf_path}")
else:
# docker
uname = SystemUtils.execute('uname -m')
arch = 'amd64' if uname == 'x86_64' else 'arm64'
cf_file_name = f'CloudflareST_linux_{arch}.tar.gz'
download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}'
return self.__os_install(download_url, cf_file_name, release_version,
f"tar -zxf {self._cf_path}/{cf_file_name} -C {self._cf_path}")
def __os_install(self, download_url, cf_file_name, release_version, unzip_command):
"""
macos docker安装cloudflare
"""
        # 手动下载安装包后,无需在此下载
if not Path(f'{self._cf_path}/{cf_file_name}').exists():
            # 首次下载或下载新版压缩包
proxies = Config().get_proxies()
https_proxy = proxies.get("https") if proxies and proxies.get("https") else None
if https_proxy:
os.system(
f'wget -P {self._cf_path} --no-check-certificate -e use_proxy=yes -e https_proxy={https_proxy} {download_url}')
else:
os.system(f'wget -P {self._cf_path} https://ghproxy.com/{download_url}')
        # 判断是否下载好安装包
if Path(f'{self._cf_path}/{cf_file_name}').exists():
try:
                # 解压
os.system(f'{unzip_command}')
                # 删除压缩包
os.system(f'rm -rf {self._cf_path}/{cf_file_name}')
if Path(f'{self._cf_path}/{self._binary_name}').exists():
self.info(f"CloudflareSpeedTest安装�功,当�版本:{release_version}")
return True, release_version
else:
self.error(f"CloudflareSpeedTest安装失败,请检查")
os.removedirs(self._cf_path)
return False, None
except Exception as err:
                # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists():
self.error(f"CloudflareSpeedTest安装失败:{str(err)},继ç»ä½¿ç”¨ç�°ç‰ˆæœ¬è¿�行")
return True, None
else:
self.error(f"CloudflareSpeedTest安装失败:{str(err)}ï¼Œæ— å�¯ç”¨ç‰ˆæœ¬ï¼Œå�œæ¢è¿�行")
os.removedirs(self._cf_path)
return False, None
else:
            # 如果升级失败但是有可执行文件CloudflareST,则可继续运行,反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists():
self.warn(f"CloudflareSpeedTest安装失败,å˜åœ¨å�¯æ‰§è¡Œç‰ˆæœ¬ï¼Œç»§ç»è¿�行")
return True, None
else:
self.error(f"CloudflareSpeedTestå®‰è£…å¤±è´¥ï¼Œæ— å�¯ç”¨ç‰ˆæœ¬ï¼Œå�œæ¢è¿�行")
os.removedirs(self._cf_path)
return False, None
@EventHandler.register(EventType.PluginReload)
def reload(self, event):
"""
        触发cf优选
"""
plugin_id = event.event_data.get("plugin_id")
if not plugin_id:
return
if plugin_id != self.__class__.__name__:
return
self.__cloudflareSpeedTest()
def __update_config(self):
"""
        更新优选插件配置
"""
self.update_config({
"onlyonce": False,
"cron": self._cron,
"cf_ip": self._cf_ip,
"version": self._version,
"ipv4": self._ipv4,
"ipv6": self._ipv6,
"re_install": self._re_install,
"additional_args": self._additional_args,
"notify": self._notify,
"check": self._check
})
@staticmethod
def __get_release_version():
"""
        获取CloudflareSpeedTest最新版本
"""
version_res = RequestUtils().get_res(
"https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest")
if not version_res:
version_res = RequestUtils(proxies=Config().get_proxies()).get_res(
"https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest")
if version_res:
ver_json = version_res.json()
version = f"{ver_json['tag_name']}"
return version
else:
return None
def get_state(self):
        return True if self._cf_ip and self._cron else False
def stop_service(self):
"""
        退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e)) | 22,342 | Python | .py | 498 | 29.022088 | 264 | 0.44371 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,092 | doubanrank.py | demigody_nas-tools/app/plugins/modules/doubanrank.py | import re
import xml.dom.minidom
from datetime import datetime, timedelta
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from jinja2 import Template
from app.helper import RssHelper
from app.media import Media
from app.mediaserver import MediaServer
from app.plugins.modules._base import _IPluginModule
from app.subscribe import Subscribe
from app.utils import RequestUtils, DomUtils, StringUtils
from app.utils.types import MediaType, SearchType, RssType
from config import Config
from web.backend.web_utils import WebUtils
class DoubanRank(_IPluginModule):
# 插件名称
module_name = "豆瓣榜单订阅"
# 插件描述
module_desc = "监控豆瓣热门榜单,自动添加订阅。"
# 插件图标
module_icon = "movie.jpg"
# 主题色
module_color = "#01B3E3"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "doubanrank_"
# 加载顺序
module_order = 16
# 可使用的用户级别
auth_level = 2
# 退出事件
_event = Event()
# 私有属性
mediaserver = None
subscribe = None
rsshelper = None
media = None
_douban_address = {
'movie-ustop': 'https://rsshub.app/douban/movie/ustop',
'movie-weekly': 'https://rsshub.app/douban/movie/weekly',
'movie-real-time': 'https://rsshub.app/douban/movie/weekly/subject_real_time_hotest',
'show-domestic': 'https://rsshub.app/douban/movie/weekly/show_domestic',
'movie-hot-gaia': 'https://rsshub.app/douban/movie/weekly/movie_hot_gaia',
'tv-hot': 'https://rsshub.app/douban/movie/weekly/tv_hot',
'movie-top250': 'https://rsshub.app/douban/movie/weekly/movie_top250',
}
_enable = False
_onlyonce = False
_cron = ""
_rss_addrs = []
_ranks = []
_vote = 0
_release_date = 0
_scheduler = None
def init_config(self, config: dict = None):
self.mediaserver = MediaServer()
self.subscribe = Subscribe()
self.rsshelper = RssHelper()
self.media = Media()
if config:
self._enable = config.get("enable")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._release_date = int(config.get("release_date")) if config.get("release_date") else 0
self._vote = float(config.get("vote")) if config.get("vote") else 0
rss_addrs = config.get("rss_addrs")
if rss_addrs:
if isinstance(rss_addrs, str):
self._rss_addrs = rss_addrs.split('\n')
else:
self._rss_addrs = rss_addrs
else:
self._rss_addrs = []
self._ranks = config.get("ranks") or []
# 停止现有任务
self.stop_service()
# 启动服务
if self.get_state() or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._cron:
self.info(f"订阅服务启动,周期:{self._cron}")
self._scheduler.add_job(self.__refresh_rss,
CronTrigger.from_crontab(self._cron))
if self._onlyonce:
self.info(f"订阅服务启动,立即运行一次")
self._scheduler.add_job(self.__refresh_rss, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"enable": self._enable,
"cron": self._cron,
"ranks": self._ranks,
"release_date": self._release_date,
"vote": self._vote,
"rss_addrs": "\n".join(self._rss_addrs)
})
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self):
return self._enable and self._cron and (self._ranks or self._rss_addrs)
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启豆瓣榜单订阅',
'required': "",
'tooltip': '开启后,自动监控豆瓣榜单变化,有新内容时如媒体服务器不存在且未订阅过,则会添加订阅,仅支持rsshub的豆瓣RSS',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照刮削周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
}
],
[
{
'title': '刷新周期',
'required': "required",
'tooltip': '榜单数据刷新的时间周期,支持5位cron表达式;应根据榜单更新的周期合理设置刷新时间,避免刷新过于频繁',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
},
{
'title': '上映日期',
'required': "",
                        'tooltip': '大于或者等于该年份的才会被订阅(以TMDB上映日期为准),不填则不限制',
'type': 'text',
'content': [
{
'id': 'release_date',
'placeholder': '0',
}
]
},
{
'title': '评分',
'required': "",
'tooltip': '大于该评分的才会被订阅(以TMDB评分为准),不填则不限制',
'type': 'text',
'content': [
{
'id': 'vote',
'placeholder': '0',
}
]
}
],
[
{
'title': 'RssHub订阅地址',
'required': '',
'tooltip': '每一行一个RSS地址,访问 https://docs.rsshub.app/social-media.html#dou-ban 查询可用地址',
'type': 'textarea',
'content':
{
'id': 'rss_addrs',
'placeholder': 'https://rsshub.app/douban/movie/classification/:sort?/:score?/:tags?',
'rows': 5
}
}
]
]
},
{
'type': 'details',
'summary': '热门榜单',
'tooltip': '内建支持的豆瓣榜单,使用https://rsshub.app数据源,可直接选择订阅',
'content': [
# 同一行
[
{
'id': 'ranks',
'type': 'form-selectgroup',
'content': {
'movie-ustop': {
'id': 'movie-ustop',
'name': '北美电影票房榜',
},
'movie-weekly': {
'id': 'movie-weekly',
'name': '一周电影口碑榜',
},
'movie-real-time': {
'id': 'movie-real-time',
'name': '实时热门榜',
},
'movie-hot-gaia': {
'id': 'movie-hot-gaia',
'name': '热门电影',
},
'movie-top250': {
'id': 'movie-top250',
'name': '电影TOP10',
},
'tv-hot': {
'id': 'tv-hot',
'name': '热门剧集',
},
'show-domestic': {
'id': 'show-domestic',
'name': '热门综艺',
}
}
},
]
]
}
]
def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
results = self.get_history()
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
<tr>
<th></th>
<th>标题</th>
<th>类型</th>
<th>状态</th>
<th>添加时间</th>
<th></th>
</tr>
</thead>
<tbody>
{% if HistoryCount > 0 %}
{% for Item in DoubanRankHistory %}
<tr id="movie_rank_history_{{ Item.id }}">
<td class="w-5">
<img class="rounded w-5" src="{{ Item.image }}"
onerror="this.src='../static/img/no-image.png'" alt=""
style="min-width: 50px"/>
</td>
<td>
<div>{{ Item.name }} ({{ Item.year }})</div>
{% if Item.rating %}
<div class="text-muted text-nowrap">
                                    评分:{{ Item.rating }}
</div>
{% endif %}
</td>
<td>
{{ Item.type }}
</td>
<td>
{% if Item.state == 'DOWNLOADED' %}
<span class="badge bg-green">已下载</span>
{% elif Item.state == 'RSS' %}
<span class="badge bg-blue">已订阅</span>
{% elif Item.state == 'NEW' %}
<span class="badge bg-blue">新增</span>
{% else %}
<span class="badge bg-orange">处理中</span>
{% endif %}
</td>
<td>
<small>{{ Item.add_time or '' }}</small>
</td>
<td>
<div class="dropdown">
<a href="#" class="btn-action" data-bs-toggle="dropdown"
aria-expanded="false">
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-dots-vertical {{ class }}"
width="24" height="24" viewBox="0 0 24 24"
stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<circle cx="12" cy="12" r="1"></circle>
<circle cx="12" cy="19" r="1"></circle>
<circle cx="12" cy="5" r="1"></circle>
</svg>
</a>
<div class="dropdown-menu dropdown-menu-end">
<a class="dropdown-item text-danger"
href='javascript:DoubanRank_delete_history("{{ Item.id }}")'>
删除
</a>
</div>
</div>
</td>
</tr>
{% endfor %}
{% else %}
<tr>
<td colspan="6" align="center">没有数据</td>
</tr>
{% endif %}
</tbody>
</table>
</div>
"""
return "订阅历史", Template(template).render(HistoryCount=len(results),
DoubanRankHistory=results), None
@staticmethod
def get_script():
"""
删除随机电影历史记录的JS脚本
"""
return """
// 删除随机电影历史记录
function DoubanRank_delete_history(id){
ajax_post("run_plugin_method", {"plugin_id": 'DoubanRank', 'method': 'delete_rank_history', 'tmdb_id': id}, function (ret) {
$("#movie_rank_history_" + id).remove();
});
}
"""
def delete_rank_history(self, tmdb_id):
"""
删除同步历史
"""
return self.delete_history(key=tmdb_id)
def __update_history(self, media, state):
"""
插入历史记录
"""
if not media:
return
value = {
"id": media.tmdb_id,
"name": media.title,
"year": media.year,
"type": media.type.value,
"rating": media.vote_average or 0,
"image": media.get_poster_image(),
"state": state,
"add_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
if self.get_history(key=media.tmdb_id):
self.update_history(key=media.tmdb_id, value=value)
else:
self.history(key=media.tmdb_id, value=value)
def stop_service(self):
"""
停止服务
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
def __refresh_rss(self):
"""
刷新RSS
"""
self.info(f"开始刷新RSS ...")
addr_list = self._rss_addrs + [self._douban_address.get(rank) for rank in self._ranks]
if not addr_list:
self.info(f"未设置RSS地址")
return
else:
self.info(f"共 {len(addr_list)} 个RSS地址需要刷新")
for addr in addr_list:
if not addr:
continue
try:
self.info(f"获取RSS:{addr} ...")
rss_infos = self.__get_rss_info(addr)
if not rss_infos:
self.error(f"RSS地址:{addr} ,未查询到数据")
continue
else:
self.info(f"RSS地址:{addr} ,共 {len(rss_infos)} 条数据")
for rss_info in rss_infos:
if self._event.is_set():
self.info(f"订阅服务停止")
return
title = rss_info.get('title')
douban_id = rss_info.get('doubanid')
mtype = rss_info.get('type')
unique_flag = f"doubanrank: {title} (DB:{douban_id})"
# 检查是否已处理过
if self.rsshelper.is_rssd_by_simple(torrent_name=unique_flag, enclosure=None):
self.info(f"已处理过:{title} (豆瓣id:{douban_id})")
continue
# 识别媒体信息
if douban_id:
media_info = WebUtils.get_mediainfo_from_id(mtype=mtype,
mediaid=f"DB:{douban_id}",
wait=True)
else:
media_info = self.media.get_media_info(title=title, mtype=mtype)
if not media_info:
self.warn(f"未查询到媒体信息:{title} (豆瓣id:{douban_id})")
continue
                    media_release_date = int(media_info.year) if StringUtils.is_int_or_float(media_info.year) else None
                    media_vote_average = float(media_info.vote_average) if StringUtils.is_int_or_float(media_info.vote_average) else None
if self._release_date and media_release_date and media_release_date < self._release_date:
self.info(
f"{media_info.get_title_string()} 上映日期 {media_release_date} 低于限制 {self._release_date},跳过 ...")
continue
if self._vote and media_vote_average and media_vote_average < self._vote:
self.info(
f"{media_info.get_title_string()} 评分 {media_vote_average} 低于限制 {self._vote},跳过 ...")
continue
# 检查媒体服务器是否存在
item_id = self.mediaserver.check_item_exists(mtype=media_info.type,
title=media_info.title,
year=media_info.year,
tmdbid=media_info.tmdb_id,
season=media_info.get_season_seq())
if item_id:
self.info(f"媒体服务器已存在:{media_info.get_title_string()}")
self.__update_history(media=media_info, state="DOWNLOADED")
continue
# 检查是否已订阅过
if self.subscribe.check_history(
type_str="MOV" if media_info.type == MediaType.MOVIE else "TV",
name=media_info.title,
year=media_info.year,
season=media_info.get_season_string()):
self.info(
f"{media_info.get_title_string()}{media_info.get_season_string()} 已订阅过")
self.__update_history(media=media_info, state="RSS")
continue
# 添加处理历史
self.rsshelper.simple_insert_rss_torrents(title=unique_flag, enclosure=None)
# 添加订阅
code, msg, rss_media = self.subscribe.add_rss_subscribe(
mtype=media_info.type,
name=media_info.title,
year=media_info.year,
season=media_info.begin_season,
channel=RssType.Auto,
in_from=SearchType.PLUGIN
)
if not rss_media or code != 0:
self.warn("%s 添加订阅失败:%s" % (media_info.get_title_string(), msg))
# 订阅已存在
if code == 9:
self.__update_history(media=media_info, state="RSS")
else:
self.info("%s 添加订阅成功" % media_info.get_title_string())
self.__update_history(media=media_info, state="RSS")
except Exception as e:
self.error(str(e))
self.info(f"所有RSS刷新完成")
def __get_rss_info(self, addr):
"""
获取RSS
"""
try:
ret = RequestUtils().get_res(addr)
if not ret:
return []
ret.encoding = ret.apparent_encoding
ret_xml = ret.text
ret_array = []
# 解析XML
dom_tree = xml.dom.minidom.parseString(ret_xml)
rootNode = dom_tree.documentElement
items = rootNode.getElementsByTagName("item")
for item in items:
try:
# 标题
title = DomUtils.tag_value(item, "title", default="")
# 链接
link = DomUtils.tag_value(item, "link", default="")
if not title and not link:
self.warn(f"条目标题和链接均为空,无法处理")
continue
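                    # 示例:从链接 https://movie.douban.com/subject/1292052/ 中提取出豆瓣ID "1292052"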
doubanid = re.findall(r"/(\d+)/", link)
if doubanid:
doubanid = doubanid[0]
if doubanid and not str(doubanid).isdigit():
self.warn(f"解析的豆瓣ID格式不正确:{doubanid}")
continue
# 返回对象
ret_array.append({
'title': title,
'link': link,
'doubanid': doubanid
})
except Exception as e1:
self.error("解析RSS条目失败:" + str(e1))
continue
return ret_array
except Exception as e:
self.error("获取RSS失败:" + str(e))
return []
| 23,499 | Python | .py | 516 | 22.672481 | 136 | 0.39392 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,093 | movierandom.py | demigody_nas-tools/app/plugins/modules/movierandom.py | import random
from datetime import datetime, timedelta
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from jinja2 import Template
import log
from app.conf import ModuleConf
from app.helper import RssHelper
from app.media import Media
from app.mediaserver import MediaServer
from app.plugins.modules._base import _IPluginModule
from app.subscribe import Subscribe
from app.utils.types import SearchType, RssType, MediaType
from config import Config
class MovieRandom(_IPluginModule):
# 插件名称
module_name = "电影随机订阅"
# 插件描述
module_desc = "随机获取一部未入库的电影,自动添加订阅。"
# 插件图标
module_icon = "random.png"
# 主题色
module_color = "#0000FF"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
module_config_prefix = "movierandom_"
# 加载顺序
module_order = 18
# 可使用的用户级别
auth_level = 2
# 退出事件
_event = Event()
# 私有属性
mediaserver = None
rsshelper = None
subscribe = None
_scheduler = None
_enable = False
_onlyonce = False
_cron = None
_language = None
_genres = None
_vote = None
_date = None
@staticmethod
def get_fields():
language_options = ModuleConf.DISCOVER_FILTER_CONF.get("tmdb_movie").get("with_original_language").get(
"options")
genres_options = ModuleConf.DISCOVER_FILTER_CONF.get("tmdb_movie").get("with_genres").get("options")
# tmdb电影类型
genres = {m.get('name'): m.get('name') for m in genres_options}
# tmdb电影语言
language = {m.get('name'): m.get('name') for m in language_options}
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启电影随机订阅',
'required': "",
'tooltip': '开启后,定时随机订阅一部电影。',
'type': 'switch',
'id': 'enable',
},
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照随机周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
},
],
[
{
'title': '随机周期',
'required': "required",
'tooltip': '电影随机订阅的时间周期,支持5位cron表达式。',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
},
{
'title': '上映时间',
'required': "",
'tooltip': '电影上映时间,大于该时间的会被订阅',
'type': 'text',
'content': [
{
'id': 'date',
'placeholder': '2022',
}
]
},
{
'title': '电影评分',
'required': "",
'tooltip': '最低评分,大于等于该评分的会被订阅(最大10)',
'type': 'text',
'content': [
{
'id': 'vote',
'placeholder': '8',
}
]
},
],
[
{
'title': '电影类型',
'required': "",
'type': 'select',
'content': [
{
'id': 'genres',
'options': genres,
'default': '全部'
},
]
},
{
'title': '电影语言',
'required': "",
'type': 'select',
'content': [
{
'id': 'language',
'options': language,
'default': '全部'
},
]
},
]
]
}
]
def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
results = self.get_history()
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
<tr>
<th></th>
<th>标题</th>
<th>类型</th>
<th>状态</th>
<th>添加时间</th>
<th></th>
</tr>
</thead>
<tbody>
{% if HistoryCount > 0 %}
{% for Item in MovieRandomHistory %}
<tr id="movie_random_history_{{ Item.id }}">
<td class="w-5">
<img class="rounded w-5" src="{{ Item.image }}"
onerror="this.src='../static/img/no-image.png'" alt=""
style="min-width: 50px"/>
</td>
<td>
<div>{{ Item.name }} ({{ Item.year }})</div>
{% if Item.rating %}
<div class="text-muted text-nowrap">
                                    评分:{{ Item.rating }}
</div>
{% endif %}
</td>
<td>
{{ Item.type }}
</td>
<td>
{% if Item.state == 'DOWNLOADED' %}
<span class="badge bg-green">已下载</span>
{% elif Item.state == 'RSS' %}
<span class="badge bg-blue">已订阅</span>
{% elif Item.state == 'NEW' %}
<span class="badge bg-blue">新增</span>
{% else %}
<span class="badge bg-orange">处理中</span>
{% endif %}
</td>
<td>
<small>{{ Item.add_time or '' }}</small>
</td>
<td>
<div class="dropdown">
<a href="#" class="btn-action" data-bs-toggle="dropdown"
aria-expanded="false">
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-dots-vertical {{ class }}"
width="24" height="24" viewBox="0 0 24 24"
stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<circle cx="12" cy="12" r="1"></circle>
<circle cx="12" cy="19" r="1"></circle>
<circle cx="12" cy="5" r="1"></circle>
</svg>
</a>
<div class="dropdown-menu dropdown-menu-end">
<a class="dropdown-item text-danger"
href='javascript:MovieRandom_delete_history("{{ Item.id }}")'>
删除
</a>
</div>
</div>
</td>
</tr>
{% endfor %}
{% else %}
<tr>
<td colspan="6" align="center">没有数据</td>
</tr>
{% endif %}
</tbody>
</table>
</div>
"""
return "随机历史", Template(template).render(HistoryCount=len(results),
MovieRandomHistory=results), None
@staticmethod
def get_script():
"""
删除随机电影历史记录的JS脚本
"""
return """
// 删除随机电影历史记录
function MovieRandom_delete_history(id){
ajax_post("run_plugin_method", {"plugin_id": 'MovieRandom', 'method': 'delete_random_history', 'tmdb_id': id}, function (ret) {
$("#movie_random_history_" + id).remove();
});
}
"""
def init_config(self, config: dict = None):
self.mediaserver = MediaServer()
self.subscribe = Subscribe()
self.rsshelper = RssHelper()
if config:
self._enable = config.get("enable")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._language = config.get("language")
self._genres = config.get("genres")
self._vote = config.get("vote")
self._date = config.get("date")
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self.get_state() or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._cron:
self.info(f"电影随机服务启动,周期:{self._cron}")
self._scheduler.add_job(self.__random,
CronTrigger.from_crontab(self._cron))
if self._onlyonce:
self.info(f"电影随机服务启动,立即运行一次")
self._scheduler.add_job(self.__random, 'date',
run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
seconds=3))
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"enable": self._enable,
"onlyonce": self._onlyonce,
"cron": self._cron,
"language": self._language,
"genres": self._genres,
"vote": self._vote,
"date": self._date,
})
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def __random(self):
"""
随机获取一部tmdb电影下载
"""
params = {}
if self._date:
params['primary_release_date.gte'] = f"{self._date}-01-01"
if self._vote:
params['vote_average.gte'] = self._vote
if self._language:
language_options = ModuleConf.DISCOVER_FILTER_CONF.get("tmdb_movie").get("with_original_language").get(
"options")
for m in language_options:
if m.get('name') == self._language:
params['with_original_language'] = m.get('value')
break
if self._genres:
genres_options = ModuleConf.DISCOVER_FILTER_CONF.get("tmdb_movie").get("with_genres").get("options")
for m in genres_options:
if m.get('name') == self._genres:
params['with_genres'] = m.get('value')
break
# 查询选择条件下所有页数
random_max_page = Media().get_tmdb_discover_movies_pages(params=params)
if random_max_page == 0:
log.error("当前所选条件下未获取到电影数据,停止随机订阅")
return
log.info(f"当前所选条件下获取到电影数据 {random_max_page} 页,开始随机订阅")
# ['page must be less than or equal to 500']
if random_max_page > 500:
random_max_page = 500
movie_list = []
retry_time = 0
try_page = []
        while not movie_list and retry_time < min(5, random_max_page):
            page = random.randint(1, random_max_page)
            # 已经试过的页数重新random(TMDB页码从1开始)
            while page in try_page:
                page = random.randint(1, random_max_page)
# 根据请求参数随机获取一页电影
movie_list = self.__get_discover(page=page,
params=params)
self.info(
f"正在尝试第 {retry_time + 1} 次获取,获取到随机页数 {page} 电影数据 {len(movie_list)} 条,最多尝试5次")
retry_time = retry_time + 1
try_page.append(page)
if not movie_list:
self.error("已达最大尝试次数,当前条件下未随机到电影")
return
# 随机出媒体库不存在的视频
media_info = self.__random_check(movie_list)
if not media_info:
self.warn("本次未随机出满足条件的电影")
return
title = media_info.get('title')
year = media_info.get('year')
tmdb_id = media_info.get('id')
unique_flag = f"movierandom: {title} (DB:{tmdb_id})"
log.info(
f"电影 {title}-{year}(tmdbid:{tmdb_id})未入库,开始订阅")
# 检查是否已订阅过
if self.subscribe.check_history(
type_str="MOV",
name=title,
year=year,
season=None):
self.info(
f"{title} 已订阅过")
self.__update_history(media=media_info, state="RSS")
return
# 添加处理历史
self.rsshelper.simple_insert_rss_torrents(title=unique_flag, enclosure=None)
# 添加订阅
code, msg, rss_media = self.subscribe.add_rss_subscribe(
mtype=MediaType.MOVIE,
name=title,
year=year,
season=None,
channel=RssType.Auto,
in_from=SearchType.PLUGIN
)
if not rss_media or code != 0:
self.warn("%s 添加订阅失败:%s" % (title, msg))
# 订阅已存在
if code == 9:
self.__update_history(media=media_info, state="RSS")
else:
self.info("%s 添加订阅成功" % title)
# 插入为已RSS状态
self.__update_history(media=media_info, state="RSS")
def __random_check(self, movie_list):
"""
随机一个电影
检查媒体服务器是否存在
"""
# 随机一个电影
media_info = random.choice(movie_list)
title = media_info.get('title')
year = media_info.get('year')
tmdb_id = media_info.get('id')
unique_flag = f"movierandom: {title} (DB:{tmdb_id})"
# 检查是否已处理过
if self.rsshelper.is_rssd_by_simple(torrent_name=unique_flag, enclosure=None):
self.info(f"已处理过:{title} (tmdbid:{tmdb_id})")
return
log.info(f"随机出电影 {title} {year} tmdbid:{tmdb_id}")
# 删除该电影,防止再次random到
movie_list.remove(media_info)
# 检查媒体服务器是否存在
item_id = self.mediaserver.check_item_exists(mtype=MediaType.MOVIE,
title=title,
year=year,
tmdbid=tmdb_id)
        if item_id:
            self.info(f"媒体服务器已存在:{title}")
            self.__update_history(media=media_info, state="DOWNLOADED")
            if len(movie_list) == 0:
                return None
            # 返回递归结果,否则会误将已存在的电影当作本次随机结果返回
            return self.__random_check(movie_list)
        return media_info
def delete_random_history(self, tmdb_id):
"""
删除同步历史
"""
return self.delete_history(key=tmdb_id)
def __update_history(self, media, state):
"""
插入历史记录
"""
value = {
"id": media.get('tmdbid'),
"name": media.get('title'),
"year": media.get('year'),
"type": media.get('media_type'),
"rating": media.get('vote')[0] if media.get('vote') else None,
"image": media.get('image'),
"state": state,
"add_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
if self.get_history(key=media.get('tmdbid')):
self.update_history(key=media.get('tmdbid'), value=value)
else:
self.history(key=media.get('tmdbid'), value=value)
@staticmethod
def __get_discover(page, params):
return Media().get_tmdb_discover(mtype=MediaType.MOVIE,
page=page,
params=params)
def get_state(self):
return self._enable \
and self._cron
def stop_service(self):
"""
停止服务
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
| 19,240 | Python | .py | 455 | 22.037363 | 139 | 0.416585 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
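The retry loop in `__random` above re-rolls a random page number until it finds one it has not tried. A minimal standalone sketch of the same idea, pre-shuffling the candidate pages so no page is tried twice, is shown below; `fetch_page` is a hypothetical callable standing in for `Media().get_tmdb_discover` and is not part of the plugin:

import random

def pick_first_nonempty_page(fetch_page, max_page, max_tries=5):
    # Try up to max_tries distinct pages (1-based, as the TMDB API expects)
    # and return the first non-empty result, else an empty list.
    pages = random.sample(range(1, max_page + 1), k=min(max_tries, max_page))
    for page in pages:
        results = fetch_page(page)
        if results:
            return results
    return []

# usage with a stub that only has data on page 3
print(pick_first_nonempty_page(lambda p: ["movie"] if p == 3 else [], 5))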
2,289,094 | torrentremover.py | demigody_nas-tools/app/plugins/modules/torrentremover.py | import os
from app.downloader import Downloader
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils.types import EventType
from config import Config
class TorrentRemover(_IPluginModule):
# 插件名称
module_name = "下载任务联动删除"
# 插件描述
module_desc = "历史记录中源文件被删除时,同步删除下载器中的下载任务。"
# 插件图标
module_icon = "torrentremover.png"
# 主题色
module_color = "#F44336"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "torrentremover_"
# 加载顺序
module_order = 9
# 可使用的用户级别
auth_level = 2
# 私有属性
downloader = None
_enable = False
def __init__(self):
self._ua = Config().get_ua()
def init_config(self, config: dict = None):
self.downloader = Downloader()
if config:
self._enable = config.get("enable")
def get_state(self):
return self._enable
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '下载任务联动删除',
'required': "",
'tooltip': '在历史记录中选择删除源文件时联动删除下载器中的下载任务;只有NAStool添加的且被正确识别了的任务以及转种、辅种插件处理的任务才会被联动删除,其他工具辅种任务、非默认使用的下载器的任务等可通过建立自动删种任务等方式处理',
'type': 'switch',
'id': 'enable',
}
]
]
}
]
def stop_service(self):
pass
@EventHandler.register(EventType.SourceFileDeleted)
def deletetorrent(self, event):
"""
联动删除下载器中的下载任务
"""
if not self._enable:
return
event_info = event.event_data
if not event_info:
return
# 删除下载记录
source_path = event_info.get("path")
source_filename = event_info.get("filename")
media_title = event_info.get("media_info", {}).get("title")
source_file = os.path.join(source_path, source_filename)
# 同一标题的所有下载任务
downloadinfos = Downloader().get_download_history_by_title(title=media_title)
for info in downloadinfos:
if not info.DOWNLOADER or not info.DOWNLOAD_ID:
continue
# 删除下载任务
self.__del_torrent(source_file=source_file,
from_download=info.DOWNLOADER,
from_download_id=info.DOWNLOAD_ID)
def __del_torrent(self, source_file, from_download, from_download_id):
"""
删除下载任务
"""
download = from_download
download_id = from_download_id
# 查询是否有转种记录
history_key = "%s-%s" % (download, download_id)
plugin_id = "TorrentTransfer"
transfer_history = self.get_history(key=history_key,
plugin_id=plugin_id)
self.info(f"查询到 {history_key} 转种历史 {transfer_history}")
del_history = False
# 如果有转种记录,则删除转种后的下载任务
if transfer_history and isinstance(transfer_history, dict):
download = transfer_history['to_download']
download_id = transfer_history['to_download_id']
delete_source = transfer_history['delete_source']
del_history = True
# 转种后未删除源种时,同步删除源种
if not delete_source:
self.info(f"{history_key} 转种时未删除源下载任务,开始删除源下载任务…")
# 删除标志
delete_flag = False
try:
dl_files = self.downloader.get_files(tid=from_download_id,
downloader_id=from_download)
if not dl_files:
return
for dl_file in dl_files:
dl_file_name = dl_file.get("name")
if os.path.normpath(source_file).endswith(os.path.normpath(dl_file_name)):
delete_flag = True
break
if delete_flag:
self.info(f"删除下载任务:{from_download} - {from_download_id}")
self.downloader.delete_torrents(downloader_id=from_download,
ids=from_download_id)
except Exception as e:
self.error(f"删除源下载任务 {history_key} 失败: {str(e)}")
# 删除标志
delete_flag = False
self.info(f"开始删除下载任务 {download} {download_id}")
try:
dl_files = self.downloader.get_files(tid=download_id,
downloader_id=download)
if not dl_files:
return
for dl_file in dl_files:
dl_file_name = dl_file.get("name")
if os.path.normpath(source_file).endswith(os.path.normpath(dl_file_name)):
delete_flag = True
break
if delete_flag:
# 删除源下载任务或转种后下载任务
self.info(f"删除下载任务:{download} - {download_id}")
self.downloader.delete_torrents(downloader_id=download,
ids=download_id)
# 删除转种记录
if del_history:
self.delete_history(key=history_key, plugin_id=plugin_id)
# 处理辅种
self.__del_seed(download=download, download_id=download_id)
except Exception as e:
self.error(f"删除转种辅种下载任务失败: {str(e)}")
def __del_seed(self, download, download_id):
"""
删除辅种
"""
# 查询是否有辅种记录
history_key = download_id
plugin_id = "IYUUAutoSeed"
seed_history = self.get_history(key=history_key,
plugin_id=plugin_id) or []
self.info(f"查询到 {history_key} 辅种历史 {seed_history}")
# 有辅种记录则处理辅种
if seed_history and isinstance(seed_history, list):
for history in seed_history:
downloader = history['downloader']
torrents = history['torrents']
if not downloader or not torrents:
return
if not isinstance(torrents, list):
torrents = [torrents]
# 删除辅种历史中与本下载器相同的辅种记录
                if str(downloader) == str(download):
                    for torrent in torrents:
                        # 删除辅种
                        self.info(f"删除辅种:{downloader} - {torrent}")
                        self.downloader.delete_torrents(downloader_id=downloader,
                                                        ids=torrent)
                    # 从辅种历史中移除本下载器的记录(del只会删除局部变量名,不会修改列表)
                    seed_history.remove(history)
                    break
# 更新辅种历史
if len(seed_history) > 0:
self.update_history(key=history_key,
value=seed_history,
plugin_id=plugin_id)
else:
self.delete_history(key=history_key,
plugin_id=plugin_id)
| 8,254 | Python | .py | 186 | 24.569892 | 152 | 0.501969 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
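Both deletion paths in `__del_torrent` above decide whether a torrent owns the deleted file by comparing normalized path suffixes. A self-contained sketch of that test, assuming (as the plugin's `endswith` check implies) that the downloader reports file names relative to the save path:

import os

def torrent_contains_file(dl_files, source_file):
    # True when any file the downloader reports is a path-suffix of the
    # deleted source file; the same normpath/endswith test used above.
    target = os.path.normpath(source_file)
    for dl_file in dl_files:
        name = os.path.normpath(dl_file.get("name", ""))
        if name and name != "." and target.endswith(name):
            return True
    return False

print(torrent_contains_file([{"name": "Show/S01E01.mkv"}],
                            "/downloads/Show/S01E01.mkv"))  # True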
2,289,095 | movielike.py | demigody_nas-tools/app/plugins/modules/movielike.py | import os
from app.filetransfer import FileTransfer
from app.media import Category
from app.mediaserver import MediaServer
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
from app.utils.types import EventType, MediaServerType, MediaType
from config import RMT_FAVTYPE, Config
class MovieLike(_IPluginModule):
# 插件名称
module_name = "电影精选"
# 插件描述
module_desc = "媒体服务器中用户将电影设为最爱时,自动转移到精选文件夹。"
# 插件图标
module_icon = "like.jpg"
# 主题色
module_color = "#E4003F"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "movielike_"
# 加载顺序
module_order = 10
# 可使用的用户级别
auth_level = 2
# 私有属性
_enable = False
_dir_name = RMT_FAVTYPE
_remote_path = None
_local_path = None
_remote_path2 = None
_local_path2 = None
_remote_path3 = None
_local_path3 = None
mediaserver = None
filetransfer = None
category = None
def init_config(self, config: dict = None):
self.mediaserver = MediaServer()
self.filetransfer = FileTransfer()
self.category = Category()
if config:
self._enable = config.get("enable")
self._dir_name = config.get("dir_name")
if self._dir_name:
Config().update_favtype(self._dir_name)
self._local_path = config.get("local_path")
self._remote_path = config.get("remote_path")
self._local_path2 = config.get("local_path2")
self._remote_path2 = config.get("remote_path2")
self._local_path3 = config.get("local_path3")
self._remote_path3 = config.get("remote_path3")
def get_state(self):
return self._enable
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启电影精选',
'required': "",
'tooltip': '目前仅支持Emby,NAStool挂载目录如与Emby媒体库目录不一致则需要配置路径映射。在Emby的Webhooks中勾选 用户->添加到最爱 事件,如需控制仅部分用户生效,可在媒体服务器单独建立Webhook并设置对应用户范围',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '分类目录名称',
'required': True,
'tooltip': '添加到喜爱的电影将移动到该目录下',
'type': 'text',
'content': [
{
'default': RMT_FAVTYPE,
'placeholder': RMT_FAVTYPE,
'id': 'dir_name',
}
]
}
],
]
},
{
'type': 'details',
'summary': '路径映射',
            'tooltip': '当NAStool与媒体服务器的媒体库路径不一致时,需要映射转换,最多可设置三组,留空时不启用',
'content': [
# 同一行
[
{
'title': '路径1',
'type': 'text',
'content': [
{
'id': 'local_path',
'placeholder': '本地路径'
},
{
'id': 'remote_path',
'placeholder': '远程路径'
}
]
},
{
'title': '路径2',
'type': 'text',
'content': [
{
'id': 'local_path2',
'placeholder': '本地路径'
},
{
'id': 'remote_path2',
'placeholder': '远程路径'
}
]
},
{
'title': '路径3',
'type': 'text',
'content': [
{
'id': 'local_path3',
'placeholder': '本地路径'
},
{
'id': 'remote_path3',
'placeholder': '远程路径'
}
]
}
],
]
}
]
def stop_service(self):
pass
@EventHandler.register(EventType.EmbyWebhook)
def favtransfer(self, event):
"""
监听Emby的Webhook事件
"""
if not self._enable or not self._dir_name:
return
# 不是当前正在使用的媒体服务器时不处理
if self.mediaserver.get_type() != MediaServerType.EMBY:
return
event_info = event.event_data
# 用户事件
action_type = event_info.get('Event')
# 不是like事件不处理
if action_type != 'item.rate':
return
# 不是电影不处理
if event_info.get('Item', {}).get('Type') != 'Movie':
return
# 路径不存在不处理
item_path = event_info.get('Item', {}).get('Path')
if not item_path:
return
# 路径替换
if self._local_path and self._remote_path and item_path.startswith(self._remote_path):
item_path = item_path.replace(self._remote_path, self._local_path).replace('\\', '/')
if self._local_path2 and self._remote_path2 and item_path.startswith(self._remote_path2):
item_path = item_path.replace(self._remote_path2, self._local_path2).replace('\\', '/')
if self._local_path3 and self._remote_path3 and item_path.startswith(self._remote_path3):
item_path = item_path.replace(self._remote_path3, self._local_path3).replace('\\', '/')
# 路径不存在不处理
if not os.path.exists(item_path):
self.warn(f"{item_path} 文件不存在")
return
# 文件转为目录
if os.path.isdir(item_path):
movie_dir = item_path
else:
movie_dir = os.path.dirname(item_path)
# 电影二级分类名
movie_type = os.path.basename(os.path.dirname(movie_dir))
if movie_type == self._dir_name:
return
if movie_type not in self.category.movie_categorys:
return
# 电影名
movie_name = os.path.basename(movie_dir)
# 最优媒体库路径
movie_path = self.filetransfer.get_best_target_path(mtype=MediaType.MOVIE, in_path=movie_dir)
# 原路径
org_path = os.path.join(movie_path, movie_type, movie_name)
# 精选路径
new_path = os.path.join(movie_path, self._dir_name, movie_name)
# 开始转移文件
if os.path.exists(org_path):
self.info(f"开始转移文件 {org_path} 到 {new_path} ...")
if os.path.exists(new_path):
self.info(f"目录 {new_path} 已存在")
return
ret, retmsg = SystemUtils.move(org_path, new_path)
if ret != 0:
self.error(f"{retmsg}")
else:
# 发送刷新媒体库事件
EventHandler.send_event(EventType.RefreshMediaServer, {
"dest": new_path,
"media_info": {}
})
else:
self.warn(f"{org_path} 目录不存在")
| 8,803 | Python | .py | 218 | 20.724771 | 157 | 0.427359 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
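The three hard-coded mapping slots in `favtransfer` above all apply the same startswith/replace translation. A short sketch that generalizes the pattern to any number of (remote, local) pairs; the helper name and sample paths are illustrative, not part of the plugin:

def map_remote_to_local(path, mappings):
    # Translate a media-server path to a local one using the first matching
    # (remote_prefix, local_prefix) pair.
    for remote, local in mappings:
        if remote and local and path.startswith(remote):
            return path.replace(remote, local, 1).replace("\\", "/")
    return path

print(map_remote_to_local("/emby/movies/A (2020)/A.mkv",
                          [("/emby/movies", "/mnt/media/movies")]))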
2,289,096 | iyuuautoseed.py | demigody_nas-tools/app/plugins/modules/iyuuautoseed.py | import re
from copy import deepcopy
from datetime import datetime, timedelta
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from jinja2 import Template
from lxml import etree
from app.downloader import Downloader
from app.media.meta import MetaInfo
from app.plugins.modules._base import _IPluginModule
from app.plugins.modules.iyuu.iyuu_helper import IyuuHelper
from app.sites import Sites
from app.utils import RequestUtils
from app.utils.types import DownloaderType
from config import Config
class IYUUAutoSeed(_IPluginModule):
# 插件名称
module_name = "IYUU自动辅种"
# 插件描述
module_desc = "基于IYUU官方Api实现自动辅种。"
# 插件图标
module_icon = "iyuu.png"
# 主题色
module_color = "#F3B70B"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "iyuuautoseed_"
# 加载顺序
module_order = 20
# 可使用的用户级别
    auth_level = 2
# 私有属性
_scheduler = None
downloader = None
iyuuhelper = None
sites = None
# 限速开关
_enable = False
_cron = None
_onlyonce = False
_token = None
_downloaders = []
_sites = []
_notify = False
_nolabels = None
_clearcache = False
# 退出事件
_event = Event()
# 种子链接xpaths
_torrent_xpaths = [
"//form[contains(@action, 'download.php?id=')]/@action",
"//a[contains(@href, 'download.php?hash=')]/@href",
"//a[contains(@href, 'download.php?id=')]/@href",
"//a[@class='index'][contains(@href, '/dl/')]/@href",
"//input[@title='DirectLink']/@value",
]
_torrent_tags = ["已整理", "辅种"]
    # 待校验种子hash清单
_recheck_torrents = {}
_is_recheck_running = False
# 辅种缓存,出错的种子不再重复辅种,可清除
_error_caches = []
# 辅种缓存,辅种成功的种子,可清除
_success_caches = []
# 辅种缓存,出错的种子不再重复辅种,且无法清除。种子被删除404等情况
_permanent_error_caches = []
# 辅种计数
total = 0
realtotal = 0
success = 0
exist = 0
fail = 0
cached = 0
@staticmethod
def get_fields():
downloaders = {k: v for k, v in Downloader().get_downloader_conf_simple().items()
if v.get("type") in ["qbittorrent", "transmission"] and v.get("enabled")}
sites = {site.get("id"): site for site in Sites().get_site_dict()}
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启自动辅种',
'required': "",
'tooltip': '开启后,自动监控下载器,对下载完成的任务根据执行周期自动辅种,辅种任务会自动暂停,校验通过且完整后才开始做种。',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': 'IYUU Token',
'required': "required",
'tooltip': '登录IYUU使用的Token,用于调用IYUU官方Api;需要完成IYUU认证,填写token并保存后,可通过左下角按钮完成认证(已通过IYUU其它渠道认证过的无需再认证)',
'type': 'text',
'content': [
{
'id': 'token',
'placeholder': 'IYUUxxx',
}
]
},
{
'title': '执行周期',
'required': "required",
'tooltip': '辅种任务执行的时间周期,支持5位cron表达式;应避免任务执行过于频繁',
'type': 'text',
'content': [
{
'id': 'cron',
                                'placeholder': '0 0 * * *',
}
]
}
],
[
{
'title': '不辅种标签',
'required': "",
                        'tooltip': '下载器中的种子有以下标签时不进行辅种,多个标签使用英文逗号分隔',
'type': 'text',
'content': [
{
'id': 'nolabels',
'placeholder': '使用,分隔多个标签',
}
]
}
]
]
},
{
'type': 'details',
'summary': '辅种下载器',
'tooltip': '只有选中的下载器才会执行辅种任务',
'content': [
# 同一行
[
{
'id': 'downloaders',
'type': 'form-selectgroup',
'content': downloaders
},
]
]
},
{
'type': 'details',
'summary': '辅种站点',
'tooltip': '只有选中的站点才会执行辅种任务,不选则默认为全选',
'content': [
# 同一行
[
{
'id': 'sites',
'type': 'form-selectgroup',
'content': sites
},
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '运行时通知',
'required': "",
                        'tooltip': '运行辅种任务后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'notify',
},
{
'title': '立即运行一次',
'required': "",
                        'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照执行周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
},
{
'title': '下一次运行时清除缓存',
'required': "",
'tooltip': '打开后下一次运行前会先清除辅种缓存,辅种出错的种子会重新尝试辅种,此开关仅生效一次',
'type': 'switch',
'id': 'clearcache',
}
]
]
}
]
def init_config(self, config=None):
self.downloader = Downloader()
self.sites = Sites()
# 读取配置
if config:
self._enable = config.get("enable")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._token = config.get("token")
self._downloaders = config.get("downloaders")
self._sites = config.get("sites")
self._notify = config.get("notify")
self._nolabels = config.get("nolabels")
self._clearcache = config.get("clearcache")
self._permanent_error_caches = config.get("permanent_error_caches") or []
self._error_caches = [] if self._clearcache else config.get("error_caches") or []
self._success_caches = [] if self._clearcache else config.get("success_caches") or []
# 停止现有任务
self.stop_service()
# 启动定时任务 & 立即运行一次
if self.get_state() or self._onlyonce:
self.iyuuhelper = IyuuHelper(token=self._token)
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
if self._cron:
try:
self._scheduler.add_job(self.auto_seed,
CronTrigger.from_crontab(self._cron))
self.info(f"辅种服务启动,周期:{self._cron}")
except Exception as err:
self.error(f"运行周期格式不正确:{str(err)}")
            if self._onlyonce:
                self.info("辅种服务启动,立即运行一次")
                self._scheduler.add_job(self.auto_seed, 'date',
                                        run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
                                            seconds=3))
            if self._onlyonce or self._clearcache:
                # 需在重置开关前判断,否则重置后条件恒为False,配置永远不会保存
                # 关闭一次性开关与清除缓存开关
                self._onlyonce = False
                self._clearcache = False
                # 保存配置
                self.__update_config()
if self._scheduler.get_jobs():
# 追加种子校验服务
self._scheduler.add_job(self.check_recheck, 'interval', minutes=3)
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self):
return True if self._enable and self._cron and self._token and self._downloaders else False
def get_page(self):
"""
IYUU认证页面
:return: 标题,页面内容,确定按钮响应函数
"""
if not self._token:
return None, None, None
if not self.iyuuhelper:
self.iyuuhelper = IyuuHelper(token=self._token)
auth_sites = self.iyuuhelper.get_auth_sites()
template = """
<div class="modal-body">
<div class="row">
<div class="col">
<div class="mb-3">
<label class="form-label required">IYUU合作站点</label>
<select class="form-control" id="iyuuautoseed_site" onchange="">
{% for Site in AuthSites %}
<option value="{{ Site.site }}">{{ Site.site }}</option>
{% endfor %}
</select>
</div>
</div>
</div>
<div class="row">
<div class="col-lg">
<div class="mb-3">
<label class="form-label required">用户ID</label>
<input class="form-control" autocomplete="off" type="text" id="iyuuautoseed_uid" placeholder="uid">
</div>
</div>
<div class="col-lg">
<div class="mb-3">
<label class="form-label required">PassKey</label>
<input class="form-control" autocomplete="off" type="text" id="iyuuautoseed_passkey" placeholder="passkey">
</div>
</div>
</div>
</div>
"""
return "IYUU站点绑定", Template(template).render(AuthSites=auth_sites,
IyuuToken=self._token), "IYUUAutoSeed_user_bind_site()"
@staticmethod
def get_script():
"""
页面JS脚本
"""
return """
// IYUU站点认证
function IYUUAutoSeed_user_bind_site(){
let site = $("#iyuuautoseed_site").val();
let uid = $("#iyuuautoseed_uid").val();
let passkey = $("#iyuuautoseed_passkey").val();
let token = '{{ IyuuToken }}';
if (!uid) {
$("#iyuuautoseed_uid").addClass("is-invalid");
return;
} else {
$("#iyuuautoseed_uid").removeClass("is-invalid");
}
if (!passkey) {
$("#iyuuautoseed_passkey").addClass("is-invalid");
return;
} else {
$("#iyuuautoseed_passkey").removeClass("is-invalid");
}
// 认证
ajax_post("run_plugin_method", {"plugin_id": 'IYUUAutoSeed', 'method': 'iyuu_bind_site', "site": site, "uid": uid, "passkey": passkey}, function (ret) {
$("#modal-plugin-page").modal('hide');
if (ret.result.code === 0) {
show_success_modal("IYUU用户认证成功!", function () {
$("#modal-plugin-IYUUAutoSeed").modal('show');
});
} else {
show_fail_modal(ret.result.msg, function(){
$("#modal-plugin-page").modal('show');
});
}
});
}
"""
def iyuu_bind_site(self, site, passkey, uid):
"""
IYUU绑定合作站点
"""
state, msg = self.iyuuhelper.bind_site(site=site,
passkey=passkey,
uid=uid)
return {"code": 0 if state else 1, "msg": msg}
def __update_config(self):
self.update_config({
"enable": self._enable,
"onlyonce": self._onlyonce,
"clearcache": self._clearcache,
"cron": self._cron,
"token": self._token,
"downloaders": self._downloaders,
"sites": self._sites,
"notify": self._notify,
"nolabels": self._nolabels,
"success_caches": self._success_caches,
"error_caches": self._error_caches,
"permanent_error_caches": self._permanent_error_caches
})
def auto_seed(self):
"""
开始辅种
"""
if not self._enable or not self._token or not self._downloaders:
self.warn("辅种服务未启用或未配置")
return
if not self.iyuuhelper:
return
self.info("开始辅种任务 ...")
# 计数器初始化
self.total = 0
self.realtotal = 0
self.success = 0
self.exist = 0
self.fail = 0
self.cached = 0
# 扫描下载器辅种
for downloader in self._downloaders:
self.info(f"开始扫描下载器 {downloader} ...")
# 下载器类型
downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
# 获取下载器中已完成的种子
torrents = self.downloader.get_completed_torrents(downloader_id=downloader)
if torrents:
self.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}")
else:
self.info(f"下载器 {downloader} 没有已完成种子")
continue
hash_strs = []
for torrent in torrents:
if self._event.is_set():
self.info(f"辅种服务停止")
return
# 获取种子hash
hash_str = self.__get_hash(torrent, downloader_type)
if hash_str in self._error_caches or hash_str in self._permanent_error_caches:
self.info(f"种子 {hash_str} 辅种失败且已缓存,跳过 ...")
continue
save_path = self.__get_save_path(torrent, downloader_type)
# 获取种子标签
torrent_labels = self.__get_label(torrent, downloader_type)
if torrent_labels and self._nolabels:
is_skip = False
for label in self._nolabels.split(','):
if label in torrent_labels:
self.info(f"种子 {hash_str} 含有不转移标签 {label},跳过 ...")
is_skip = True
break
if is_skip:
continue
hash_strs.append({
"hash": hash_str,
"save_path": save_path
})
if hash_strs:
self.info(f"总共需要辅种的种子数:{len(hash_strs)}")
# 分组处理,减少IYUU Api请求次数
chunk_size = 200
for i in range(0, len(hash_strs), chunk_size):
# 切片操作
chunk = hash_strs[i:i + chunk_size]
# 处理分组
self.__seed_torrents(hash_strs=chunk,
downloader=downloader)
# 触发校验检查
self.check_recheck()
else:
self.info(f"没有需要辅种的种子")
# 保存缓存
self.__update_config()
# 发送消息
if self._notify:
if self.success or self.fail:
self.send_message(
title="【IYUU自动辅种任务完成】",
text=f"服务器返回可辅种总数:{self.total}\n"
f"实际可辅种数:{self.realtotal}\n"
f"已存在:{self.exist}\n"
f"成功:{self.success}\n"
f"失败:{self.fail}\n"
f"{self.cached} 条失败记录已加入缓存"
)
self.info("辅种任务执行完成")
def check_recheck(self):
"""
定时检查下载器中种子是否校验完成,校验完成且完整的自动开始辅种
"""
if not self._recheck_torrents:
return
if self._is_recheck_running:
return
self._is_recheck_running = True
for downloader in self._downloaders:
# 需要检查的种子
recheck_torrents = self._recheck_torrents.get(downloader) or []
if not recheck_torrents:
continue
self.info(f"开始检查下载器 {downloader} 的校验任务 ...")
# 下载器类型
downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
# 获取下载器中的种子
torrents = self.downloader.get_torrents(downloader_id=downloader,
ids=recheck_torrents)
if torrents:
can_seeding_torrents = []
for torrent in torrents:
# 获取种子hash
hash_str = self.__get_hash(torrent, downloader_type)
if self.__can_seeding(torrent, downloader_type):
can_seeding_torrents.append(hash_str)
if can_seeding_torrents:
self.info(f"共 {len(can_seeding_torrents)} 个任务校验完成,开始辅种 ...")
self.downloader.start_torrents(downloader_id=downloader, ids=can_seeding_torrents)
# 去除已经处理过的种子
self._recheck_torrents[downloader] = list(
set(recheck_torrents).difference(set(can_seeding_torrents)))
elif torrents is None:
self.info(f"下载器 {downloader} 查询校验任务失败,将在下次继续查询 ...")
continue
else:
self.info(f"下载器 {downloader} 中没有需要检查的校验任务,清空待处理列表 ...")
self._recheck_torrents[downloader] = []
self._is_recheck_running = False
def __seed_torrents(self, hash_strs: list, downloader):
"""
执行一批种子的辅种
"""
if not hash_strs:
return
self.info(f"下载器 {downloader} 开始查询辅种,数量:{len(hash_strs)} ...")
# 下载器中的Hashs
hashs = [item.get("hash") for item in hash_strs]
# 每个Hash的保存目录
save_paths = {}
for item in hash_strs:
save_paths[item.get("hash")] = item.get("save_path")
# 查询可辅种数据
seed_list, msg = self.iyuuhelper.get_seed_info(hashs)
if not isinstance(seed_list, dict):
self.warn(f"当前种子列表没有可辅种的站点:{msg}")
return
else:
self.info(f"IYUU返回可辅种数:{len(seed_list)}")
# 遍历
for current_hash, seed_info in seed_list.items():
if not seed_info:
continue
seed_torrents = seed_info.get("torrent")
if not isinstance(seed_torrents, list):
seed_torrents = [seed_torrents]
# 本次辅种成功的种子
success_torrents = []
for seed in seed_torrents:
if not seed:
continue
if not isinstance(seed, dict):
continue
if not seed.get("sid") or not seed.get("info_hash"):
continue
if seed.get("info_hash") in hashs:
self.info(f"{seed.get('info_hash')} 已在下载器中,跳过 ...")
continue
if seed.get("info_hash") in self._success_caches:
self.info(f"{seed.get('info_hash')} 已处理过辅种,跳过 ...")
continue
if seed.get("info_hash") in self._error_caches or seed.get("info_hash") in self._permanent_error_caches:
self.info(f"种子 {seed.get('info_hash')} 辅种失败且已缓存,跳过 ...")
continue
# 添加任务
success = self.__download_torrent(seed=seed,
downloader=downloader,
save_path=save_paths.get(current_hash))
if success:
success_torrents.append(seed.get("info_hash"))
# 辅种成功的去重放入历史
if len(success_torrents) > 0:
self.__save_history(current_hash=current_hash,
downloader=downloader,
success_torrents=success_torrents)
self.info(f"下载器 {downloader} 辅种完成")
def __save_history(self, current_hash, downloader, success_torrents):
"""
[
{
"downloader":"2",
"torrents":[
"248103a801762a66c201f39df7ea325f8eda521b",
"bd13835c16a5865b01490962a90b3ec48889c1f0"
]
},
{
"downloader":"3",
"torrents":[
"248103a801762a66c201f39df7ea325f8eda521b",
"bd13835c16a5865b01490962a90b3ec48889c1f0"
]
}
]
"""
try:
# 查询当前Hash的辅种历史
seed_history = self.get_history(key=current_hash) or []
new_history = True
if len(seed_history) > 0:
for history in seed_history:
if not history:
continue
if not isinstance(history, dict):
continue
if not history.get("downloader"):
continue
# 如果本次辅种下载器之前有过记录则继续添加
                    if str(history.get("downloader")) == str(downloader):
history_torrents = history.get("torrents") or []
history["torrents"] = list(set(history_torrents + success_torrents))
new_history = False
break
# 本次辅种下载器之前没有成功记录则新增
if new_history:
seed_history.append({
"downloader": downloader,
"torrents": list(set(success_torrents))
})
# 保存历史
self.history(key=current_hash,
value=seed_history)
except Exception as e:
print(str(e))
def __download_torrent(self, seed, downloader, save_path):
"""
下载种子
torrent: {
"sid": 3,
"torrent_id": 377467,
"info_hash": "a444850638e7a6f6220e2efdde94099c53358159"
}
"""
self.total += 1
# 获取种子站点及下载地址模板
site_url, download_page = self.iyuuhelper.get_torrent_url(seed.get("sid"))
if not site_url or not download_page:
# 加入缓存
self._error_caches.append(seed.get("info_hash"))
self.fail += 1
self.cached += 1
return False
# 查询站点
site_info = self.sites.get_sites(siteurl=site_url)
if not site_info:
self.debug(f"没有维护种子对应的站点:{site_url}")
return False
if self._sites and str(site_info.get("id")) not in self._sites:
            self.info("当前站点不在选择的辅种站点范围,跳过 ...")
return False
self.realtotal += 1
# 查询hash值是否已经在下载器中
torrent_info = self.downloader.get_torrents(downloader_id=downloader,
ids=[seed.get("info_hash")])
if torrent_info:
self.debug(f"{seed.get('info_hash')} 已在下载器中,跳过 ...")
self.exist += 1
return False
# 站点流控
if self.sites.check_ratelimit(site_info.get("id")):
self.fail += 1
return False
# 下载种子
torrent_url = self.__get_download_url(seed=seed,
site=site_info,
base_url=download_page)
if not torrent_url:
# 加入失败缓存
self._error_caches.append(seed.get("info_hash"))
self.fail += 1
self.cached += 1
return False
# 强制使用Https
if "?" in torrent_url:
torrent_url += "&https=1"
else:
torrent_url += "?https=1"
meta_info = MetaInfo(title="IYUU自动辅种")
meta_info.set_torrent_info(site=site_info.get("name"),
enclosure=torrent_url)
# 辅种任务默认暂停
_, download_id, retmsg = self.downloader.download(
media_info=meta_info,
is_paused=True,
tag=deepcopy(self._torrent_tags),
downloader_id=downloader,
download_dir=save_path,
download_setting="-2",
)
if not download_id:
# 下载失败
self.warn(f"添加下载任务出错,"
f"错误原因:{retmsg or '下载器添加任务失败'},"
f"种子链接:{torrent_url}")
self.fail += 1
# 加入失败缓存
if retmsg and ('无法打开链接' in retmsg or '触发站点流控' in retmsg):
self._error_caches.append(seed.get("info_hash"))
else:
# 种子不存在的情况
self._permanent_error_caches.append(seed.get("info_hash"))
return False
else:
self.success += 1
# 追加校验任务
self.info(f"添加校验检查任务:{download_id} ...")
if not self._recheck_torrents.get(downloader):
self._recheck_torrents[downloader] = []
self._recheck_torrents[downloader].append(download_id)
# 下载成功
self.info(f"成功添加辅种下载,站点:{site_info.get('name')},种子链接:{torrent_url}")
# TR会自动校验
downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
if downloader_type == DownloaderType.QB:
# 开始校验种子
self.downloader.recheck_torrents(downloader_id=downloader, ids=[download_id])
# 成功也加入缓存,有一些改了路径校验不通过的,手动删除后,下一次又会辅上
self._success_caches.append(seed.get("info_hash"))
return True
@staticmethod
def __get_hash(torrent, dl_type):
"""
获取种子hash
"""
try:
return torrent.get("hash") if dl_type == DownloaderType.QB else torrent.hashString
except Exception as e:
print(str(e))
return ""
@staticmethod
def __get_label(torrent, dl_type):
"""
获取种子标签
"""
try:
return torrent.get("tags") or [] if dl_type == DownloaderType.QB else torrent.labels or []
except Exception as e:
print(str(e))
return []
@staticmethod
def __can_seeding(torrent, dl_type):
"""
判断种子是否可以做种并处于暂停状态
"""
try:
return torrent.get("state") == "pausedUP" if dl_type == DownloaderType.QB \
else (torrent.status.stopped and torrent.percent_done == 1)
except Exception as e:
print(str(e))
return False
@staticmethod
def __get_save_path(torrent, dl_type):
"""
获取种子保存路径
"""
try:
return torrent.get("save_path") if dl_type == DownloaderType.QB else torrent.download_dir
except Exception as e:
print(str(e))
return ""
def __get_download_url(self, seed, site, base_url):
"""
拼装种子下载链接
"""
def __is_special_site(url):
"""
判断是否为特殊站点
"""
spec_params = ["hash=", "authkey="]
if any(field in base_url for field in spec_params):
return True
if "hdchina.org" in url:
return True
if "hdsky.me" in url:
return True
if "hdcity.in" in url:
return True
if "totheglory.im" in url:
return True
return False
try:
if __is_special_site(site.get('strict_url')):
# 从详情页面获取下载链接
return self.__get_torrent_url_from_page(seed=seed, site=site)
else:
download_url = base_url.replace(
"id={}",
"id={id}"
).replace(
"/{}",
"/{id}"
).replace(
"/{torrent_key}",
""
).format(
**{
"id": seed.get("torrent_id"),
"passkey": site.get("passkey") or '',
"uid": site.get("uid") or '',
}
)
if download_url.count("{"):
                    self.warn(f"当前不支持该站点的辅种任务,Url转换失败:{seed}")
return None
download_url = re.sub(r"[&?]passkey=", "",
re.sub(r"[&?]uid=", "",
download_url,
flags=re.IGNORECASE),
flags=re.IGNORECASE)
return f"{site.get('strict_url')}/{download_url}"
except Exception as e:
self.warn(f"站点 {site.get('name')} Url转换失败:{str(e)},尝试通过详情页面获取种子下载链接 ...")
return self.__get_torrent_url_from_page(seed=seed, site=site)
def __get_torrent_url_from_page(self, seed, site):
"""
从详情页面获取下载链接
"""
try:
page_url = f"{site.get('strict_url')}/details.php?id={seed.get('torrent_id')}&hit=1"
self.info(f"正在获取种子下载链接:{page_url} ...")
res = RequestUtils(headers=site.get("ua"), cookies=site.get("cookie"),
proxies=Config().get_proxies() if site.get("proxy") else None).get_res(url=page_url)
if res is not None and res.status_code in (200, 500):
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
if not res.text:
self.warn(f"获取种子下载链接失败,页面内容为空:{page_url}")
return None
# 使用xpath从页面中获取下载链接
html = etree.HTML(res.text)
for xpath in self._torrent_xpaths:
download_url = html.xpath(xpath)
if download_url:
download_url = download_url[0]
self.info(f"获取种子下载链接成功:{download_url}")
if not download_url.startswith("http"):
if download_url.startswith("/"):
download_url = f"{site.get('strict_url')}{download_url}"
else:
download_url = f"{site.get('strict_url')}/{download_url}"
return download_url
self.warn(f"获取种子下载链接失败,未找到下载链接:{page_url}")
return None
else:
self.error(f"获取种子下载链接失败,请求失败:{page_url},{res.status_code if res else ''}")
return None
except Exception as e:
self.warn(f"获取种子下载链接失败:{str(e)}")
return None
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))
| 36,010 | Python | .py | 827 | 23.350665 | 164 | 0.451978 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
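`auto_seed` above batches torrent hashes into groups of 200 before each IYUU API call to limit request counts. The slicing idiom it uses is worth isolating; this is a minimal sketch with made-up hash values:

def chunked(items, size=200):
    # Yield fixed-size slices, mirroring the batching of torrent hashes
    # before each IYUU API request in auto_seed.
    for i in range(0, len(items), size):
        yield items[i:i + size]

hashes = [f"hash{n}" for n in range(450)]
print([len(chunk) for chunk in chunked(hashes)])  # [200, 200, 50]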
2,289,097 | mediasyncdel.py | demigody_nas-tools/app/plugins/modules/mediasyncdel.py | import os
import time
from app.filetransfer import FileTransfer
from app.media import Media
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils.types import EventType, MediaType
from web.action import WebAction
class MediaSyncDel(_IPluginModule):
# 插件名称
module_name = "Emby同步删除"
# 插件描述
module_desc = "Emby删除媒体后同步删除历史记录或源文件。"
# 插件图标
module_icon = "emby.png"
# 主题色
module_color = "#C90425"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
module_config_prefix = "mediasyncdel_"
# 加载顺序
module_order = 15
# 可使用的用户级别
auth_level = 1
# 私有属性
filetransfer = None
_enable = False
_del_source = False
_exclude_path = None
_send_notify = False
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启Emby同步删除',
'required': "",
'tooltip': 'Emby删除媒体后同步删除历史记录,需按照wiki(https://github.com/thsrite/emby_sync_del_nt)配置Emby Scripter-X插件后才能正常使用。',
'type': 'switch',
'id': 'enable',
},
{
'title': '删除源文件',
'required': "",
'tooltip': '开启后,删除历史记录的同时会同步删除源文件。同时开启下载任务清理插件,可联动删除下载任务。',
'type': 'switch',
'id': 'del_source',
},
{
'title': '运行时通知',
'required': "",
'tooltip': '打开后Emby触发同步删除后会发送通知(需要打开插件消息通知)',
'type': 'switch',
'id': 'send_notify',
}
],
]
},
{
'type': 'details',
'summary': '排除路径',
'tooltip': '需要排除的Emby媒体库路径,多个用英文逗号分割(例如没经过NAStool刮削的或者云盘)。',
'content': [
[
{
'required': "",
'type': 'text',
'content': [
{
'id': 'exclude_path',
'placeholder': ''
}
]
}
]
]
}
]
def init_config(self, config=None):
self.filetransfer = FileTransfer()
# 读取配置
if config:
self._enable = config.get("enable")
self._del_source = config.get("del_source")
self._exclude_path = config.get("exclude_path")
self._send_notify = config.get("send_notify")
@EventHandler.register(EventType.EmbyWebhook)
def sync_del(self, event):
"""
emby删除媒体库同步删除历史记录
"""
if not self._enable:
return
event_data = event.event_data
event_type = event_data.get("event_type")
if not event_type or str(event_type) != 'media_del':
return
# 是否虚拟标识
item_isvirtual = event_data.get("item_isvirtual")
if not item_isvirtual:
self.error("item_isvirtual参数未配置,为防止误删除,暂停插件运行")
self.update_config({
"enable": False,
"del_source": self._del_source,
"exclude_path": self._exclude_path,
"send_notify": self._send_notify
})
return
# 如果是虚拟item,则直接return,不进行删除
if item_isvirtual == 'True':
return
# 媒体类型
media_type = event_data.get("media_type")
# 媒体名称
media_name = event_data.get("media_name")
# 媒体路径
media_path = event_data.get("media_path")
# tmdb_id
tmdb_id = event_data.get("tmdb_id")
# 季数
season_num = event_data.get("season_num")
if season_num and str(season_num).isdigit() and int(season_num) < 10:
season_num = f'0{season_num}'
# 集数
episode_num = event_data.get("episode_num")
if episode_num and str(episode_num).isdigit() and int(episode_num) < 10:
episode_num = f'0{episode_num}'
if not media_type:
self.error(f"{media_name} 同步删除失败,未获取到媒体类型")
return
if not tmdb_id or not str(tmdb_id).isdigit():
self.error(f"{media_name} 同步删除失败,未获取到TMDB ID")
return
if self._exclude_path and media_path and any(
os.path.abspath(media_path).startswith(os.path.abspath(path)) for path in
self._exclude_path.split(",")):
self.info(f"媒体路径 {media_path} 已被排除,暂不处理")
return
# 删除电影
if media_type == "Movie":
msg = f'电影 {media_name} {tmdb_id}'
self.info(f"正在同步删除{msg}")
transfer_history = self.filetransfer.get_transfer_info_by(tmdbid=tmdb_id)
# 删除电视剧
elif media_type == "Series":
msg = f'剧集 {media_name} {tmdb_id}'
self.info(f"正在同步删除{msg}")
transfer_history = self.filetransfer.get_transfer_info_by(tmdbid=tmdb_id)
# 删除季 S02
elif media_type == "Season":
if not season_num or not str(season_num).isdigit():
self.error(f"{media_name} 季同步删除失败,未获取到具体季")
return
msg = f'剧集 {media_name} S{season_num} {tmdb_id}'
self.info(f"正在同步删除{msg}")
transfer_history = self.filetransfer.get_transfer_info_by(tmdbid=tmdb_id, season=f'S{season_num}')
# 删除剧集S02E02
elif media_type == "Episode":
if not season_num or not str(season_num).isdigit() or not episode_num or not str(episode_num).isdigit():
self.error(f"{media_name} 集同步删除失败,未获取到具体集")
return
msg = f'剧集 {media_name} S{season_num}E{episode_num} {tmdb_id}'
self.info(f"正在同步删除{msg}")
transfer_history = self.filetransfer.get_transfer_info_by(tmdbid=tmdb_id,
season_episode=f'S{season_num} E{episode_num}')
else:
return
if not transfer_history:
return
# 开始删除
if media_type == "Episode" or media_type == "Movie":
# 如果有剧集或者电影有多个版本的话,需要根据名称筛选下要删除的版本
logids = [history.ID for history in transfer_history if
history.DEST_FILENAME == os.path.basename(media_path)]
else:
logids = [history.ID for history in transfer_history]
if len(logids) == 0:
self.warn(f"{media_type} {media_name} 未获取到可删除数据")
return
self.info(f"获取到删除媒体数量 {len(logids)}")
WebAction().delete_history({
"logids": logids,
"flag": "del_source" if self._del_source else ""
})
# 发送消息
if self._send_notify:
if media_type == "Episode":
# 根据tmdbid获取图片
image_url = Media().get_episode_images(tv_id=tmdb_id,
season_id=season_num,
episode_id=episode_num,
orginal=True)
else:
# 根据tmdbid获取图片
image_url = Media().get_tmdb_backdrop(mtype=MediaType.MOVIE if media_type == "Movie" else MediaType.TV,
tmdbid=tmdb_id)
# 发送通知
self.send_message(
title="【Emby同步删除任务完成】",
image=image_url or 'https://emby.media/notificationicon.png',
text=f"{msg}\n"
f"数量 {len(logids)}\n"
f"时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}"
)
self.info(f"同步删除 {msg} 完成!")
def get_state(self):
return self._enable
def stop_service(self):
"""
退出插件
"""
pass
| 9,519 | Python | .py | 222 | 23.810811 | 139 | 0.476833 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
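`sync_del` above zero-pads single-digit season and episode numbers by prefixing a literal '0'. An equivalent sketch using a format specifier, with a guard for non-numeric input; the helper name is illustrative:

def pad_index(num):
    # Zero-pad a season/episode index to two digits, equivalent to the
    # manual "f'0{num}'" prefixes used in sync_del.
    return f"{int(num):02d}" if str(num).isdigit() else None

print(pad_index("2"), pad_index(11), pad_index(None))  # 02 11 None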
2,289,098 | libraryrefresh.py | demigody_nas-tools/app/plugins/modules/libraryrefresh.py | from app.mediaserver import MediaServer
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils.types import EventType
from datetime import datetime, timedelta
from app.utils import ExceptionUtils
from apscheduler.schedulers.background import BackgroundScheduler
from config import Config
class LibraryRefresh(_IPluginModule):
# 插件名称
module_name = "刷新媒体库"
# 插件描述
module_desc = "入库完成后刷新媒体库服务器海报墙。"
# 插件图标
module_icon = "refresh.png"
# 主题色
module_color = "#32BEA6"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "libraryrefresh_"
# 加载顺序
module_order = 1
# 可使用的用户级别
auth_level = 2
# 私有属性
_enable = False
_scheduler = None
_refresh_delay = 0
mediaserver = None
def init_config(self, config: dict = None):
self.mediaserver = MediaServer()
if config:
self._enable = config.get("enable")
try:
# 延迟时间
delay = int(float(config.get("delay") or 0))
if delay < 0:
delay = 0
self._refresh_delay = delay
except Exception as e:
ExceptionUtils.exception_traceback(e)
self._refresh_delay = 0
self.stop_service()
if not self._enable:
return
if self._refresh_delay > 0:
self.info(f"媒体库延迟刷新服务启动,延迟 {self._refresh_delay} 秒刷新媒体库")
self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
else:
self.info("媒体库实时刷新服务启动")
def get_state(self):
return self._enable
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启媒体库刷新',
'required': "",
'tooltip': 'Emby已有电视剧新增剧集时只会刷新对应电视剧,其它场景下如开启了二级分类则只刷新二级分类对应媒体库,否则刷新整库;Jellyfin/Plex只支持刷新整库',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '延迟刷新时间',
'required': "",
'tooltip': '延迟刷新时间,单位秒,0或留空则不延迟',
'type': 'text',
'content': [
{
'id': 'delay',
'placeholder': '0',
}
]
}
]
]
}
]
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._scheduler.shutdown()
self._scheduler = None
except Exception as e:
print(str(e))
def __refresh_library(self, event_data):
mediaserver_type = self.mediaserver.get_type().value
media_info = event_data.get("media_info")
if media_info:
title = media_info.get("title")
year = media_info.get("year")
media_name = f"{title} ({year})" if year else title
self.info(f"媒体服务器 {mediaserver_type} 刷新媒体 {media_name} ...")
self.mediaserver.refresh_library_by_items([{
"title": title,
"year": year,
"type": media_info.get("type"),
"category": media_info.get("category"),
# 目的媒体库目录
"target_path": event_data.get("dest"),
# 这个媒体的转移后的最终路径,包含文件名
"file_path": event_data.get("target_path")
}])
else:
self.info(f"媒体服务器 {mediaserver_type} 刷新整库 ...")
self.mediaserver.refresh_root_library()
@EventHandler.register([
EventType.TransferFinished,
EventType.RefreshMediaServer
])
def refresh(self, event):
"""
监听入库完成事件
"""
if not self._enable:
return
if self._refresh_delay > 0:
# 计算延迟时间
run_date = datetime.now() + timedelta(seconds=self._refresh_delay)
# 使用 date 触发器添加任务到调度器
formatted_run_date = run_date.strftime("%Y-%m-%d %H:%M:%S")
self.info(f"新增延迟刷新任务,将在 {formatted_run_date} 刷新媒体库")
self._scheduler.add_job(func=self.__refresh_library, args=[event.event_data], trigger='date',
run_date=run_date)
# 启动调度器(懒启动)
if not self._scheduler.running:
self._scheduler.start()
else:
# 不延迟刷新
self.__refresh_library(event.event_data)
| 5,744 | Python | .py | 148 | 21.547297 | 120 | 0.486383 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
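The delayed-refresh pattern in `refresh` above, a one-shot APScheduler 'date' job plus a lazily started scheduler, can be reproduced in isolation. A minimal runnable sketch with a stand-in `refresh` function:

import time
from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler

def refresh(name):
    print(f"refreshing {name}")

scheduler = BackgroundScheduler()
scheduler.add_job(refresh, trigger="date",
                  run_date=datetime.now() + timedelta(seconds=3),
                  args=["movies"])
# lazy start, exactly as the plugin does after queueing its first job
if not scheduler.running:
    scheduler.start()
time.sleep(5)  # keep the demo process alive long enough for the job to fire
scheduler.shutdown()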
2,289,099 | chinesesubfinder.py | demigody_nas-tools/app/plugins/modules/chinesesubfinder.py | import os.path
from functools import lru_cache
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import RequestUtils
from app.utils.types import MediaType, EventType
from config import Config
class ChineseSubFinder(_IPluginModule):
# 插件名称
module_name = "ChineseSubFinder"
# 插件描述
module_desc = "通知ChineseSubFinder下载字幕。"
# 插件图标
module_icon = "chinesesubfinder.png"
# 主题色
module_color = "#83BE39"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "chinesesubfinder_"
# 加载顺序
module_order = 3
# 可使用的用户级别
auth_level = 1
# 私有属性
_save_tmp_path = None
_host = None
_api_key = None
_remote_path = None
_local_path = None
_remote_path2 = None
_local_path2 = None
_remote_path3 = None
_local_path3 = None
def init_config(self, config: dict = None):
self._save_tmp_path = Config().get_temp_path()
if not os.path.exists(self._save_tmp_path):
os.makedirs(self._save_tmp_path, exist_ok=True)
if config:
self._api_key = config.get("api_key")
self._host = config.get('host')
if self._host:
if not self._host.startswith('http'):
self._host = "http://" + self._host
if not self._host.endswith('/'):
self._host = self._host + "/"
self._local_path = config.get("local_path")
self._remote_path = config.get("remote_path")
self._local_path2 = config.get("local_path2")
self._remote_path2 = config.get("remote_path2")
self._local_path3 = config.get("local_path3")
self._remote_path3 = config.get("remote_path3")
def get_state(self):
return self._host and self._api_key
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '服务器地址',
'required': "required",
'tooltip': '配置IP地址和端口,如为https则需要增加https://前缀',
'type': 'text',
'content': [
{
'id': 'host',
'placeholder': 'http://127.0.0.1:19035'
}
]
},
{
'title': 'Api Key',
'required': "required",
'tooltip': '在ChineseSubFinder->配置中心->实验室->API Key处生成',
'type': 'text',
'content': [
{
'id': 'api_key',
'placeholder': ''
}
]
}
]
]
},
{
'type': 'details',
'summary': '路径映射',
            'tooltip': '当NAStool与ChineseSubFinder媒体库路径不一致时,需要映射转换,最多可设置三组,留空时不启用',
'content': [
# 同一行
[
{
'title': '路径1',
'type': 'text',
'content': [
{
'id': 'local_path',
'placeholder': '本地路径'
},
{
'id': 'remote_path',
'placeholder': '远程路径'
}
]
},
{
'title': '路径2',
'type': 'text',
'content': [
{
'id': 'local_path2',
'placeholder': '本地路径'
},
{
'id': 'remote_path2',
'placeholder': '远程路径'
}
]
},
{
'title': '路径3',
'type': 'text',
'content': [
{
'id': 'local_path3',
'placeholder': '本地路径'
},
{
'id': 'remote_path3',
'placeholder': '远程路径'
}
]
}
],
]
}
]
def stop_service(self):
pass
@EventHandler.register(EventType.SubtitleDownload)
def download(self, event):
"""
调用ChineseSubFinder下载字幕
"""
if not self._host or not self._api_key:
return
item = event.event_data
if not item:
return
req_url = "%sapi/v1/add-job" % self._host
item_media = item.get("media_info")
item_type = item_media.get("type")
item_bluray = item.get("bluray")
item_file = item.get("file")
item_file_ext = item.get("file_ext")
if item_bluray:
file_path = "%s.mp4" % item_file
else:
if os.path.splitext(item_file)[-1] != item_file_ext:
file_path = "%s%s" % (item_file, item_file_ext)
else:
file_path = item_file
# 路径替换
if self._local_path and self._remote_path and file_path.startswith(self._local_path):
file_path = file_path.replace(self._local_path, self._remote_path).replace('\\', '/')
if self._local_path2 and self._remote_path2 and file_path.startswith(self._local_path2):
file_path = file_path.replace(self._local_path2, self._remote_path2).replace('\\', '/')
if self._local_path3 and self._remote_path3 and file_path.startswith(self._local_path3):
file_path = file_path.replace(self._local_path3, self._remote_path3).replace('\\', '/')
# 调用CSF下载字幕
self.__request_csf(req_url=req_url,
file_path=file_path,
item_type=0 if item_type == MediaType.MOVIE.value else 1,
item_bluray=item_bluray)
@lru_cache(maxsize=128)
def __request_csf(self, req_url, file_path, item_type, item_bluray):
# 一个名称只建一个任务
self.info("通知ChineseSubFinder下载字幕: %s" % file_path)
params = {
"video_type": item_type,
"physical_video_file_full_path": file_path,
"task_priority_level": 3,
"media_server_inside_video_id": "",
"is_bluray": item_bluray
}
try:
res = RequestUtils(headers={
"Authorization": "Bearer %s" % self._api_key
}).post(req_url, json=params)
if not res or res.status_code != 200:
self.error("调用ChineseSubFinder API失败!")
else:
                # 如果文件目录没有可识别的nfo元数据,此接口会返回控制符,推测是ChineseSubFinder的原因
                # Emby刷新元数据是异步的
if res.text:
job_id = res.json().get("job_id")
message = res.json().get("message")
if not job_id:
self.warn("ChineseSubFinder下载字幕出错:%s" % message)
else:
self.info("ChineseSubFinder任务添加成功:%s" % job_id)
else:
self.error("%s 目录缺失nfo元数据" % file_path)
except Exception as e:
self.error("连接ChineseSubFinder出错:" + str(e))
| 8,891 | Python | .py | 215 | 21.539535 | 99 | 0.410389 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
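`init_config` above normalizes the configured host by defaulting the scheme to http:// and forcing a trailing slash. The same logic as a small standalone helper; the function name is illustrative:

def normalize_host(host):
    # Default to http:// when no scheme is given and guarantee a trailing
    # slash, matching the normalization init_config applies.
    if not host:
        return host
    if not host.startswith("http"):
        host = "http://" + host
    if not host.endswith("/"):
        host += "/"
    return host

print(normalize_host("127.0.0.1:19035"))  # http://127.0.0.1:19035/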