id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2,289,100 | torrentmark.py | demigody_nas-tools/app/plugins/modules/torrentmark.py | from datetime import datetime
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.downloader import Downloader
from app.message import Message
from app.plugins.modules._base import _IPluginModule
from app.utils.types import DownloaderType
from config import Config
class TorrentMark(_IPluginModule):
# 插件名称
module_name = "种子标记"
# 插件描述
module_desc = "标记种子是否是PT。"
# 插件图标
module_icon = "tag.png"
# 主题色
module_color = "#4876b6"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "hsuyelin"
# 作者主页
author_url = "https://github.com/hsuyelin"
# 插件配置项ID前缀
module_config_prefix = "torrentmark_"
# 加载顺序
module_order = 10
# 可使用的用户级别
user_level = 1
# 私有属性
_scheduler = None
downloader = None
# 限速开关
_enable = False
_cron = None
_onlyonce = False
_downloaders = []
_nolabels = None
# 退出事件
_event = Event()
@staticmethod
def get_fields():
downloaders = {k: v for k, v in Downloader().get_downloader_conf_simple().items()
if v.get("type") in ["qbittorrent", "transmission"] and v.get("enabled")}
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启种子标记',
'required': "",
'tooltip': '开启后,自动监控下载器,对下载完成的任务根据执行周期标记。',
'type': 'switch',
'id': 'enable',
}
],
[
{
'title': '执行周期',
'required': "required",
'tooltip': '标记任务执行的时间周期,支持5位cron表达式;应避免任务执行过于频繁',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 0 ? *',
}
]
}
]
]
},
{
'type': 'details',
'summary': '下载器',
'tooltip': '只有选中的下载器才会执行标记',
'content': [
# 同一行
[
{
'id': 'downloaders',
'type': 'form-selectgroup',
'content': downloaders
},
]
]
},
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照刮削周期运行(同时上次触发运行的任务如果在运行中也会停止)',
'type': 'switch',
'id': 'onlyonce',
}
]
]
}
]
def init_config(self, config=None):
    """Initialize the plugin from saved config and (re)start the mark scheduler.

    :param config: dict with keys "enable", "onlyonce", "cron", "downloaders";
                   when None, previously loaded attribute values are kept.
    """
    self.downloader = Downloader()
    self.message = Message()
    # Load the saved configuration
    if config:
        self._enable = config.get("enable")
        self._onlyonce = config.get("onlyonce")
        self._cron = config.get("cron")
        self._downloaders = config.get("downloaders")
    # Stop any scheduler left over from a previous init
    self.stop_service()
    # Start the periodic job and/or a one-shot immediate run
    if self.get_state() or self._onlyonce:
        self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
        scheduled = False
        if self._cron:
            self.info(f"标记服务启动,周期:{self._cron}")
            self._scheduler.add_job(self.auto_mark,
                                    CronTrigger.from_crontab(self._cron))
            scheduled = True
        if self._onlyonce:
            self.info("标记服务启动,立即运行一次")
            self._scheduler.add_job(self.auto_mark, 'date',
                                    run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())))
            scheduled = True
            # Consume the one-shot switch and persist it so the run does not
            # repeat on the next restart
            self._onlyonce = False
            self.update_config({
                "enable": self._enable,
                "onlyonce": self._onlyonce,
                "cron": self._cron,
                "downloaders": self._downloaders
            })
        # BUGFIX: the original tested `self._cron or self._onlyonce` here, but
        # _onlyonce has already been reset to False above, so a run-once
        # request without a cron expression never started the scheduler and
        # the immediate run silently never happened.
        if scheduled:
            self._scheduler.print_jobs()
            self._scheduler.start()
def get_state(self):
    """Report whether the plugin is fully configured and active."""
    # Active only when the switch is on AND a schedule AND at least one
    # downloader are all configured.
    return bool(self._enable and self._cron and self._downloaders)
def auto_mark(self):
    """
    Run one marking pass: for every configured downloader, tag each completed
    torrent as "PT" or "BT" based on its tracker URL heuristic (__isPt).
    Aborts early when the plugin shutdown event is set.
    """
    if not self._enable or not self._downloaders:
        self.warn("标记服务未启用或未配置")
        return
    # Scan each selected downloader
    for downloader in self._downloaders:
        self.info(f"开始扫描下载器:{downloader} ...")
        # Downloader type (qBittorrent / Transmission)
        downloader_type = self.downloader.get_downloader_type(downloader_id=downloader)
        # Completed torrents in this downloader
        torrents = self.downloader.get_completed_torrents(downloader_id=downloader)
        if torrents:
            self.info(f"下载器 {downloader} 已完成种子数:{len(torrents)}")
        else:
            self.info(f"下载器 {downloader} 没有已完成种子")
            continue
        for torrent in torrents:
            # Stop promptly when the plugin is shutting down
            if self._event.is_set():
                self.info(f"标记服务停止")
                return
            # Torrent info-hash (used as the id for tagging)
            hash_str = self.__get_hash(torrent, downloader_type)
            # Current tags, deduplicated; empty tag removed below
            torrent_tags = set(self.__get_tag(torrent, downloader_type))
            pt_flag = self.__isPt(torrent, downloader_type)
            torrent_tags.discard("")
            if pt_flag is True:
                # Private tracker: replace any "BT" tag with "PT"
                torrent_tags.discard("BT")
                torrent_tags.add("PT")
                self.downloader.set_torrents_tag(downloader_id=downloader, ids=hash_str, tags=list(torrent_tags))
            else:
                # Public (or undecidable): replace any "PT" tag with "BT"
                torrent_tags.add("BT")
                torrent_tags.discard("PT")
                self.downloader.set_torrents_tag(downloader_id=downloader, ids=hash_str, tags=list(torrent_tags))
    self.info("标记任务执行完成")
@staticmethod
def __get_hash(torrent, dl_type):
    """
    Return the info-hash of a torrent.

    qBittorrent items are dict-like (a "hash" key); Transmission objects
    expose a hashString attribute. Returns "" on any lookup failure.
    """
    try:
        if dl_type == DownloaderType.QB:
            return torrent.get("hash")
        return torrent.hashString
    except Exception as e:
        print(str(e))
        return ""
@staticmethod
def __get_tag(torrent, dl_type):
    """
    Return the torrent's tag list.

    qBittorrent stores tags as one comma-separated string; Transmission
    exposes a labels list. Returns [] on any failure.
    """
    try:
        if dl_type == DownloaderType.QB:
            raw_tags = torrent.get("tags") or ""
            return [tag.strip() for tag in raw_tags.split(",")]
        return torrent.labels or []
    except Exception as e:
        print(str(e))
        return []
@staticmethod
def __isPt(torrent, dl_type):
    """
    Heuristically decide whether a torrent comes from a private tracker (PT).

    A torrent is considered PT when it has exactly one HTTP tracker whose
    announce URL carries a personal key ("secure="/"passkey=") or belongs to
    totheglory. Returns False when undecidable or on any error.

    BUGFIX: the original docstring was copy-pasted from __get_tag and wrongly
    described this method as "get torrent tags".
    """
    try:
        tracker_urls = []
        if dl_type == DownloaderType.QB and torrent.trackers_count == 1:
            # qBittorrent: keep only real HTTP announce entries
            # (skips the synthetic DHT/PeX/LSD pseudo-tracker rows)
            tracker_urls = [t['url'] for t in torrent.trackers.data
                            if 'http' in t['url']]
        elif dl_type == DownloaderType.TR:
            tracker_urls = [t['announce'] for t in torrent.trackers or []]
        if len(tracker_urls) == 1:
            url = tracker_urls[0]
            # Private-tracker fingerprints in the announce URL
            return any(marker in url
                       for marker in ("secure=", "passkey=", "totheglory"))
    except Exception as e:
        print(str(e))
    return False
def stop_service(self):
    """
    Shut down the scheduler when the plugin is unloaded or re-initialized.
    """
    try:
        scheduler = self._scheduler
        if scheduler:
            scheduler.remove_all_jobs()
            if scheduler.running:
                # Signal any in-flight mark pass to abort, then shut down
                self._event.set()
                scheduler.shutdown()
                self._event.clear()
            self._scheduler = None
    except Exception as e:
        print(str(e))
2,289,101 | customhosts.py | demigody_nas-tools/app/plugins/modules/customhosts.py | from python_hosts import Hosts, HostsEntry
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils, IpUtils
from app.utils.types import EventType
class CustomHosts(_IPluginModule):
# 插件名称
module_name = "自定义Hosts"
# 插件描述
module_desc = "修改系统hosts文件,加速网络访问。"
# 插件图标
module_icon = "hosts.png"
# 主题色
module_color = "#02C4E0"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
module_config_prefix = "customhosts_"
# 加载顺序
module_order = 11
# 可使用的用户级别
auth_level = 1
# 私有属性
_hosts = []
_enable = False
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': 'hosts',
'required': False,
'tooltip': 'hosts配置,会追加到系统hosts文件中生效',
'type': 'textarea',
'content':
{
'id': 'hosts',
'placeholder': '每行一个配置,格式为:ip host1 host2 ...',
'rows': 10,
}
}
],
[
{
'title': '错误hosts',
'required': False,
'tooltip': '错误的hosts配置会展示在此处,请修改上方hosts重新提交(错误的hosts不会写入系统hosts文件)',
'type': 'textarea',
'readonly': True,
'content':
{
'id': 'err_hosts',
'placeholder': '',
'rows': 2,
}
}
],
[
{
'title': '开启hosts同步',
'required': "",
'tooltip': '将自定义hosts更新到系统中生效,如因权限问题等无法更新到系统时此开关将自动关闭,此时需查看日志',
'type': 'switch',
'id': 'enable',
}
]
]
}
]
def init_config(self, config=None):
    """Load saved settings and, when enabled, sync the hosts into the system."""
    if not config:
        return
    self._enable = config.get("enable")
    self._hosts = config.get("hosts")
    # The textarea value arrives as one newline-separated string
    if isinstance(self._hosts, str):
        self._hosts = str(self._hosts).split('\n')
    if self._enable and self._hosts:
        # Drop empty entries and re-normalise each line to end with "\n"
        self._hosts = [host.replace("\n", "") + "\n"
                       for host in self._hosts
                       if host and host != '\n']
        # Write into the system hosts file
        error_flag, error_hosts = self.__add_hosts_to_system(self._hosts)
        # Auto-disable when the system file could not be updated
        self._enable = self._enable and not error_flag
        # Persist the cleaned hosts list and any rejected lines
        self.update_config({
            "hosts": self._hosts,
            "err_hosts": error_hosts,
            "enable": self._enable
        })
@EventHandler.register(EventType.PluginReload)
def reload(self, event):
    """
    Re-run init_config when a PluginReload event targets this plugin.
    """
    plugin_id = event.event_data.get("plugin_id")
    # Ignore events with no target or aimed at a different plugin
    if not plugin_id or plugin_id != self.__class__.__name__:
        return
    return self.init_config(self.get_config())
@staticmethod
def __read_system_hosts():
"""
读取系统hosts对象
"""
# 获取本机hosts路径
if SystemUtils.is_windows():
hosts_path = r"c:\windows\system32\drivers\etc\hosts"
else:
hosts_path = '/etc/hosts'
# 读取系统hosts
return Hosts(path=hosts_path)
def __add_hosts_to_system(self, hosts):
    """
    Merge the plugin-managed hosts into the system hosts file.

    Everything at and after the "# CustomHostsPlugin" marker comment is
    considered plugin-owned and is replaced wholesale on every call.

    :param hosts: list of "ip name1 name2 ..." lines
    :return: (error_flag, err_hosts) - error_flag is True when the file could
             not be written; err_hosts lists lines that failed to parse.
    """
    # System hosts file object
    system_hosts = self.__read_system_hosts()
    # Keep only the entries that precede the plugin marker; anything from the
    # marker onwards was written by a previous run of this plugin.
    original_entries = []
    for entry in system_hosts.entries:
        if entry.entry_type == "comment" and entry.comment == "# CustomHostsPlugin":
            break
        original_entries.append(entry)
    system_hosts.entries = original_entries
    new_entries = []   # successfully parsed HostsEntry objects
    err_hosts = []     # raw lines that failed to parse
    err_flag = False
    for host in hosts:
        if not host:
            continue
        parts = str(host).split()
        try:
            host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(parts[0])) else 'ipv6',
                                    address=parts[0],
                                    names=parts[1:])
            new_entries.append(host_entry)
        except Exception as err:
            err_hosts.append(host + "\n")
            self.error(f"{host} 格式转换错误:{str(err)}")
    # Write back to the system hosts file
    if new_entries:
        try:
            # Marker separating user content from plugin content
            system_hosts.add([HostsEntry(entry_type='comment', comment="# CustomHostsPlugin")])
            system_hosts.add(new_entries)
            system_hosts.write()
            self.info("更新系统hosts文件成功")
        except Exception as err:
            err_flag = True
            self.error(f"更新系统hosts文件失败:{str(err) or '请检查权限'}")
    return err_flag, err_hosts
def get_state(self):
    # Enabled only when the switch is on and at least one non-empty hosts
    # line exists. NOTE: short-circuit evaluation returns the last truthy/
    # falsy operand itself (e.g. a string or []), not necessarily a bool -
    # callers only test truthiness, so keep it as-is.
    return self._enable and self._hosts and self._hosts[0]
def stop_service(self):
"""
退出插件
"""
pass
| 6,577 | Python | .py | 174 | 19.488506 | 107 | 0.431234 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,102 | prowlarr.py | demigody_nas-tools/app/plugins/modules/prowlarr.py | import requests
from datetime import datetime, timedelta
from threading import Event
import xml.dom.minidom
from jinja2 import Template
import re
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.utils import RequestUtils
from app.indexer.indexerConf import IndexerConf
from app.utils import ExceptionUtils, StringUtils
from app.plugins.modules._base import _IPluginModule
from config import Config
class Prowlarr(_IPluginModule):
    # Plugin display name
    module_name = "Prowlarr"
    # Plugin description
    module_desc = "让内荐索引器支持检索Prowlarr站点资源"
    # Plugin icon
    module_icon = "prowlarr.png"
    # Theme color
    module_color = "#7F4A28"
    # Plugin version
    module_version = "1.5"
    # Plugin author
    module_author = "hsuyelin"
    # Author homepage
    author_url = "https://github.com/hsuyelin"
    # Prefix for config item IDs
    module_config_prefix = "prowlarr"
    # Load order
    module_order = 16
    # Required user level
    auth_level = 1
    # Private attributes
    eventmanager = None
    _scheduler = None
    # BUGFIX: _cron was never declared at class level (the sibling Jackett
    # plugin declares it); init_config assigns self._cron, so any read before
    # init_config ran would raise AttributeError. Declare it with a default.
    _cron = None
    _enable = False
    _host = ""
    _api_key = ""
    _onlyonce = False
    _sites = None
    # Shutdown event
    _event = Event()
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': 'Prowlarr地址',
'required': "required",
'tooltip': 'Prowlarr访问地址和端口,如为https需加https://前缀。注意需要先在Prowlarr中添加搜刮器,同时勾选所有搜刮器后搜索一次,才能正常测试通过和使用',
'type': 'text',
'content': [
{
'id': 'host',
'placeholder': 'http://127.0.0.1:9696',
}
]
},
{
'title': 'Api Key',
'required': "required",
'tooltip': '在Prowlarr->Settings->General->Security-> API Key中获取',
'type': 'text',
'content': [
{
'id': 'api_key',
'placeholder': '',
}
]
}
],
[
{
'title': '更新周期',
'required': "",
'tooltip': '索引列表更新周期,支持5位cron表达式,默认每24小时运行一次',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 */24 * *',
}
]
}
],
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次获取索引器列表,否则需要等到预先设置的更新周期才会获取',
'type': 'switch',
'id': 'onlyonce',
}
]
]
},
]
def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
if not isinstance(self._sites, list) or len(self._sites) <= 0:
return None, None, None
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
{% if IndexersCount > 0 %}
<tr>
<th>id</th>
<th>索引</th>
<th>是否公开</th>
<th></th>
</tr>
{% endif %}
</thead>
<tbody>
{% if IndexersCount > 0 %}
{% for Item in Indexers %}
<tr id="indexer_{{ Item.id }}">
<td>{{ Item.id }}</td>
<td>{{ Item.domain }}</td>
<td>{{ Item.public }}</td>
</tr>
{% endfor %}
{% endif %}
</tbody>
</table>
</div>
"""
return "索引列表", Template(template).render(IndexersCount=len(self._sites), Indexers=self._sites), None
def init_config(self, config=None):
    """
    Load saved settings, probe Prowlarr, and (re)start the refresh scheduler.

    :param config: dict with "host", "api_key", "onlyonce", "cron"
    """
    # SECURITY: do not dump the raw config dict - it contains the API key
    self.info("初始化配置")
    if config:
        self._host = config.get("host")
        if self._host:
            # Normalise the URL: ensure a scheme, strip any trailing slash
            if not self._host.startswith('http'):
                self._host = "http://" + self._host
            if self._host.endswith('/'):
                self._host = self._host.rstrip('/')
        self._api_key = config.get("api_key")
        # Probing also caches the indexer list in self._sites
        self._enable = self.get_status()
        self._onlyonce = config.get("onlyonce")
        self._cron = config.get("cron")
        if not StringUtils.is_string_and_not_empty(self._cron):
            self._cron = "0 0 */24 * *"
    # Stop any previous scheduler
    self.stop_service()
    # BUGFIX: the original only built the scheduler inside `if self._onlyonce:`
    # so the periodic cron refresh never ran for a plugin that was merely
    # enabled. Schedule whenever the plugin is enabled OR run-once is set.
    if self._enable or self._onlyonce:
        self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
        scheduled = False
        if self._cron:
            self.info(f"【{self.module_name}】 索引更新服务启动,周期:{self._cron}")
            self._scheduler.add_job(self.get_status, CronTrigger.from_crontab(self._cron))
            scheduled = True
        if self._onlyonce:
            self.info(f"【{self.module_name}】开始获取索引器状态")
            self._scheduler.add_job(self.get_status, 'date',
                                    run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
                                        seconds=3))
            scheduled = True
            # Consume and persist the one-shot switch
            self._onlyonce = False
            self.__update_config()
        if scheduled:
            self._scheduler.print_jobs()
            self._scheduler.start()
def get_status(self):
"""
检查连通性
:return: True、False
"""
if not self._api_key or not self._host:
return False
self._sites = self.get_indexers()
return True if isinstance(self._sites, list) and len(self._sites) > 0 else False
def get_state(self):
return self._enable
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
self.error(f"【{self.module_name}】停止插件错误: {str(e)}")
def __update_config(self):
"""
更新优选插件配置
"""
self.update_config({
"onlyonce": False,
"cron": self._cron,
"host": self._host,
"api_key": self._api_key
})
def get_indexers(self, check=True, indexer_id=None, public=True, plugins=True):
    """
    Fetch the configured Prowlarr indexers.

    The extra parameters are kept only for interface compatibility with
    callers; this implementation does not use them.

    :return: list of IndexerConf, or [] on any failure
    """
    headers = {
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "User-Agent": Config().get_ua(),
        "X-Api-Key": self._api_key,
        "Accept": "application/json, text/javascript, */*; q=0.01"
    }
    indexer_query_url = f"{self._host}/api/v1/indexerstats"
    try:
        ret = RequestUtils(headers=headers).get_res(indexer_query_url)
        if not ret:
            return []
        if not RequestUtils.check_response_is_valid_json(ret):
            self.info(f"【{self.module_name}】参数设置不正确,请检查所有的参数是否填写正确")
            return []
        # BUGFIX: guard the "indexers" key instead of assuming it exists
        # (the original raised KeyError on an unexpected payload), and drop
        # the dead `not ret / ret is None` re-checks that followed it -
        # `ret` was already validated above.
        ret_indexers = (ret.json() or {}).get("indexers") or []
        if not ret_indexers:
            return []
        return [IndexerConf({"id": f'{v["indexerName"]}-prowlarr',
                             "name": f'{v["indexerName"]}(Prowlarr)',
                             "domain": f'{self._host}/api/v1/indexer/{v["indexerId"]}',
                             "public": True,
                             "builtin": False,
                             "proxy": True,
                             "parser": self.module_name})
                for v in ret_indexers]
    except Exception as e2:
        ExceptionUtils.exception_traceback(e2)
        return []
def search(self, indexer,
keyword,
page):
"""
根据关键字多线程检索
"""
if not indexer or not keyword:
return None
self.info(f"【{self.module_name}】开始检索Indexer:{indexer.name} ...")
# 获取indexerId
indexerId_pattern = r"/indexer/([^/]+)"
indexerId_match = re.search(indexerId_pattern, indexer.domain)
indexerId = ""
if indexerId_match:
indexerId = indexerId_match.group(1)
if not StringUtils.is_string_and_not_empty(indexerId):
self.info(f"【{self.module_name}】{indexer.name} 索引id为空")
return []
try:
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": Config().get_ua(),
"X-Api-Key": self._api_key,
"Accept": "application/json, text/javascript, */*; q=0.01"
}
api_url = f"{self._host}/api/v1/search?query={keyword}&indexerIds={indexerId}&type=search&limit=100&offset=0"
ret = RequestUtils(headers=headers).get_res(api_url)
if not ret:
return []
if not RequestUtils.check_response_is_valid_json(ret):
self.info(f"【{self.module_name}】参数设置不正确,请检查所有的参数是否填写正确")
return []
if not ret.json():
return []
ret_indexers = ret.json()
if not ret or ret_indexers == [] or ret is None:
return []
torrents = []
for entry in ret_indexers:
tmp_dict = {'indexer_id': entry["indexerId"],
'indexer': entry["indexer"],
'title': entry["title"],
'enclosure': entry["downloadUrl"],
'description': entry["sortTitle"],
'size': entry["size"],
'seeders': entry["seeders"],
'peers': None,
'freeleech': None,
'downloadvolumefactor': None,
'uploadvolumefactor': None,
'page_url': entry["guid"],
'imdbid': None}
torrents.append(tmp_dict)
return torrents
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
return [] | 12,349 | Python | .py | 303 | 22.580858 | 125 | 0.444752 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,103 | jackett.py | demigody_nas-tools/app/plugins/modules/jackett.py | import requests
from datetime import datetime, timedelta
from threading import Event
import xml.dom.minidom
from jinja2 import Template
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.utils import RequestUtils, Torrent
from app.indexer.indexerConf import IndexerConf
from app.utils import ExceptionUtils, DomUtils, StringUtils
from app.plugins import EventManager
from app.plugins.modules._base import _IPluginModule
from config import Config
class Jackett(_IPluginModule):
# 插件名称
module_name = "Jackett"
# 插件描述
module_desc = "让内荐索引器支持检索Jackett站点资源"
# 插件图标
module_icon = "jackett.png"
# 主题色
module_color = "#141A21"
# 插件版本
module_version = "1.5"
# 插件作者
module_author = "hsuyelin"
# 作者主页
author_url = "https://github.com/hsuyelin"
# 插件配置项ID前缀
module_config_prefix = "jackett_"
# 加载顺序
module_order = 15
# 可使用的用户级别
auth_level = 1
# 私有属性
eventmanager = None
_scheduler = None
_cron = None
_enable = False
_host = ""
_api_key = ""
_password = ""
_onlyonce = False
_sites = None
# 退出事件
_event = Event()
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': 'Jackett地址',
'required': "required",
'tooltip': 'Jackett访问地址和端口,如为https需加https://前缀。注意需要先在Jackett中添加indexer,才能正常测试通过和使用',
'type': 'text',
'content': [
{
'id': 'host',
'placeholder': 'http://127.0.0.1:9117',
}
]
},
{
'title': 'Api Key',
'required': "required",
'tooltip': 'Jackett管理界面右上角复制API Key',
'type': 'text',
'content': [
{
'id': 'api_key',
'placeholder': '',
}
]
}
],
[
{
'title': '密码',
'required': "required",
'tooltip': 'Jackett管理界面中配置的Admin password,如未配置可为空',
'type': 'password',
'content': [
{
'id': 'password',
'placeholder': '',
}
]
}
],
[
{
'title': '更新周期',
'required': "",
'tooltip': '索引列表更新周期,支持5位cron表达式,默认每24小时运行一次',
'type': 'text',
'content': [
{
'id': 'cron',
'placeholder': '0 0 */24 * *',
}
]
}
],
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次获取索引器列表,否则需要等到预先设置的更新周期才会获取',
'type': 'switch',
'id': 'onlyonce'
}
]
]
},
]
def get_page(self):
"""
插件的额外页面,返回页面标题和页面内容
:return: 标题,页面内容,确定按钮响应函数
"""
if not isinstance(self._sites, list) or len(self._sites) <= 0:
return None, None, None
template = """
<div class="table-responsive table-modal-body">
<table class="table table-vcenter card-table table-hover table-striped">
<thead>
{% if IndexersCount > 0 %}
<tr>
<th>id</th>
<th>索引</th>
<th>是否公开</th>
<th></th>
</tr>
{% endif %}
</thead>
<tbody>
{% if IndexersCount > 0 %}
{% for Item in Indexers %}
<tr id="indexer_{{ Item.id }}">
<td>{{ Item.id }}</td>
<td>{{ Item.domain }}</td>
<td>{{ Item.public }}</td>
</tr>
{% endfor %}
{% endif %}
</tbody>
</table>
</div>
"""
return "索引列表", Template(template).render(IndexersCount=len(self._sites), Indexers=self._sites), None
def init_config(self, config=None):
    """
    Load saved settings, probe Jackett, and (re)start the refresh scheduler.

    :param config: dict with "host", "api_key", "password", "onlyonce", "cron"
    """
    self.eventmanager = EventManager()
    # Load configuration
    if config:
        self._host = config.get("host")
        if self._host:
            # Normalise the URL: ensure a scheme, strip any trailing slash
            if not self._host.startswith('http'):
                self._host = "http://" + self._host
            if self._host.endswith('/'):
                self._host = self._host.rstrip('/')
        self._api_key = config.get("api_key")
        self._password = config.get("password")
        # Probing also caches the indexer list in self._sites
        self._enable = self.get_status()
        self._onlyonce = config.get("onlyonce")
        self._cron = config.get("cron")
        if not StringUtils.is_string_and_not_empty(self._cron):
            self._cron = "0 0 */24 * *"
    # Stop any previous scheduler
    self.stop_service()
    # BUGFIX: the original only built the scheduler inside `if self._onlyonce:`
    # so the periodic cron refresh never ran for a plugin that was merely
    # enabled. Schedule whenever the plugin is enabled OR run-once is set.
    if self._enable or self._onlyonce:
        self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
        scheduled = False
        if self._cron:
            self.info(f"【{self.module_name}】 索引更新服务启动,周期:{self._cron}")
            self._scheduler.add_job(self.get_status, CronTrigger.from_crontab(self._cron))
            scheduled = True
        if self._onlyonce:
            self.info(f"【{self.module_name}】开始获取索引器状态")
            self._scheduler.add_job(self.get_status, 'date',
                                    run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
                                        seconds=3))
            scheduled = True
            # Consume and persist the one-shot switch
            self._onlyonce = False
            self.__update_config()
        if scheduled:
            self._scheduler.print_jobs()
            self._scheduler.start()
def get_status(self):
    """
    Probe Jackett by fetching the indexer list; caches it in self._sites.

    :return: True when at least one indexer is configured, else False
    """
    if not self._api_key or not self._host:
        return False
    self._sites = self.get_indexers()
    return isinstance(self._sites, list) and len(self._sites) > 0
def get_state(self):
return self._enable
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
self.error(f"【{self.module_name}】停止插件错误: {str(e)}")
def __update_config(self):
"""
更新优选插件配置
"""
self.update_config({
"onlyonce": False,
"cron": self._cron,
"host": self._host,
"api_key": self._api_key,
"password": self._password,
})
def get_indexers(self):
"""
获取配置的jackett indexer
:return: indexer 信息 [(indexerId, indexerName, url)]
"""
#获取Cookie
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": Config().get_ua(),
"X-Api-Key": self._api_key,
"Accept": "application/json, text/javascript, */*; q=0.01"
}
cookie = None
session = requests.session()
res = RequestUtils(headers=headers, session=session).post_res(url=f"{self._host}/UI/Dashboard", data={"password": self._password},
params={"password": self._password})
if res and session.cookies:
cookie = session.cookies.get_dict()
indexer_query_url = f"{self._host}/api/v2.0/indexers?configured=true"
try:
ret = RequestUtils(headers=headers, cookies=cookie).get_res(indexer_query_url)
if not ret:
return []
if not RequestUtils.check_response_is_valid_json(ret):
self.info(f"【{self.module_name}】参数设置不正确,请检查所有的参数是否填写正确")
return []
if not ret.json():
return []
indexers = [IndexerConf({"id": f'{v["id"]}-jackett',
"name": f'{v["name"]}(Jackett)',
"domain": f'{self._host}/api/v2.0/indexers/{v["id"]}/results/torznab/',
"public": True if v['type'] == 'public' else False,
"builtin": False,
"proxy": True,
"parser": self.module_name})
for v in ret.json()]
return indexers
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
return []
def search(self, indexer,
           keyword,
           page):
    """
    Search one Jackett indexer for a keyword via its Torznab endpoint.

    :param indexer: IndexerConf whose domain is the torznab results URL
    :param keyword: query string
    :param page: unused, kept for interface compatibility
    :return: list of result dicts, [] when nothing was found, None on bad args
    """
    if not indexer or not keyword:
        return None
    self.info(f"【{self.module_name}】开始检索Indexer:{indexer.name} ...")
    # Torznab search query
    api_url = f"{indexer.domain}?apikey={self._api_key}&t=search&q={keyword}"
    result_array = self.__parse_torznabxml(api_url)
    if not result_array:
        self.warn(f"【{self.module_name}】{indexer.name} 未检索到数据")
        return []
    # BUGFIX: a successful search was logged via self.warn(); a normal result
    # count is informational, not a warning.
    self.info(f"【{self.module_name}】{indexer.name} 返回数据:{len(result_array)}")
    return result_array
@staticmethod
def __parse_torznabxml(url):
"""
从torznab xml中解析种子信息
:param url: URL地址
:return: 解析出来的种子信息列表
"""
if not url:
return []
try:
ret = RequestUtils(timeout=10).get_res(url)
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
return []
if not ret:
return []
xmls = ret.text
if not xmls:
return []
torrents = []
try:
# 解析XML
dom_tree = xml.dom.minidom.parseString(xmls)
root_node = dom_tree.documentElement
items = root_node.getElementsByTagName("item")
for item in items:
try:
# indexer id
indexer_id = DomUtils.tag_value(item, "jackettindexer", "id",
default=DomUtils.tag_value(item, "jackettindexer", "id", ""))
# indexer
indexer = DomUtils.tag_value(item, "jackettindexer",
default=DomUtils.tag_value(item, "jackettindexer", default=""))
# 标题
title = DomUtils.tag_value(item, "title", default="")
if not title:
continue
# 种子链接
enclosure = DomUtils.tag_value(item, "enclosure", "url", default="")
if not enclosure:
continue
# 描述
description = DomUtils.tag_value(item, "description", default="")
# 种子大小
size = DomUtils.tag_value(item, "size", default=0)
# 种子页面
page_url = DomUtils.tag_value(item, "comments", default="")
# 做种数
seeders = 0
# 下载数
peers = 0
# 是否免费
freeleech = False
# 下载因子
downloadvolumefactor = 1.0
# 上传因子
uploadvolumefactor = 1.0
# imdbid
imdbid = ""
torznab_attrs = item.getElementsByTagName("torznab:attr")
for torznab_attr in torznab_attrs:
name = torznab_attr.getAttribute('name')
value = torznab_attr.getAttribute('value')
if name == "seeders":
seeders = value
if name == "peers":
peers = value
if name == "downloadvolumefactor":
downloadvolumefactor = value
if float(downloadvolumefactor) == 0:
freeleech = True
if name == "uploadvolumefactor":
uploadvolumefactor = value
if name == "imdbid":
imdbid = value
tmp_dict = {'indexer_id': indexer_id,
'indexer': indexer,
'title': title,
'enclosure': enclosure,
'description': description,
'size': size,
'seeders': seeders,
'peers': peers,
'freeleech': freeleech,
'downloadvolumefactor': downloadvolumefactor,
'uploadvolumefactor': uploadvolumefactor,
'page_url': page_url,
'imdbid': imdbid}
torrents.append(tmp_dict)
except Exception as e:
ExceptionUtils.exception_traceback(e)
continue
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
pass
return torrents | 15,779 | Python | .py | 383 | 21.848564 | 138 | 0.426512 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,104 | opensubtitles.py | demigody_nas-tools/app/plugins/modules/opensubtitles.py | import os
import shutil
from functools import lru_cache
from urllib.parse import quote
from pyquery import PyQuery
from app.helper import SiteHelper
from app.helper.chrome_helper import ChromeHelper
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import RequestUtils, PathUtils, ExceptionUtils
from app.utils.types import MediaType, EventType
from config import Config, RMT_SUBEXT
class OpenSubtitles(_IPluginModule):
# 插件名称
module_name = "OpenSubtitles"
# 插件描述
module_desc = "从opensubtitles.org下载中文字幕。"
# 插件图标
module_icon = "opensubtitles.png"
# 主题色
module_color = "bg-black"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
module_config_prefix = "opensubtitles_"
# 加载顺序
module_order = 2
# 可使用的用户级别
auth_level = 1
# 私有属性
sitehelper = None
_cookie = ""
_ua = None
_url_imdbid = "https://www.opensubtitles.org/zh/search/imdbid-%s/sublanguageid-chi"
_url_keyword = "https://www.opensubtitles.org/zh/search/moviename-%s/sublanguageid-chi"
_save_tmp_path = None
_enable = False
def __init__(self):
self._ua = Config().get_ua()
def init_config(self, config: dict = None):
self.sitehelper = SiteHelper()
self._save_tmp_path = Config().get_temp_path()
if not os.path.exists(self._save_tmp_path):
os.makedirs(self._save_tmp_path, exist_ok=True)
if config:
self._enable = config.get("enable")
def get_state(self):
return self._enable
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
# 同一行
[
{
'title': '开启opensubtitles.org字幕下载',
'required': "",
'tooltip': '需要确保网络能正常连通www.opensubtitles.org',
'type': 'switch',
'id': 'enable',
}
]
]
}
]
def stop_service(self):
pass
@EventHandler.register(EventType.SubtitleDownload)
def download(self, event):
"""
调用OpenSubtitles Api下载字幕
"""
if not self._enable:
return
item = event.event_data
if not item:
return
# 媒体信息
item_media = item.get("media_info")
if item_media.get("type") != MediaType.MOVIE.value and not item_media.get("imdb_id"):
self.warn("电视剧类型需要imdbid才能搜索字幕!")
return
# 查询名称
item_name = item_media.get("en_name") or item_media.get("cn_name")
# 查询IMDBID
imdb_id = item_media.get("imdb_id")
# 查询年份
item_year = item_media.get("year")
# 查询季
item_season = item_media.get("season")
# 查询集
item_episode = item_media.get("episode")
# 文件路径
item_file = item.get("file")
# 后缀
item_file_ext = item.get("file_ext")
self.info("开始从Opensubtitle.org搜索字幕: %s,imdbid=%s" % (item_name, imdb_id))
subtitles = self.search_subtitles(imdb_id=imdb_id, name=item_name, year=item_year)
if not subtitles:
self.warn("%s 未搜索到字幕" % item_name)
else:
self.info("opensubtitles.org返回数据:%s" % len(subtitles))
# 成功数
subtitle_count = 0
for subtitle in subtitles:
# 标题
if not imdb_id:
if str(subtitle.get('title')) != "%s (%s)" % (item_name, item_year):
continue
# 季
if item_season \
and subtitle.get('season') \
and int(subtitle.get('season').replace("Season", "").strip()) not in item_season:
continue
# 集
if item_episode \
and subtitle.get('episode') \
and int(subtitle.get('episode')) not in item_episode:
continue
# 字幕文件名
SubFileName = subtitle.get('description')
# 下载链接
Download_Link = subtitle.get('link')
# 下载后的字幕文件路径
Media_File = "%s.chi.zh-cn%s" % (item_file, item_file_ext)
self.info("正在从opensubtitles.org下载字幕 %s 到 %s " % (SubFileName, Media_File))
# 下载
ret = RequestUtils(headers=self._ua, cookies=self._cookie).get_res(Download_Link)
if ret and ret.status_code == 200:
# 保存ZIP
file_name = self.sitehelper.get_url_subtitle_name(ret.headers.get('content-disposition'), Download_Link)
if not file_name:
continue
zip_file = os.path.join(self._save_tmp_path, file_name)
zip_path = os.path.splitext(zip_file)[0]
with open(zip_file, 'wb') as f:
f.write(ret.content)
# 解压文件
shutil.unpack_archive(zip_file, zip_path, format='zip')
# 遍历转移文件
for sub_file in PathUtils.get_dir_files(in_path=zip_path, exts=RMT_SUBEXT):
self.sitehelper.transfer_subtitle(sub_file, Media_File)
# 删除临时文件
try:
shutil.rmtree(zip_path)
os.remove(zip_file)
except Exception as err:
ExceptionUtils.exception_traceback(err)
else:
self.error("下载字幕文件失败:%s" % Download_Link)
continue
# 最多下载3个字幕
subtitle_count += 1
if subtitle_count > 2:
break
if not subtitle_count:
if item_episode:
self.info("%s 第%s季 第%s集 未找到符合条件的字幕" % (
item_name, item_season, item_episode))
else:
self.info("%s 未找到符合条件的字幕" % item_name)
else:
self.info("%s 共下载了 %s 个字幕" % (item_name, subtitle_count))
def search_subtitles(self, imdb_id, name, year):
    """Search by IMDB id when available, otherwise by a "name year" keyword."""
    if not imdb_id:
        return self.__search_subtitles_by_keyword("%s %s" % (name, year))
    return self.__search_subtitles_by_imdbid(imdb_id)
def __search_subtitles_by_imdbid(self, imdbid):
    """
    Search OpenSubtitles by IMDB id.

    BUGFIX: the original docstring said "按TMDBID搜索" (search by TMDB id),
    but the method builds an imdbid-based URL. The "tt" prefix is stripped
    before substitution into the search URL.
    """
    return self.__parse_opensubtitles_results(url=self._url_imdbid % str(imdbid).replace("tt", ""))
def __search_subtitles_by_keyword(self, keyword):
"""
按关键字搜索OpenSubtitles
"""
return self.__parse_opensubtitles_results(url=self._url_keyword % quote(keyword))
@classmethod
@lru_cache(maxsize=128)
def __parse_opensubtitles_results(cls, url):
    """
    Fetch an opensubtitles.org search page via headless Chrome and parse
    the subtitle result rows.

    NOTE(review): results are memoised per URL by lru_cache, so the
    cls._cookie side effect below is only refreshed on a cache miss -
    confirm downloads still work once a cached page's cookie has expired.

    :param url: search results URL
    :return: list of dicts with season/episode/title/description/link
    """
    chrome = ChromeHelper()
    if not chrome.get_status():
        return []
    # Load the page in the headless browser
    if not chrome.visit(url):
        return []
    # Page source
    html_text = chrome.get_html()
    # Cookie, reused later by download() for the subtitle file request
    cls._cookie = chrome.get_cookies()
    # Parse the result rows
    ret_subtitles = []
    html_doc = PyQuery(html_text)
    global_season = ''
    for tr in html_doc('#search_results > tbody > tr:not([style])'):
        tr_doc = PyQuery(tr)
        # Season header rows set the season for the episode rows that follow
        season = tr_doc('span[id^="season-"] > a > b').text()
        if season:
            global_season = season
            continue
        # Episode number
        episode = tr_doc('span[itemprop="episodeNumber"]').text()
        # Title
        title = tr_doc('strong > a.bnone').text()
        # Description and download link: movie rows (no season seen yet) use a
        # different cell layout than episode rows
        if not global_season:
            description = tr_doc('td:nth-child(1)').text()
            if description and len(description.split("\n")) > 1:
                description = description.split("\n")[1]
            link = tr_doc('td:nth-child(5) > a').attr("href")
        else:
            description = tr_doc('span[itemprop="name"]').text()
            link = tr_doc('a[href^="/download/"]').attr("href")
        if link:
            link = "https://www.opensubtitles.org%s" % link
        else:
            # Rows without a download link are useless - skip them
            continue
        ret_subtitles.append({
            "season": global_season,
            "episode": episode,
            "title": title,
            "description": description,
            "link": link
        })
    return ret_subtitles
| 9,422 | Python | .py | 237 | 24.864979 | 124 | 0.510522 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,105 | downloader_helper.py | demigody_nas-tools/app/plugins/modules/downloader_helper.py | from app.plugins.modules._base import _IPluginModule
import os
import re
import urllib
# `import urllib` alone does not guarantee the `parse` submodule is loaded;
# this module calls urllib.parse.urlparse / parse_qs, so import it explicitly.
import urllib.parse
from datetime import datetime, timedelta
from threading import Event

import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from jinja2 import Template

from app.downloader import Downloader
from app.plugins import EventHandler
from app.sites import Sites
from app.utils.types import DownloaderType
from app.utils.types import EventType
from config import Config
class DownloaderHelper(_IPluginModule):
    """
    Downloader helper plugin: periodically resumes completed-but-not-seeding
    torrents, tags torrents with their site, deletes torrents whose data files
    are missing, and deletes torrents in sync with source-file removal.
    """
    # Plugin display name
    module_name = "下载器助手"
    # Plugin description (shown in the UI)
    module_desc = "定期将完成但未做种的种子设为做种,种子赋予站点标签,联动删种,自动删丢失文件坏种。"
    # Plugin icon (inline base64 data URI on the following line)
module_icon = "'); background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMAAAADACAYAAABS3GwHAAART0lEQVR4nO3dC3xMVx4H8N/M5CGRaFcJolXimUQkHqVR2lpaStViJVpaQUMpFdUE9VqVqqX1CKXs5lWr3SI+jVeRaD26qFe9Hx+7bRBZj4hQeUoy+zk3iWZzEu4dM3Pv3PP/fj4+OGcymXvn/Oace+fccw25ublmECIoI73xRGQUACI0CgARGgWACI0CQIRGASBCowAQoVEAiNAoAERoFAAiNAoAERoFgAiNAkCERgEgQqMAEKFRAIjQKABEaBQAIjQKABEaBYAIjQJAhEYBIEKjABChUQCI0CgARGgUACI0CgARGgWACM1J9B2gBrPZjE0bt2JJzEpk3bwNwACDAajrVRtRUe+he/cXxdspKqHVoe2soKAA48dF4uDB4zCwVl+J2VyCXq90w7x5s7k6Yn00BLKzuNjVOHjwWJWNnzEYjNi+bReSkjZydcT6qAewo5ycHHT/Yz8UFBQ99Jd6e9fFlq3ruHJiXdQD2FF+foH0R47bt+9IwyViWxQAO8rOLj3glSMvrxCXL18RY8eoiAJgR0VFRdIYn2gHvRt2VFxcIvuXsYPk6g6UifVQAIjQKABEaBQAIjQKABEaBcCOlBzUsvlC7A+xLQqAHSk7p0ON3x4oAHakpEkbjPTW2APtZTtSdF7fDJgoBDZHe9iOTCb5u5tlhQZBtudQs0GvXMnA2TPnkH7lKo79fAK//JKG6zcyca+wCI7ypWlxMVdUJXZdgJNTWWAM2jskYAfozi7OqOXpAa96deHlVRc9erwIP7+WaNz4ae7xWqX5ABQXFyMxcQ327zuEU6fOSZPEaIqAdrm4GOHf2hd9X+2F/gP6av71ajYAeXl5SE7egti/r8aNG7doEpnDMePppxti3Phw9OjRTbMvXpMBuHDhP4iMnI60XzPo097BsaFcYJA/Fi2ai9q1/6C5jdHcx2pSUjJGjHgXF9P+S41fB1jPfeL4Wbw5NBwnTpzS3AZpqgdITPwKCxeugNFg4uqI43NyMmDU6LcQHj5cM9uimR4gaf23WLRwJTV+HSsqMuPzZXFSL68VmugB2Jj/jdfDUVQk/4IR4rhcXEyIi18Gf39f1bdB9QDcu3cPg/48DBcvZnB1RL/q1auNtesSUauWp6rbqPoQaO03G5CWRhd/i+batSysWfON6lutag9w924OevYcgNwcWv7Duh78lj76LOvSJyg9S2f5mTo2FErdmQxPTw+uzl5UXRv0k7mfIuduvuLTnezcsoeHGxo2bACTkxHFRcXS+8Cex2gwwmgsfT6D0QAD6+QMZRPLDJDq719wbjDAyan0oLu8zCDN2TGVrtdZ/jzS85b/DKQDdfa9HDvFJ5UbjdLPsb9L/8+ep/Tv0t9b+jy5OXnYuvV7bnuqYjCY0b9/b7i4utz//Ub2e4xG6TmNJqP0O+5vNysrmzxnLHsdKP+Zst9fXi493mQq20+G+/umfP9Jr7/yv8ue21T2e01OJqSnZ2DP7h+RmZmFixevwGxW9j4WFhZjzpy/Yv78OVydvajWA+Tm5qJP7xBkZ9/l6h7E1dUZE98fg27dukrzTxxJWtol9P/TW7JeMZsHtGnz16hfvx5Xp0Xbt+/E7NnzkZerrDd//HEPbNm6Fu7u7lydPah2DHDs2ElkZ//GlVeHfer7+jVF6s5vERo6wOEaP8pCL1dJiRk5OfIfr7aePbsjNfVbBLRpIb1XcrE2wNqCWlQLwOlTZxWNH/38muGrr2Lh4VGTq3MYCvtaR7sk0t3dDfHxK9DE50murnqGsragDtUCkJKyiyurjrOzAV+sXFJNLdESdvy0YsVimEzyP9yUtAVrUy0At27d4sqqEzp4gOrni4l89et7ITi4vezHK2kL1qZaAH67m8OVVYftUOJY/P39bNIW
rE21ABTkF3JlVWFn8Ly9G1RRQ7Ss1mOeso9h5LYFW1AtAHKP79j57saNG3HlRNvc3WvIvkxVzWN9FadCyN9qJasqE21Q1qjVS4CKAZB/loAujHE8yla2U+/9pQttiU04yv0NHCIAtEYmsRXqAYjQ6E7xVWAXb69ZsxYnT57B9WuZaNbcByNGDEX37i+UzRR1HPv2HcDq1d/gyOHjeOwxTzRr3hRvDg1B5+eedajtsBUKQAXsJnazZ8/D1i2pKKlw4un8uV8QFfkXdH2+E5YuXcD9nFYtXrwciQnf3D/IzMy8jczMo9i/7wgCg1ph1aoYuLq6Osz22AINgSpYuTIemzf9f+Mvx+b+/7j3EMLDx3F1WhSzZAUS4v9Z5RkWdnB64vh5TJ8eLfzxFQWgDJuSGxe7hiuv7PChUxg5cixXriVzP/4McXFfP3Q1vdSUPapORdaCB+8hgezcuavKT/6qHD1yGmFho6uoUd9HH/0Va9cmy1xK0oDjFADCnDx5WsF+YA3nPEaOGMPVqIk1/g1JWxSto3rmzHmuTCQUgDJGC760OXr0LMKGqd8TsHH8zJnR2JC0VfEiwuXXRIuKAlBmwoSxii7lK3f8+HkMH/4OV25P06Z9hI3JOyxYXMCMF154jisXCQWgTGBQALzqPcGVy3Hs57OqHBOwBjxl8kxs3fK9RcvHe3vXxXNdgrlykVAAKoiJmQ9nZ0t2SekxwZh3IrgaW5o8eSa2bdtl0ZwbJyfg47kzHfsaayugAFTQqlVzrP7HSkXXs1Z04MAxTJk8iyu3haioGdixfY9Fn/xs+2bMiETbtm24OtFQACpp2ZKF4Ivf78+l0PbtuxEVOdOmr7G08e+26JOfrW81+6PJeK1fH5u8NkdDAaiCr29LfPnlCmk9e0ukpOyxWQgiP5he1viVv3Ws8U+b/j769OnF1YmKAlANX7+W+HK15T0BC0HEhCn/V6Zo0oGBn8TAbhuVkrLX4sY/a1YkBgx4jasTGQXgAVhP8I81Ky3uCXbvPoD3J069/3+jkhtfm4HiCl9Ns2FPyo69Fg172DqjU6a+R8OeKlAAHoIdEzxKT/DDD/sxfnyU9G8XF2eu/kHKH//BpGllB7yWNf7pMyZh0KABXB2hAMjCeoLSA2PLeoIf9x5EVNRMaRVluQMhtiqzs7MzZs6IRmrqjxY3/siocTTseQAKgEytWrVAYuIKGC08RZqyYw/eGT2pyunJVTGXmDExYio2blT+DS/uD3sm4PXXB3F15HcUAAX8/Fth9eovLP6eQAm2OvSFCxctvEF4CSZOHI2QEBr2PAwFQCE/v5aIT/jc4p7A9kowecp4vPnWGxp9fdpCAbBAQIAfEhKW2aUnUKYEEyJGYfBgGvbIRQGwUECAP+Lil2qoJyht/GFhQ7kaUj0KwCNo06Y14uOWwmTh2SHrKcGYMWHU+C1AAXhEbQJbIzY2Bkq+47Imdg3D2HeHY9ToEZrbN46AAmAFgYEBiJV6AvvuTnY9wPj3RiI8fDhXR+ShAFhJUFAAVq1aZLeegDX+OdFTMHLkMK6OyEcBsKJ27QLxxcqFNg8BG/Z8OC0Cffu+wtURZSgAVvbMM+2wfMWnNgyBGVM/nICQkP5cDVGOAmADnTp1sEkI2LBn0gdjERo6kKsjlqEA2AgLwbLP51stBGzYM/H90Rg6NJSrI5ajANhQcHBHLFnyySOHgH3yjxk7HMOG0fQGa6MA2FiXrsFYvHiuNDvTEqzxh4cPwejRdKrTFigAdtD1+c5YEmNJT2BG2PBQvDtuFFdDrIMCYCddu3bG4iXyewL2yT9kyEBERGh7JWpHRwGwIxaCzz6bI6snCB38Gj6IfI8rJ9blEAHQ021Su/3xebwdPrTaa4xZD8GGPVOnTuLqiPVV/S5oiB7vEDxmzNtYtz4BXbo8Aw+PGtJY38XVhNatWyAufhkmTNDWsuuWUHafYPU4xD3CjEb9xaBx40ZYumwBsrNvIy3tIurV80KDBvW5
xzkqR7lPsGoBkLtviotLkJWVjSZNuCpdePzxxxAUpL81Om/dygbrAOS8z2rmRLUhkJubvLsTsovDz5w5y5UTbcvJyZHdA8htC7agWgBq1pS3LDfbifl5BVw50ba0Xy9bvS3YgmoBqFffiyurzqbN26upIVp0585vOHDgkE3agrWpFoDOnTtyZdW5fOm/iI52nBtUi27WrLnIzS2UvReUtAVrUy0Abdr4K1ovef26TYiJWSH8jZ21LC8vHx9/vAA/fP8vBa/SXNYW1KFaANiFI7VqyR/7sRXS4mL/ibfeHIWsrFtcPVHX6dNnMXDAUKxft1nRanasDbC2oBbVToO6urqix0svSrf2lIsdEJ86dQF9eofCx+cptG0XiPbtg+BRs6a0lHjpjqcewlYq9r7OTk64fiMThw//jBPHT+LXX9NRXGxW/NVl7z4vSW1BLYbc3FzVWszNm1no1XMgioqo0YrI3d0V27YnwdPTQ7WtV3UqxBNP1Mbg12kBVxGx3iQi4h1VGz+0MBeIXeX0h9qeXDnRN3//5uj3p1dV30bVA1CnzhPS/XnZ8n5EDG7uzljw6RzFd8yxBU3MBm3d2hfR0R9KF34TvSvBsmUL4O3dQBPbqZnp0H1e7YWpUyPoLI6OGY1mJCQuR7t2QZrZSE1dDxA6eABiln4C1xoOMUubKODdsK50Y5HAwNaa2m2auyCGXTb49dexaOLTkL711QUz2rbzx5o1f5eWk9caVb8HeJjNm7dh2dK/4erVTF1dFikCdjzXtGkjTJ4cgY6dOmh2izUdAObevSJs+24HvvsuFYcPH0NhYZGFN44jtleCmjXd8Wxwe3TtGox+/dQ/zfkwmg9ARfn5+Th37gIOHTqCS5cu40p6hvR1/D0WivJM0KjJdgy/7182OnV3d4NXvTpo2NAbTz31JDp3fhY+Po3h7Ow4x3AOFQBCrI3GEkRoFAAiNAoAERoFgAhNmK9c2TIdhYX36PsEGdgXkGylBi1MVrM13QdgQ1IyNmzYhGvXM1FYcI+rJzx2WtCthit8fBrh5Z7d0b9/X+4xeqHb06Dp6RmYOSMaR4+epk/9R2KGr29TLPg0Wjrfrze6DMCd23cQEhKGa9eyuDpimaca1cfatQmoUaOGrvagLg+CV65KoMZvZZcuZiD52y262iboMQDsAG7fvp+4cvJo2Pyr9euTdbcXdReA27fvIDOTPv1t4fqNm7rbJt0FwNXVBa4CnL5Tg6uLi+62SXcBcHNz0+XZCi1oHeCru23S5UFwSGh/usDeyti9y954Y5Cutgl6DUDv3i+jz6s9KARWwvZj39deRocObXWxPRXp+nqALVu2Y968Jfjtjvy7lZDfsTNqtWvXwsi3h2LIkFBd7hndXxCTdTMLhw4fxckTZ6Qb0hlNNP/vYUqKS1DXqw6Cn30GPk2bSIuX6RVdEUaERh+HRGgUACI0CgARGgWACI0W4ZShsLBQWqDL0bBpIU5O9BY/CO2dB0hI+Arf79yFKxlXUZBfqOot/ZViC1e5udVAo6efRO9XXsLAP/dznBdvR3QatAoZGVcRFTkdp0//m690QOyb3I6dgrD884VwcqBV2+yBAlBJbm4eQkPCkJ5+jatzdO07BGD58s/gosNZnZaig+BK4mK/RHr6Va5cD44cPoH9+w/qctssRQGoZJ/UQPQ6b8iAA/sPcaUiowBUYi7R94hwz979XJnIKACVmHQ+Wa5Zs8ZcmcgoAJXUrVuHK9OTF57vouvtU4oCUMnYd8NhMunzGMDT0w3BnTty5SKjAFTSvHlTDAsL1eHVZCWYEz0NDRrU52pERt8DVOOnnw5h7txFuHwpA457s0ozjEYjgoL8MHnKRLRo0Yx7hOgoAERoNAQiQqMAEKFRAIjQKABEaBQAIjQKABEaBYAIjQJAhEYBIEKjABChUQCI0CgARGgUACI0CgARGgWACI0CQIRGASBCowAQoVEAiNAoAERoFAAiNAoAERoFgAiNAkCERgEg
QqMAEKFRAIjQKABEXAD+BxIoFEjfbXnqAAAAAElFTkSuQmCC"
# 主题色
module_color = "#6c7a91"
# 插件版本
module_version = "1.4"
# 插件作者
module_author = "hotlcc"
# 作者主页
author_url = "https://gitee.com/hotlcc"
# 插件配置项ID前缀
module_config_prefix = "com.hotlcc.downloader-helper."
# 加载顺序
module_order = 21
# 可使用的用户级别
user_level = 2
# 私有属性
__timezone = None
# 调度器
__scheduler = None
# 下载器
__downloader = None
# 退出事件
__exit_event = Event()
# 任务运行中状态
__running_state = Event()
# 配置对象
__config_obj = None
    @staticmethod
    def get_fields():
        """
        Build the plugin's settings form: global switches, cron / exclude-tags
        inputs, plus one collapsible task section per enabled qBittorrent or
        Transmission downloader.
        """
        fields = [
            # One section
            {
                'type': 'div',
                'content': [
                    # One row
                    [
                        {
                            'id': 'enable',
                            'type': 'switch',
                            'title': '启动插件',
                            'tooltip': '插件总开关',
                        },
                        {
                            'id': 'enable_notify',
                            'type': 'switch',
                            'title': '运行时通知',
                            'tooltip': '运行任务后会发送通知(需要打开插件消息通知)',
                        },
                        {
                            'id': 'run_once',
                            'type': 'switch',
                            'title': '立即运行一次',
                            'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照周期运行(同时上次触发运行的任务如果在运行中也会停止)',
                        }
                    ]
                ]
            },
            {
                'type': 'div',
                'content': [
                    [
                        {
                            'id': 'listen_download_event',
                            'type': 'switch',
                            'title': '监听下载事件',
                            'tooltip': '监听下载添加事件。当在NAStool中添加下载任务时,会自动触发运行本插件任务进行自动做种。',
                        },
                        {
                            'id': 'listen_source_file_event',
                            'type': 'switch',
                            'title': '监听源文件事件',
                            'tooltip': '监听源文件事件。当在NAStool的“媒体整理/历史记录”中删除源文件时,会自动触发运行本插件任务进行自动删种。',
                        },
                        {
                            'id': 'use_site_config',
                            'type': 'switch',
                            'title': '使用站点配置',
                            'tooltip': '给种子添加站点标签时,是否优先以【站点配置】中配置的站点名称作为标签内容?',
                        },
                        {
                            'title': ' '
                        }
                    ],
                    [
                        {
                            'type': 'text',
                            'title': '定时执行周期',
                            'tooltip': '设置插件任务执行周期,支持5位cron表达式;应避免任务执行过于频繁',
                            'content': [
                                {
                                    'id': 'cron',
                                    'placeholder': '0/30 * * * *',
                                }
                            ]
                        },
                        {
                            'type': 'text',
                            'title': '排除种子标签',
                            'tooltip': '下载器中的种子有以下标签时不进行任何操作,多个标签使用英文,分割',
                            'content': [
                                {
                                    'id': 'exclude_tags'
                                }
                            ]
                        }
                    ]
                ]
            }
        ]
        # Keep only valid downloaders (enabled qBittorrent / Transmission)
        downloaders = {k: v for k, v in Downloader().get_downloader_conf_simple().items()
                       if v.get("type") in ["qbittorrent", "transmission"] and v.get("enabled")}
        # One task section per downloader
        for downloader_id, downloader in downloaders.items():
            # NOTE(review): this reassignment shadows the dict key; presumably
            # both hold the same id — confirm against get_downloader_conf_simple
            downloader_id = downloader.get('id')
            downloader_id_str = str(downloader_id)
            downloader_name = downloader.get('name')
            fields.append({
                'type': 'details',
                'summary': '任务:' + downloader_name,
                'content': [
                    [
                        {
                            'id': 'downloader.' + downloader_id_str + '.enable',
                            'type': 'switch',
                            'title': '任务开关',
                        },
                        {
                            'id': 'downloader.' + downloader_id_str + '.enable_seeding',
                            'type': 'switch',
                            'title': '自动做种',
                            'tooltip': '设置“' + downloader_name + '”下载器是否开启自动做种,开启后将会定期把完成但未做种的种子设为做种',
                        },
                        {
                            'id': 'downloader.' + downloader_id_str + '.enable_tagging',
                            'type': 'switch',
                            'title': '站点标签',
                            'tooltip': '设置“' + downloader_name + '”下载器是否开启自动添加站点标签,开启后将会定期完善种子的站点标签',
                        }
                    ],
                    [
                        {
                            'id': 'downloader.' + downloader_id_str + '.enable_delete',
                            'type': 'switch',
                            'title': '自动删种',
                            'tooltip': '设置“' + downloader_name + '”下载器是否开启自动删种,开启后以下功能将生效:1、定期删除丢失文件的坏种;2、通过NAStool删除媒体源文件时,同步删除下载器中的关联种子(需开启【监听源文件事件】)。',
                        }
                    ]
                ]
            })
        return fields
    def get_page(self):
        """
        Help page rendered by the bottom-left button of the settings dialog.

        :return: (page title, rendered HTML, JS callback for the back button)
        """
        template = """
          <div class="modal-body" style="max-height: 545px;">
            <p>本插件仅支持qBittorrent和Transmission两种下载器。</p>
            <div class="card">
              <div class="card-header">
                <h3 class="card-title">
                  <strong>全局配置</strong>
                </h3>
              </div>
              <div class="card-body">
                <table class="table table-vcenter card-table table-hover table-striped">
                  <thead>
                    <tr>
                      <th>配置项</th>
                      <th>说明</th>
                    </tr>
                  </thead>
                  <tbody>
                    <tr>
                      <td>启动插件</td>
                      <td>插件任务总开关,关闭后插件将不会在后台运行,即定时任务和事件监听不会生效,但不影响“立即运行一次”。</td>
                    </tr>
                    <tr>
                      <td>运行时通知</td>
                      <td>插件运行后,以通知方式汇报运行结果。</td>
                    </tr>
                    <tr>
                      <td>立即运行一次</td>
                      <td>开关打开并保存插件配置后会立即运行一次插件任务,该功能不受“启动插件”的管控,也不受事件监听和定时周期的影响,只要存在有效的任务配置就能够生效。</td>
                    </tr>
                    <tr>
                      <td>监听下载事件</td>
                      <td>打开此监听后,通过NAStool向下载器中添加种子时会触发插件任务执行打标,下载、刷流、订阅、转种、辅种等场景均有效。单独在下载器中直接添加时不会生效,但当定时任务扫描时也会生效。</td>
                    </tr>
                    <tr>
                      <td>监听源文件事件</td>
                      <td>打开此监听后,在“媒体整理/历史记录”中删除源文件(或源及媒体文件)时,是否允许联动删除下载器中的对应种子,需要配合任务中的“自动删种”进行使用。该功能比NAStool作者开发的“下载任务联动删除”插件更加强大。</td>
                    </tr>
                    <tr>
                      <td>使用站点配置</td>
                      <td>打开后,在给种子添加站点标签时,会优先采用“站点配置”中配置的站点名称作为标签内容,否则使用域名关键字。</td>
                    </tr>
                    <tr>
                      <td>定时执行周期</td>
                      <td>插件定时执行的周期,仅支持5位cron表达式,即格式“分 时 日 月 周”。</td>
                    </tr>
                    <tr>
                      <td>排除种子标签</td>
                      <td>插件在执行任务时,带有相关标签的种子将被忽略。支持配置多个标签,多个用英文逗号分隔。</td>
                    </tr>
                  </tbody>
                </table>
              </div>
            </div>
            <div class="card">
              <div class="card-header">
                <h3 class="card-title">
                  <strong>任务配置</strong>
                </h3>
              </div>
              <div class="card-body">
                <p>当NAStool中有配置下载器时,插件配置界面会展示这些下载器对应的任务配置栏。</p>
                <table class="table table-vcenter card-table table-hover table-striped">
                  <thead>
                    <tr>
                      <th>配置项</th>
                      <th>说明</th>
                    </tr>
                  </thead>
                  <tbody>
                    <tr>
                      <td>任务开关</td>
                      <td>是否启用当前下载器的任务。</td>
                    </tr>
                    <tr>
                      <td>自动做种</td>
                      <td>插件运行时,会将已经下载完成但不是做种状态的种子设为做种状态。转种、辅种场景有时候校验完成后不会自动做种,可以通过该功能解决。</td>
                    </tr>
                    <tr>
                      <td>站点标签</td>
                      <td>插件运行时,会给没有站点标签的种子添加站点标签,方便根据站点统计种子数量。</td>
                    </tr>
                    <tr>
                      <td>自动删种</td>
                      <td>自动删种有两个方面的功能:一方面独立生效,插件运行时会检测并删除丢文件的坏种;另一方面需要配合“监听源文件事件”使用,联动删除种子。</td>
                    </tr>
                  </tbody>
                </table>
              </div>
            </div>
            <div class="card">
              <div class="card-header">
                <h3 class="card-title">
                  <strong>关于</strong>
                </h3>
              </div>
              <div class="card-body">
                <p>开发者:<a href="{{ module_info.author_url }}">{{ module_info.module_author }}</a></p>
                <p>开源仓库:<a href="{{ module_info.author_url }}/nastool-plugin">{{ module_info.author_url }}/nastool-plugin</a></p>
                <p>当前版本:v{{ module_info.module_version }}</p>
              </div>
            </div>
          </div>
        """
        return '使用帮助', Template(template).render(module_info = self), "DownloaderHelper.goBack()"
    @staticmethod
    def get_script():
        """
        Page JS: registers window.DownloaderHelper with a goBack() helper that
        switches from the plugin help-page modal back to the settings modal.
        """
        return """
        (function() {
            var DownloaderHelper = {
                id: "DownloaderHelper"
            }
            window.DownloaderHelper = DownloaderHelper;
            var goBack = function() {
                $("#modal-plugin-page").modal('hide');
                $("#modal-plugin-" + DownloaderHelper.id).modal('show');
            };
            DownloaderHelper.goBack = goBack;
        })();
        """
def __parse_config(self, config = None):
"""
解析配置
"""
config_obj = {}
if (not config):
self.debug(f"解析配置: config = {config}, config_obj = {config_obj}")
return config_obj
config_obj['enable'] = config.get('enable')
config_obj['enable_notify'] = config.get('enable_notify')
config_obj['run_once'] = config.get('run_once')
config_obj['cron'] = config.get('cron')
config_obj['exclude_tags'] = config.get('exclude_tags')
config_obj['use_site_config'] = config.get('use_site_config')
config_obj['listen_download_event'] = config.get('listen_download_event')
config_obj['listen_source_file_event'] = config.get('listen_source_file_event')
config_obj['downloader'] = {}
for config_key, config_value in config.items():
if (config_key.startswith('downloader.')):
downloader = config_obj.get('downloader')
config_key_array = config_key.split('.')
downloader_id = int(config_key_array[1])
downloader_config_key = config_key_array[2]
downloader_info = downloader.get(downloader_id)
if (not downloader_info):
downloader_info = {}
downloader[downloader_id] = downloader_info
downloader_info[downloader_config_key] = config_value
self.debug(f"解析配置: config = {config}, config_obj = {config_obj}")
return config_obj
def __un_parse_config(self, config_obj = None):
"""
反解析配置
"""
config = {}
if (not config_obj):
self.debug(f"反解析配置: config_obj = {config_obj}, config = {config}")
return config
config['enable'] = config_obj.get('enable')
config['enable_notify'] = config_obj.get('enable_notify')
config['run_once'] = config_obj.get('run_once')
config['cron'] = config_obj.get('cron')
config['exclude_tags'] = config_obj.get('exclude_tags')
config['use_site_config'] = config_obj.get('use_site_config')
config['listen_download_event'] = config_obj.get('listen_download_event')
config['listen_source_file_event'] = config_obj.get('listen_source_file_event')
downloader = config_obj.get('downloader')
for downloader_id, downloader_info in downloader.items():
for downloader_config_key, config_value in downloader_info.items():
config_key = 'downloader.' + str(downloader_id) + '.' + downloader_config_key
config[config_key] = config_value
self.debug(f"反解析配置: config_obj = {config_obj}, config = {config}")
return config
def __init_scheduler(self, timezone = None):
"""
初始化调度器
"""
if (self.__scheduler):
return
if (not timezone):
timezone = Config().get_timezone()
self.__scheduler = BackgroundScheduler(timezone = timezone)
self.debug(f"服务调度器初始化完成")
    def init_config(self, config = None):
        """
        (Re)initialize the plugin from persisted config: parse config, stop any
        previous service, start the cron job when the plugin is active, run once
        immediately when requested, then start the scheduler.
        """
        self.debug(f"初始化配置")
        timezone = Config().get_timezone()
        self.__timezone = timezone
        self.__downloader = Downloader()
        # Read configuration
        if config:
            self.__config_obj = self.__parse_config(config)
        # Stop any existing jobs before (re)starting
        self.stop_service()
        self.debug(f"停止现有服务成功")
        # Start the plugin service (cron job) when the plugin is active
        if (self.get_state()):
            self.__init_scheduler(timezone)
            cron = self.__config_obj.get('cron')
            if (cron):
                self.__scheduler.add_job(self.__do_task, CronTrigger.from_crontab(cron))
                self.info(f"定时任务已启动,周期: cron = {cron}")
        else:
            self.warn(f"插件配置无效,服务未启动")
        # Run once immediately if requested (independent of the enable switch)
        if (self.__config_obj.get('run_once')):
            if (self.__check_has_enabled_task()):
                self.__init_scheduler(timezone)
                # Schedule a one-shot job a few seconds from now
                self.__scheduler.add_job(self.__do_task, 'date', run_date = datetime.now(tz = pytz.timezone(timezone)) + timedelta(seconds = 3))
                self.info(f"立即运行一次成功")
            else:
                self.warn(f"任务配置无效,立即运行一次未成功")
            # Reset the one-shot switch and persist it
            self.__config_obj['run_once'] = False
            self.update_config(self.__un_parse_config(self.__config_obj))
        # Start the scheduler when any job was added
        if (self.__scheduler):
            self.__scheduler.print_jobs()
            self.__scheduler.start()
            self.debug(f"服务调度器初启动成功")
def __check_has_enabled_sub_task(self, downloader_info = None):
"""
判断单个任务中是否有生效的子任务
"""
if (not downloader_info):
return False
return True if downloader_info.get('enable_seeding') \
or downloader_info.get('enable_tagging') \
or downloader_info.get('enable_delete') else False
def __check_has_enabled_task(self, downloader = None):
"""
判断任务列表中是否有生效的任务
"""
if (not downloader):
downloader = self.__config_obj.get('downloader')
if (not downloader):
return False
for downloader_info in downloader.values():
enable = downloader_info.get('enable')
if (enable and self.__check_has_enabled_sub_task(downloader_info)):
return True
return False
def get_state(self):
"""
插件生效状态
"""
state = True if self.__config_obj \
and self.__config_obj.get('enable') \
and (self.__config_obj.get('cron') or self.__config_obj.get("listen_download_event") or self.__config_obj.get("listen_source_file_event")) \
and self.__check_has_enabled_task() else False
self.debug(f"插件状态: {state}")
return state
    def stop_service(self):
        """
        Stop the plugin service: remove all scheduled jobs and, when the
        scheduler is running, shut it down while signalling any in-flight task
        to abort via the exit event.
        """
        try:
            if self.__scheduler:
                self.__scheduler.remove_all_jobs()
                if self.__scheduler.running:
                    # Signal running tasks to stop before shutting down
                    self.__exit_event.set()
                    self.__scheduler.shutdown()
                    self.__exit_event.clear()
                self.__scheduler = None
                self.debug(f"插件服务停止成功")
        except Exception as e:
            self.error(f"插件服务停止异常: {str(e)}")
@staticmethod
def __split_tags(tags = None):
"""
分割标签tags为数组
"""
return re.split("\s*,\s*", tags.strip()) if tags else []
def __get_exclude_tag_array(self):
"""
获取排除的标签数组
"""
exclude_tags = self.__config_obj.get("exclude_tags")
return self.__split_tags(exclude_tags)
def __exists_exclude_tag(self, tags = None):
"""
判断多个标签中是否存在被排除的标签
"""
if (not tags):
return False
tags_type = type(tags)
if (tags_type == str):
return self.__exists_exclude_tag(self.__split_tags(tags))
elif (tags_type == list):
exclude_tag_array = self.__get_exclude_tag_array()
if (not exclude_tag_array):
return False
for tag in tags:
if (tag in exclude_tag_array):
return True
return False
else:
return False
@staticmethod
def __check_need_seeding(torrent, downloader_type):
"""
检查是否需要做种
"""
if (downloader_type == DownloaderType.QB):
return torrent.state_enum.is_complete and torrent.state_enum.is_paused
elif (downloader_type == DownloaderType.TR):
return torrent.progress == 100 and torrent.stopped and torrent.error == 0
else:
return False
@staticmethod
def __get_torrent_hash(torrent, downloader_type):
"""
获取种子的hash
"""
if (downloader_type == DownloaderType.QB):
return torrent.get('hash')
elif (downloader_type == DownloaderType.TR):
return torrent.hashString
else:
None
@staticmethod
def __get_torrent_name(torrent, downloader_type):
"""
获取种子的名称
"""
if (downloader_type == DownloaderType.QB):
return torrent.get('name')
elif (downloader_type == DownloaderType.TR):
return torrent.get('name')
else:
None
def __seeding_one_for_qb(self, torrent):
"""
qb单个做种
"""
# 判断种子中是否存在排除的标签
torrent_tags = self.__split_tags(torrent.get('tags'))
if (self.__exists_exclude_tag(torrent_tags)):
return False
downloader_type = DownloaderType.QB
if (not self.__check_need_seeding(torrent, downloader_type)):
return False
torrent.resume()
hash = self.__get_torrent_hash(torrent, downloader_type)
name = self.__get_torrent_name(torrent, downloader_type)
self.info(f'[QB]单个做种完成: hash = {hash}, name = {name}')
return True
def __seeding_one_for_tr(self, torrent, downloader):
"""
tr单个做种
"""
# 判断种子中是否存在排除的标签
torrent_tags = torrent.get('labels')
if (self.__exists_exclude_tag(torrent_tags)):
return False
downloader_type = DownloaderType.TR
if (not self.__check_need_seeding(torrent, downloader_type)):
return False
downloader.start_torrents(torrent.id)
hash = self.__get_torrent_hash(torrent, downloader_type)
name = self.__get_torrent_name(torrent, downloader_type)
self.info(f'[TR]单个做种完成: hash = {hash}, name = {name}')
return True
def __seeding_batch_for_qb(self, torrents):
"""
qb批量做种
"""
self.info('[QB]批量做种开始...')
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if(self.__seeding_one_for_qb(torrent)):
count += 1
self.info('[QB]批量做种结束')
return count
def __seeding_batch_for_tr(self, downloader, torrents = None):
"""
tr批量做种
"""
self.info('[TR]批量做种开始...')
if (not torrents):
torrents = self.__get_torrents(downloader)
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if(self.__seeding_one_for_tr(torrent, downloader)):
count += 1
self.info('[TR]批量做种结束')
return count
def __seeding_batch(self, downloader, torrents = None):
"""
批量做种
"""
downloader_type = downloader.get_type()
if (not torrents):
torrents = self.__get_torrents(downloader)
if (downloader_type == DownloaderType.QB):
return self.__seeding_batch_for_qb(torrents)
elif (downloader_type == DownloaderType.TR):
return self.__seeding_batch_for_tr(downloader, torrents)
return 0
@staticmethod
def __parse_url_query(query = None):
"""
解析url
:param query 字典
"""
if (not query or len(query) <= 0):
return {}
return urllib.parse.parse_qs(query)
@classmethod
def __parse_tracker_for_qb(cls, torrent = None):
"""
qb解析 tracker
"""
if (not torrent):
return None
tracker = torrent.get('tracker')
if (tracker and len(tracker) > 0):
return tracker
magnet_uri = torrent.get('magnet_uri')
if (not magnet_uri or len(magnet_uri) <= 0):
return None
magnet_uri_obj = urllib.parse.urlparse(magnet_uri)
query = cls.__parse_url_query(magnet_uri_obj.query)
tr = query['tr']
if (not tr or len(tr) <= 0):
return None
return tr[0]
@staticmethod
def __parse_tracker_for_tr(torrent = None):
"""
tr解析 tracker
"""
if (not torrent):
return None
trackers = torrent.trackers
if (not trackers or len(trackers) <= 0):
return None
tracker = trackers[0]
return tracker.get('announce')
@classmethod
def __parse_tracker(cls, torrent, downloader_type):
"""
解析 tracker
"""
if (downloader_type == DownloaderType.QB):
return cls.__parse_tracker_for_qb(torrent)
elif (downloader_type == DownloaderType.TR):
return cls.__parse_tracker_for_tr(torrent)
else:
return None
@staticmethod
def __parse_hostname_from_url(url = None):
"""
从url中解析域名
"""
if (not url):
return None
url_obj = urllib.parse.urlparse(url)
if (not url_obj):
return None
return url_obj.hostname
@staticmethod
def __parse_keyword_from_hostname(hostname = None):
"""
从域名中解析关键字
"""
if (not hostname):
return None
hostname_array = hostname.split('.')
hostname_array_len = len(hostname_array)
if (hostname_array_len >= 2):
return hostname_array[-2]
elif (hostname_array >= 1):
return hostname_array[-1]
else:
return None
@classmethod
def __parse_keyword_from_url(cls, url = None):
"""
从url中解析域名关键字
"""
return cls.__parse_keyword_from_hostname(cls.__parse_hostname_from_url(url))
@staticmethod
def __build_site_tag_by_hostname_keyword(keyword = None):
"""
根据域名关键字构造站点标签
"""
if (not keyword):
return None
return f'站点/{keyword}'
    def __tagging_one_for_qb(self, torrent, site_dict = {}):
        """
        Ensure a single qBittorrent torrent carries the correct site tag,
        preferring the configured site name over the hostname keyword when
        "use site config" is enabled (the other variant is removed if present).

        NOTE(review): the mutable default ``site_dict={}`` is shared across
        calls; it is only read here, so this is harmless but worth confirming.

        :return: True when the torrent's tags were changed
        """
        # Skip torrents carrying an excluded tag
        torrent_tags = self.__split_tags(torrent.get('tags'))
        if (self.__exists_exclude_tag(torrent_tags)):
            return False
        downloader_type = DownloaderType.QB
        tracker = self.__parse_tracker(torrent, downloader_type)
        keyword = self.__parse_keyword_from_url(tracker)
        if (not keyword):
            return False
        use_site_config = self.__config_obj.get("use_site_config")
        site_tag = self.__build_site_tag_by_hostname_keyword(keyword)
        site_name_tag = self.__build_site_tag_by_hostname_keyword(site_dict.get(keyword)) if site_dict else None
        result = False
        if (use_site_config and site_name_tag):
            # Prefer the configured site name: add it, drop the keyword tag
            if (site_name_tag not in torrent_tags):
                torrent.add_tags(site_name_tag)
                result = True
            if (site_tag in torrent_tags):
                torrent.remove_tags(site_tag)
                result = True
        else:
            # Fall back to the hostname keyword tag, drop the site-name tag
            if (site_tag not in torrent_tags):
                torrent.add_tags(site_tag)
                result = True
            if (site_name_tag in torrent_tags):
                torrent.remove_tags(site_name_tag)
                result = True
        if (not result):
            return False
        hash = self.__get_torrent_hash(torrent, downloader_type)
        name = self.__get_torrent_name(torrent, downloader_type)
        self.info(f'[QB]单个打标成功: hash = {hash}, name = {name}')
        return True
    def __tagging_one_for_tr(self, torrent, downloader, site_dict = {}):
        """
        Ensure a single Transmission torrent carries the correct site label,
        preferring the configured site name when "use site config" is enabled;
        labels are only written back when they actually change.

        :return: True when the torrent's labels were changed
        """
        # Skip torrents carrying an excluded label
        torrent_tags = torrent.get('labels')
        if (self.__exists_exclude_tag(torrent_tags)):
            return False
        if (not torrent_tags):
            torrent_tags = []
        # Work on a copy so the final comparison can detect a change
        torrent_tags_copy = torrent_tags.copy()
        downloader_type = DownloaderType.TR
        tracker = self.__parse_tracker(torrent, downloader_type)
        keyword = self.__parse_keyword_from_url(tracker)
        if (not keyword):
            return False
        use_site_config = self.__config_obj.get("use_site_config")
        site_tag = self.__build_site_tag_by_hostname_keyword(keyword)
        site_name_tag = self.__build_site_tag_by_hostname_keyword(site_dict.get(keyword)) if site_dict else None
        if (use_site_config and site_name_tag):
            # Prefer the configured site name: add it, drop the keyword tag
            if (site_name_tag not in torrent_tags_copy):
                torrent_tags_copy.append(site_name_tag)
            if (site_tag in torrent_tags_copy):
                torrent_tags_copy.remove(site_tag)
        else:
            # Fall back to the hostname keyword tag, drop the site-name tag
            if (site_tag not in torrent_tags_copy):
                torrent_tags_copy.append(site_tag)
            if (site_name_tag in torrent_tags_copy):
                torrent_tags_copy.remove(site_name_tag)
        if (torrent_tags_copy == torrent_tags):
            return False
        downloader.set_torrent_tag(torrent.id, torrent_tags_copy)
        hash = self.__get_torrent_hash(torrent, downloader_type)
        name = self.__get_torrent_name(torrent, downloader_type)
        self.info(f'[TR]单个打标成功: hash = {hash}, name = {name}')
        return True
def __tagging_batch_for_qb(self, torrents, site_dict = {}):
"""
qb批量打标签
"""
self.info('[QB]批量打标开始...')
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if (self.__tagging_one_for_qb(torrent, site_dict)):
count += 1
self.info('[QB]批量打标结束')
return count
def __tagging_batch_for_tr(self, downloader, torrents = None, site_dict = {}):
"""
tr批量打标签
"""
self.info('[TR]批量打标开始...')
if (not torrents):
torrents = self.__get_torrents(downloader)
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if (self.__tagging_one_for_tr(torrent, downloader, site_dict)):
count += 1
self.info('[TR]批量打标结束')
return count
def __tagging_batch(self, downloader, torrents = None, site_dict = {}):
"""
批量打标签
"""
downloader_type = downloader.get_type()
if (not torrents):
torrents = self.__get_torrents(downloader)
if (downloader_type == DownloaderType.QB):
return self.__tagging_batch_for_qb(torrents, site_dict)
elif (downloader_type == DownloaderType.TR):
return self.__tagging_batch_for_tr(downloader, torrents, site_dict)
return 0
@staticmethod
def __check_is_missing_files(torrent, downloader_type):
"""
检查种子是否丢失文件
"""
if (downloader_type == DownloaderType.QB):
return torrent.get('state') == 'missingFiles'
elif (downloader_type == DownloaderType.TR):
return torrent.error == 3 \
and torrent.error_string != None \
and 'No data found' in torrent.error_string
else:
return False
@classmethod
def __get_torrent_data_filename(cls, torrent, downloader_type):
"""
获取种子数据文件(夹)名,通常和种子名称相同
"""
return cls.__get_torrent_name(torrent=torrent, downloader_type=downloader_type);
@classmethod
def __check_file_match_torrent(cls, path, filename, torrent, downloader_type):
"""
检查文件和种子是否匹配
:return is_match 是否匹配
:return torrent_data_path 匹配的种子数据路径
"""
if (not path or not filename or not torrent or not downloader_type):
return False, None
# 种子数据文件名
torrent_filename = cls.__get_torrent_data_filename(torrent=torrent, downloader_type=downloader_type)
if (not torrent_filename):
return False, None
filepath = os.path.join(path, filename)
if (filename == torrent_filename):
return True, filepath
torrent_filename_wrap1 = os.path.sep + torrent_filename
if (path.endswith(torrent_filename_wrap1)):
return True, path
torrent_filename_wrap2 = torrent_filename_wrap1 + os.path.sep
index = filepath.find(torrent_filename_wrap2)
if (index >= 0):
return True, filepath[0, index] + torrent_filename_wrap1
return False, None
@classmethod
def __check_need_delete(cls, torrent, downloader_type, deleted_source_file=None):
"""
检查是否需要删种
"""
if (cls.__check_is_missing_files(torrent, downloader_type)):
return True
if (not deleted_source_file):
return False
source_path = deleted_source_file.get("path")
source_filename = deleted_source_file.get("filename")
is_match, torrent_data_path = cls.__check_file_match_torrent(source_path, source_filename, torrent, downloader_type)
if (not is_match):
return False
# 如果匹配的种子数据路径不存在,说明数据文件已经被删除了,那么就允许删种
return not os.path.exists(torrent_data_path)
def __delete_one_for_qb(self, torrent, downloader, deleted_source_file=None):
"""
qb单个删种
"""
# 判断种子中是否存在排除的标签
torrent_tags = self.__split_tags(torrent.get('tags'))
if (self.__exists_exclude_tag(torrent_tags)):
return False
downloader_type = DownloaderType.QB
if (not self.__check_need_delete(torrent=torrent, downloader_type=downloader_type, deleted_source_file=deleted_source_file)):
return False
hash = self.__get_torrent_hash(torrent, downloader_type)
name = self.__get_torrent_name(torrent, downloader_type)
downloader.delete_torrents(True, hash)
self.info(f'[QB]单个删种完成: hash = {hash}, name = {name}')
return True
def __delete_one_for_tr(self, torrent, downloader, deleted_source_file=None):
"""
tr单个删种
"""
# 判断种子中是否存在排除的标签
torrent_tags = torrent.get('labels')
if (self.__exists_exclude_tag(torrent_tags)):
return False
downloader_type = DownloaderType.TR
if (not self.__check_need_delete(torrent=torrent, downloader_type=downloader_type, deleted_source_file=deleted_source_file)):
return False
hash = self.__get_torrent_hash(torrent, downloader_type)
name = self.__get_torrent_name(torrent, downloader_type)
downloader.delete_torrents(True, torrent.id)
self.info(f'[TR]单个删种完成: hash = {hash}, name = {name}')
return True
def __delete_batch_for_qb(self, downloader, torrents, deleted_source_file=None):
"""
qb批量删种
"""
self.info('[QB]批量删种开始...')
if (not torrents):
torrents = self.__get_torrents(downloader)
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if (self.__delete_one_for_qb(torrent=torrent, downloader=downloader, deleted_source_file=deleted_source_file)):
count += 1
self.info('[QB]批量删种结束')
return count
def __delete_batch_for_tr(self, downloader, torrents, deleted_source_file=None):
"""
tr批量删种
"""
self.info('[TR]批量删种开始...')
if (not torrents):
torrents = self.__get_torrents(downloader)
count = 0
for torrent in torrents:
if (self.__exit_event.is_set()):
return count
if (self.__delete_one_for_tr(torrent=torrent, downloader=downloader, deleted_source_file=deleted_source_file)):
count += 1
self.info('[TR]批量删种结束')
return count
def __delete_batch(self, downloader, torrents=None, deleted_source_file=None):
"""
批量删种
"""
downloader_type = downloader.get_type()
if (not torrents):
torrents = self.__get_torrents(downloader)
if (downloader_type == DownloaderType.QB):
return self.__delete_batch_for_qb(downloader=downloader, torrents=torrents, deleted_source_file=deleted_source_file)
elif (downloader_type == DownloaderType.TR):
return self.__delete_batch_for_tr(downloader=downloader, torrents=torrents, deleted_source_file=deleted_source_file)
return 0
@staticmethod
def __get_torrents(downloader = None):
"""
从下载器中获取全部种子
"""
if (not downloader):
return None
return downloader.get_torrents()[0]
    def __do_task(self):
        """
        Run the plugin task once. A running-state event ensures only one task
        instance executes at a time; overlapping triggers are skipped.
        """
        if (self.__running_state.is_set()):
            self.debug('已有进行中的任务,本次不执行')
            return
        try:
            self.__running_state.set()
            self.__do_task_for_all_downloader()
        finally:
            # Always release the running flag, even when the task fails
            self.__running_state.clear()
    def __do_task_for_all_downloader(self, sub_task_control=None, deleted_source_file=None):
        """
        Run the task for every configured downloader and send one summary notify.

        :param sub_task_control: optional per-sub-task enable/disable overrides
        :param deleted_source_file: the removed source file; only used when
               deleting torrents in sync with a source-file delete event
        """
        # Site dictionary (used to map hostname keywords to site names when tagging)
        site_dict = self.__get_site_dict()
        # Per-downloader run statistics
        results = []
        # Per-downloader task configs
        downloader_configs = self.__config_obj.get('downloader')
        for downloader_id, downloader_config in downloader_configs.items():
            result = self.__do_task_for_single_downloader(downloader_id=downloader_id, downloader_config=downloader_config, site_dict=site_dict, sub_task_control=sub_task_control, deleted_source_file=deleted_source_file)
            if (result):
                results.append(result)
        # Send the summary notification
        self.__send_notify(results)
    def __do_task_for_single_downloader(self, downloader_id=None, downloader_config=None, site_dict=None, sub_task_control=None, deleted_source_file=None):
        """
        Run the task against a single downloader.

        :param downloader_id: id of the downloader to process (required)
        :param downloader_config: task configuration; looked up from plugin config when omitted
        :param site_dict: site keyword -> name mapping; rebuilt when omitted
        :param sub_task_control: optional switches that force-disable sub tasks
        :param deleted_source_file: deleted source file; only used when syncing deletions
        :return: a stats dict (downloader_name/total/seeding/tagging/delete) or None when skipped
        """
        if (self.__exit_event.is_set()):
            self.warn(f'任务中止')
            return None
        if (not downloader_id):
            return None
        if (not downloader_config):
            # Look up the per-downloader task configuration.
            downloader_configs = self.__config_obj.get('downloader')
            downloader_config = downloader_configs.get(downloader_id)
            if (not downloader_config):
                return None
        if (not site_dict):
            # Build the site keyword -> name dictionary.
            site_dict = self.__get_site_dict()
        if (not downloader_config.get('enable')):
            return None
        # A sub task runs only when enabled in the downloader config AND not
        # explicitly disabled by sub_task_control.
        enable_seeding = downloader_config.get('enable_seeding') == True and (sub_task_control == None or sub_task_control.get('enable_seeding') != False)
        enable_tagging = downloader_config.get('enable_tagging') == True and (sub_task_control == None or sub_task_control.get('enable_tagging') != False)
        enable_delete = downloader_config.get('enable_delete') == True and (sub_task_control == None or sub_task_control.get('enable_delete') != False)
        if (not enable_seeding and not enable_tagging and not enable_delete):
            return None
        downloader = self.__downloader.get_downloader(downloader_id = downloader_id)
        if (not downloader):
            self.warn(f'下载器不存在: id = {downloader_id}')
            return None
        downloader_type = downloader.get_type()
        downloader_name = downloader.name
        self.info(f'下载器[{downloader_name}]任务执行开始...')
        self.info(f'子任务执行状态: 自动做种={enable_seeding}, 自动打标={enable_tagging}, 自动删种={enable_delete}')
        if (downloader_type not in [DownloaderType.QB, DownloaderType.TR]):
            self.warn(f'下载器[{downloader_name}]类型不受支持: type = {downloader_type}')
            return None
        if (not downloader.get_status()):
            self.warn(f'下载器[{downloader_name}]状态无效')
            return None
        torrents = self.__get_torrents(downloader)
        if (not torrents or len(torrents) <= 0):
            self.warn(f'下载器[{downloader_name}]中没有种子')
            return None
        torrents_count = len(torrents)
        result = {
            'downloader_name': downloader_name,
            'total': torrents_count
        }
        # Batch seeding; each step re-checks the exit event so shutdown returns
        # a partial result instead of blocking.
        if (enable_seeding):
            result['seeding'] = self.__seeding_batch(downloader=downloader, torrents=torrents)
            if (self.__exit_event.is_set()):
                return result
        # Batch tagging.
        if (enable_tagging):
            result['tagging'] = self.__tagging_batch(downloader=downloader, torrents=torrents, site_dict=site_dict)
            if (self.__exit_event.is_set()):
                return result
        # Batch deleting.
        if (enable_delete):
            result['delete'] = self.__delete_batch(downloader=downloader, torrents=torrents, deleted_source_file=deleted_source_file)
            if (self.__exit_event.is_set()):
                return result
        self.info(f'下载器[{downloader_name}]任务执行结束')
        return result
def __send_notify(self, results = None):
"""
发送通知
"""
if (self.__config_obj.get('enable_notify') and results):
text = ''
for result in results:
seeding = result.get('seeding')
tagging = result.get('tagging')
delete = result.get('delete')
if ((seeding and seeding > 0) or (tagging and tagging > 0) or (delete and delete > 0)):
downloader_name = result.get('downloader_name')
total = result.get('total')
text += f'【任务:{downloader_name}】\n'
text += f'种子总数:{total}\n'
if (seeding):
text += f'做种数:{seeding}\n'
if (tagging):
text += f'打标数:{tagging}\n'
if (delete):
text += f'删种数:{delete}\n'
text += '\n'
text += '————————————\n'
if (text):
self.send_message(
title = f"{self.module_name}任务执行结果",
text = text
)
def __get_site_dict(self):
"""
获取站点词典
"""
site_infos = Sites().get_sites()
if (site_infos):
return {self.__parse_keyword_from_url(site_info.get("signurl")): site_info.get("name") for site_info in site_infos}
return {}
    @EventHandler.register(EventType.DownloadAdd)
    def listen_download_add_event(self, event):
        """
        Handle a download-added event by scheduling a tagging-only run.

        Only the tagging sub task is enabled; seeding and deleting are
        suppressed via sub_task_control. The run is deferred by 3 seconds
        through the scheduler rather than executed inline.
        """
        if (not event or not event.event_data):
            return
        # Respect the plugin state and the per-feature listen switch.
        if (not self.get_state() or not self.__config_obj.get("listen_download_event")):
            return
        if (not self.__scheduler or not self.__scheduler.running):
            return
        if (self.__exit_event.is_set()):
            return
        downloader_id = event.event_data.get('downloader_id')
        # Only run the tagging sub task for newly added downloads.
        sub_task_control = {
            'enable_seeding': False,
            'enable_tagging': True,
            'enable_delete': False
        }
        def __do_task():
            # Target the originating downloader when known, otherwise all.
            if (downloader_id):
                self.__do_task_for_single_downloader(downloader_id=downloader_id, sub_task_control=sub_task_control)
            else:
                self.__do_task_for_all_downloader(sub_task_control=sub_task_control)
        timezone = Config().get_timezone()
        # Defer slightly so the downloader has registered the new torrent.
        self.__scheduler.add_job(__do_task, 'date', run_date = datetime.now(tz = pytz.timezone(timezone)) + timedelta(seconds = 3))
        self.info('监听到下载添加事件,触发插件任务')
    @EventHandler.register(EventType.SourceFileDeleted)
    def listen_source_file_deleted_event(self, event):
        """
        Handle a source-file-deleted event by scheduling a delete-only run,
        so torrents matching the removed file are deleted from all downloaders.
        """
        # Validate event payload; discard the event when incomplete.
        if (not event or not event.event_data):
            return
        source_path = event.event_data.get("path")
        source_filename = event.event_data.get("filename")
        if (not source_path or not source_filename):
            return
        # Respect the plugin state and the per-feature listen switch.
        if (not self.get_state() or not self.__config_obj.get("listen_source_file_event")):
            return
        if (not self.__scheduler or not self.__scheduler.running):
            return
        if (self.__exit_event.is_set()):
            return
        # Run only the delete sub task, across every downloader.
        sub_task_control = {
            'enable_seeding': False,
            'enable_tagging': False,
            'enable_delete': True
        }
        def __do_task():
            self.__do_task_for_all_downloader(sub_task_control=sub_task_control, deleted_source_file=event.event_data)
        timezone = Config().get_timezone()
        # Defer slightly; executed via the scheduler, not inline.
        self.__scheduler.add_job(__do_task, 'date', run_date = datetime.now(tz = pytz.timezone(timezone)) + timedelta(seconds = 3))
        self.info('监听到源文件删除事件,触发插件任务')
| 56,676 | Python | .py | 1,116 | 31.985663 | 6,053 | 0.554569 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,106 | speedlimiter.py | demigody_nas-tools/app/plugins/modules/speedlimiter.py | import time
from apscheduler.schedulers.background import BackgroundScheduler
from app.downloader import Downloader
from app.helper.security_helper import SecurityHelper
from app.mediaserver import MediaServer
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils import ExceptionUtils
from app.utils.types import MediaServerType, EventType
from config import Config
class SpeedLimiter(_IPluginModule):
    """Throttle configured download clients while the media server is playing.

    Playback state is detected either via Emby/Jellyfin/Plex webhooks or a
    periodic scheduler check; different up/down limits are applied for the
    playing and idle states, optionally computing the upload limit from the
    available bandwidth ("smart" mode).
    """
    # Plugin name
    module_name = "下载器限速"
    # Plugin description
    module_desc = "媒体服务器播状态改变时,根据设置对下载器进行限速。"
    # Plugin icon
    module_icon = "SpeedLimiter.jpg"
    # Theme color
    module_color = "#183883"
    # Plugin version
    module_version = "1.0"
    # Plugin author
    module_author = "Shurelol"
    # Author homepage
    author_url = "https://github.com/Shurelol"
    # Prefix for plugin config item IDs
    module_config_prefix = "speedlimit_"
    # Load order
    module_order = 8
    # Required user auth level
    auth_level = 2
    # Private attributes
    _downloader = None
    _mediaserver = None
    _scheduler = None
    # Task execution interval in seconds
    _interval = 300
    # Master switch for the limiter
    _limit_enabled = False
    # Current state: True = playing limits applied, False = idle limits applied
    _playing_flag = False
    # Limit settings (KB/s); "unlimit" values are used while nothing is playing
    _download_limit = 0
    _upload_limit = 0
    _download_unlimit = 0
    _upload_unlimit = 0
    # Address ranges exempt from limiting
    _unlimited_ips = {"ipv4": "0.0.0.0/0", "ipv6": "::/0"}
    # Smart upload-limit mode flag (enabled when a total bandwidth is configured)
    _auto_limit = False
    # Total upstream bandwidth (bits/s after conversion in init_config)
    _bandwidth = 0
    _allocation_ratio = 0
    # Downloader ids selected for limiting
    _limited_downloader_ids = []
    # Whether to send notifications on state change
    _notify = False

    @staticmethod
    def get_fields():
        """Return the plugin's configuration form definition."""
        downloaders = {k: v for k, v in Downloader().get_downloader_conf_simple().items()
                       if v.get("type") in ["qbittorrent", "transmission"] and v.get("enabled")}
        return [
            # Same section
            {
                'type': 'div',
                'content': [
                    # Same row
                    [
                        {
                            'title': '播放限速',
                            'required': "",
                            'tooltip': '媒体服务器有媒体播放时对选取的下载器进行限速,不限速地址范围除外,0或留空不启用',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'upload_limit',
                                    'placeholder': '上传限速,KB/s'
                                },
                                {
                                    'id': 'download_limit',
                                    'placeholder': '下载限速,KB/s'
                                }
                            ]
                        },
                        {
                            'title': '未播放限速',
                            'required': "",
                            'tooltip': '媒体服务器无媒体播放时对选取的下载器进行限速,0或留空不启用',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'upload_unlimit',
                                    'placeholder': '上传限速,KB/s'
                                },
                                {
                                    'id': 'download_unlimit',
                                    'placeholder': '下载限速,KB/s'
                                }
                            ]
                        },
                        {
                            'title': '智能上传限速设置',
                            'required': "",
                            'tooltip': '设置上行带宽后,媒体服务器有媒体播放时根据上行带宽和媒体播放占用带宽计算上传限速数值。多个下载器设置分配比例,如两个下载器设置1:2,留空均分',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'bandwidth',
                                    'placeholder': '上行带宽,Mbps'
                                },
                                {
                                    'id': 'allocation_ratio',
                                    'placeholder': '分配比例,1:1:1'
                                }
                            ]
                        }
                    ]
                ]
            },
            {
                'type': 'div',
                'content': [
                    # Same row
                    [
                        {
                            'title': '不限速地址范围',
                            'required': 'required',
                            'tooltip': '以下地址范围不进行限速处理,一般配置为局域网地址段;多个地址段用,号分隔,留空或配置为0.0.0.0/0,::/0则不做限制',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'ipv4',
                                    'placeholder': '192.168.1.0/24',
                                },
                                {
                                    'id': 'ipv6',
                                    'placeholder': 'FE80::/10',
                                }
                            ]
                        },
                    ],
                    [
                        {
                            'title': '消息通知',
                            'required': "",
                            'tooltip': '开启后,下载器限速状态改变时,发送通知',
                            'type': 'switch',
                            'id': 'notify',
                        },
                    ]
                ]
            },
            {
                'type': 'details',
                'summary': '下载器',
                'tooltip': '设置后根据上行带宽及剩余比例自动计算限速数值',
                'content': [
                    # Same row
                    [
                        {
                            'id': 'downloaders',
                            'type': 'form-selectgroup',
                            'content': downloaders
                        },
                    ]
                ]
            },
            {
                'type': 'details',
                'summary': '任务间隔',
                'tooltip': '设置任务执行间隔,单位为秒,默认时间300秒;应优先通过配置Emby/Jellyfin/Plex的webhook发送播放事件给NAStool来触发自动限速,而非定时执行检查',
                'content': [
                    [
                        {
                            'required': "",
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'interval',
                                    'placeholder': '300'
                                }
                            ]
                        }
                    ]
                ]
            }
        ]

    def init_config(self, config=None):
        """Parse configuration, reset state and (re)start the periodic check job."""
        self._downloader = Downloader()
        self._mediaserver = MediaServer()
        # Read configuration
        if config:
            try:
                # Total bandwidth: Mbps -> bits/s
                self._bandwidth = int(float(config.get("bandwidth") or 0)) * 1000000
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self._bandwidth = 0
            # Smart limiting is active only when a bandwidth is configured
            self._auto_limit = True if self._bandwidth else False
            try:
                # Download limit while playing
                self._download_limit = int(float(config.get("download_limit") or 0))
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self._download_limit = 0
            try:
                # Upload limit while playing
                self._upload_limit = int(float(config.get("upload_limit") or 0))
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self._upload_limit = 0
            # Service switch: any configured limit (or smart mode) enables it
            self._limit_enabled = True if self._download_limit or self._upload_limit or self._auto_limit else False
            # Selected downloaders
            self._limited_downloader_ids = config.get("downloaders") or []
            if not self._limited_downloader_ids:
                self._limit_enabled = False
            # Exempt address ranges; catch-all ranges disable the service
            self._unlimited_ips["ipv4"] = config.get("ipv4") or "0.0.0.0/0"
            self._unlimited_ips["ipv6"] = config.get("ipv6") or "::/0"
            if "0.0.0.0/0" in self._unlimited_ips["ipv4"] and "::/0" in self._unlimited_ips["ipv6"]:
                self._limit_enabled = False
            try:
                # Download limit while idle
                self._download_unlimit = int(float(config.get("download_unlimit") or 0))
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self._download_unlimit = 0
            try:
                # Upload limit while idle
                self._upload_unlimit = int(float(config.get("upload_unlimit") or 0))
            except Exception as e:
                ExceptionUtils.exception_traceback(e)
                self._upload_unlimit = 0
            # Scheduler interval (seconds)
            self._interval = int(config.get("interval") or "300")
            # Per-downloader allocation ratio, "a:b:c" -> [a, b, c]
            self._allocation_ratio = config.get("allocation_ratio")
            if self._allocation_ratio:
                try:
                    self._allocation_ratio = [int(i) for i in self._allocation_ratio.split(":")]
                except Exception as e:
                    ExceptionUtils.exception_traceback(e)
                    self.warn("分配比例含有:外非数字字符,执行均分")
                    self._allocation_ratio = []
            else:
                self._allocation_ratio = []
            # Notification switch
            self._notify = True if config.get("notify") else False
        else:
            # No configuration: limiter off
            self._limit_enabled = False
        # Remove any existing job before (re)starting
        self.stop_service()
        # Start the periodic playback check when the limiter is enabled
        if self._limit_enabled:
            self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
            self._scheduler.add_job(func=self.__check_playing_sessions,
                                    args=[self._mediaserver.get_type(), True],
                                    trigger='interval',
                                    seconds=self._interval)
            self._scheduler.print_jobs()
            self._scheduler.start()
            self.info("播放限速服务启动")

    def get_state(self):
        """Return whether the limiter is currently enabled."""
        return self._limit_enabled

    def __speed_limit(self, downloader_confs, allocation_ratio, playing_flag):
        """
        Apply the appropriate limits to each downloader.

        :param downloader_confs: configurations of downloaders to limit
        :param allocation_ratio: per-downloader share list; empty means equal split
        :param playing_flag: True to apply playing limits, False for idle limits
        :return: list of human-readable log lines, one per downloader
        """
        if not downloader_confs:
            return
        limit_log = []
        allocation_count = sum(allocation_ratio) if allocation_ratio else len(downloader_confs)
        for i in range(len(downloader_confs)):
            downloader_conf = downloader_confs[i]
            downloader_name = downloader_conf.get("name")
            # Playing limits
            if playing_flag:
                # Smart upload limiting: split the computed budget by ratio
                if self._auto_limit:
                    if not allocation_ratio:
                        upload_limit = int(self._upload_limit / allocation_count)
                    else:
                        upload_limit = int(self._upload_limit * allocation_ratio[i] / allocation_count)
                    # Never 0, which would mean "unlimited" to the client
                    if upload_limit < 10:
                        upload_limit = 10
                # Plain (non-smart) upload limit
                else:
                    upload_limit = self._upload_limit
                # Download limit
                download_limit = self._download_limit
            # Idle limits
            else:
                upload_limit = self._upload_unlimit
                download_limit = self._download_unlimit
            self._downloader.set_speed_limit(
                downloader_id=downloader_conf.get("id"),
                download_limit=download_limit,
                upload_limit=upload_limit
            )
            # Build the log line
            log_info = f"{downloader_name}"
            if upload_limit:
                log_info += f" 上传:{upload_limit}KB/s"
            if download_limit:
                log_info += f" 下载:{download_limit}KB/s"
            if not upload_limit and not download_limit:
                log_info += " 不限速"
            limit_log.append(log_info)
        # Remember which state is currently applied
        self._playing_flag = playing_flag
        # Return the log lines
        return limit_log

    @EventHandler.register(EventType.EmbyWebhook)
    def emby_action(self, event):
        """
        React to Emby webhook playback start/stop events.
        """
        if self._limit_enabled and event.event_data.get("Event") in ["playback.start", "playback.stop"]:
            self.__check_playing_sessions(_mediaserver_type=MediaServerType.EMBY,
                                          time_check=False,
                                          message=event.event_data.get("Title"))

    @EventHandler.register(EventType.JellyfinWebhook)
    def jellyfin_action(self, event):
        """
        React to Jellyfin webhook playback start/stop events.
        """
        if self._limit_enabled and event.event_data.get("NotificationType") in ["PlaybackStart", "PlaybackStop"]:
            self.__check_playing_sessions(_mediaserver_type=MediaServerType.JELLYFIN, time_check=False)

    @EventHandler.register(EventType.PlexWebhook)
    def plex_action(self, event):
        """
        React to Plex webhook playback start/stop events.
        """
        if self._limit_enabled and event.event_data.get("event") in ["media.play", "media.stop"]:
            self.__check_playing_sessions(_mediaserver_type=MediaServerType.PLEX, time_check=False)

    def __check_playing_sessions(self, _mediaserver_type, time_check=False, message=""):
        """
        Inspect current playback sessions and apply/lift limits accordingly.

        :param _mediaserver_type: media server type this check was triggered for
        :param time_check: True when called by the periodic scheduler job
        :param message: optional media title to include in the notification
        """
        mediaserver_type = self._mediaserver.get_type()
        if _mediaserver_type != mediaserver_type:
            return
        # For Plex webhooks, sleep briefly so get_playing_sessions reflects the event
        if not time_check and _mediaserver_type == MediaServerType.PLEX:
            time.sleep(3)
        # Current playback sessions
        playing_sessions = self._mediaserver.get_playing_sessions()
        # Total bitrate of sessions that are NOT in the exempt address ranges
        total_bit_rate = 0
        if _mediaserver_type == MediaServerType.EMBY:
            for session in playing_sessions:
                if not SecurityHelper.allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \
                        and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
                    total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
        elif _mediaserver_type == MediaServerType.JELLYFIN:
            for session in playing_sessions:
                if not SecurityHelper.allow_access(self._unlimited_ips, session.get("RemoteEndPoint")) \
                        and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
                    media_streams = session.get("NowPlayingItem", {}).get("MediaStreams") or []
                    for media_stream in media_streams:
                        total_bit_rate += int(media_stream.get("BitRate") or 0)
        elif _mediaserver_type == MediaServerType.PLEX:
            for session in playing_sessions:
                if not SecurityHelper.allow_access(self._unlimited_ips, session.get("address")) \
                        and session.get("type") == "Video":
                    total_bit_rate += int(session.get("bitrate") or 0)
        else:
            return
        # Desired state: playing when any counted bitrate exists
        _playing_flag = True if total_bit_rate else False
        # Decide whether anything needs to change
        if _playing_flag:
            # Webhook call, non-smart mode, playing limits already applied: no-op
            if not time_check and not self._auto_limit and _playing_flag == self._playing_flag:
                return
            # Smart mode: recompute the upload budget from the free bandwidth
            if self._auto_limit:
                self.calc_limit(total_bit_rate)
        else:
            # Webhook call and idle limits already applied: no-op
            if not time_check and _playing_flag == self._playing_flag:
                return
        # Whether to log/notify this run
        _log = True
        # Periodic check with unchanged state: apply silently
        if time_check and _playing_flag == self._playing_flag:
            _log = False
        # Resolve the downloaders to limit
        limited_downloader_confs, limited_allocation_ratio = self.check_limited_downloader()
        if not limited_downloader_confs:
            self.warn("未有启用的限速下载器")
        # Apply the limits
        limit_log = self.__speed_limit(
            downloader_confs=limited_downloader_confs,
            allocation_ratio=limited_allocation_ratio,
            playing_flag=_playing_flag
        )
        # Emit logs and optional notification
        if _log:
            for log_info in limit_log:
                self.info(f"{'' if _playing_flag else '未'}播放限速:{log_info}")
            if self._notify:
                limit_log = "\n".join(limit_log)
                title = f"【{'定时检查'if time_check else mediaserver_type.value}{'开始' if _playing_flag else '停止'}播放限速】"
                self.send_message(
                    title=title,
                    text=f"{message}\n{limit_log}"
                )

    def calc_limit(self, total_bit_rate):
        """
        Compute the smart upload limit from the remaining bandwidth.

        Sets ``_upload_limit`` to the leftover bandwidth converted from bits/s
        to KB/s (divide by 8 then 1024), with a floor of 10 when the playback
        already consumes the whole bandwidth.
        NOTE(review): the result may be a float; downstream int() in
        __speed_limit handles the smart path — confirm non-smart callers.
        """
        if not total_bit_rate:
            return
        residual_bandwidth = (self._bandwidth - total_bit_rate)
        if residual_bandwidth < 0:
            self._upload_limit = 10
        else:
            self._upload_limit = residual_bandwidth / 8 / 1024

    def check_limited_downloader(self):
        """
        Resolve the enabled downloaders selected for limiting.

        :return: (downloader_confs, allocation_ratio); the ratio list is empty
            when it was invalid or unset (meaning equal split)
        """
        # Downloaders to limit
        limited_downloader_confs = []
        limited_allocation_ratio = []
        # The ratio must match the number of selected downloaders
        if self._allocation_ratio and len(self._allocation_ratio) != len(self._limited_downloader_ids):
            self._allocation_ratio = []
            self.warn("分配比例配置错误,与限速下载器数量不一致,执行均分")
        # Keep only downloaders that still exist and are enabled
        downloader_confs_dict = self._downloader.get_downloader_conf_simple()
        for i in range(len(self._limited_downloader_ids)):
            did = self._limited_downloader_ids[i]
            downloader_conf = downloader_confs_dict.get(did)
            if downloader_conf and downloader_conf.get("enabled"):
                limited_downloader_confs.append(downloader_conf)
                if self._allocation_ratio:
                    limited_allocation_ratio.append(self._allocation_ratio[i])
        return limited_downloader_confs, limited_allocation_ratio

    def stop_service(self):
        """
        Shut down the scheduler and release resources on plugin exit.
        """
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            print(str(e))
| 20,063 | Python | .py | 456 | 24.041667 | 116 | 0.466449 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,107 | customization.py | demigody_nas-tools/app/plugins/modules/customization.py | from app.media.meta.customization import CustomizationMatcher
from app.plugins.modules._base import _IPluginModule
class Customization(_IPluginModule):
    """Register user-defined regex patterns as a {customization} rename placeholder."""
    # Plugin name
    module_name = "自定义占位符"
    # Plugin description
    module_desc = "添加自定义占位符识别正则,重命名格式中添加{customization}使用,自定义多个结果间分隔符"
    # Plugin icon
    module_icon = "regex.png"
    # Theme color
    module_color = "#E64D1C"
    # Plugin version
    module_version = "1.0"
    # Plugin author
    module_author = "Shurelol"
    # Author homepage
    author_url = "https://github.com/Shurelol"
    # Prefix for plugin config item IDs
    module_config_prefix = "customization_"
    # Load order
    module_order = 6
    # Required user auth level
    auth_level = 1
    # Private attributes
    _customization = None
    _custom_separator = None
    _customization_matcher = None

    @staticmethod
    def get_fields():
        """Return the plugin's configuration form definition."""
        return [
            # Same section
            {
                'type': 'div',
                'content': [
                    # Same row
                    [
                        {
                            'title': '',
                            'required': '',
                            'tooltip': '',
                            'type': 'textarea',
                            'content':
                                {
                                    'id': 'customization',
                                    'placeholder': '多个匹配对象请用;或换行分隔,支持正则表达式,特殊字符注意转义',
                                    'rows': 5
                                }
                        },
                    ],
                    [
                        {
                            'title': '自定义分隔符',
                            'required': "",
                            'tooltip': '当匹配到多个结果时,使用此分隔符按添加自定义占位符的顺序进行合成,留空使用@(同一正则表达式内的多个对象按名称中出现的顺序合成)',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'separator',
                                    'placeholder': '请不要使用文件名中禁止使用的符号!',
                                }
                            ]
                        },
                    ]
                ]
            }
        ]

    def init_config(self, config=None):
        """Compile the configured patterns into one alternation and register them."""
        self._customization_matcher = CustomizationMatcher()
        # Read configuration
        if config:
            customization = config.get('customization')
            custom_separator = config.get('separator')
            if customization:
                # Normalize newlines to ';', then join entries into one regex alternation
                customization = customization.replace("\n", ";").strip(";").split(";")
                customization = "|".join([f"({item})" for item in customization])
            if customization:
                self.info("自定义占位符已加载")
            if custom_separator:
                self.info(f"自定义分隔符 {custom_separator} 已加载")
            self._customization_matcher.update_custom(customization, custom_separator)
            self._customization = customization
            self._custom_separator = custom_separator

    def get_state(self):
        """Plugin is active when either a pattern or a separator is configured."""
        return True if self._customization or self._custom_separator else False

    def stop_service(self):
        """
        Plugin exit; nothing to clean up.
        """
        pass
| 3,626 | Python | .py | 89 | 19.651685 | 106 | 0.432601 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,108 | libraryscraper.py | demigody_nas-tools/app/plugins/modules/libraryscraper.py | from datetime import datetime, timedelta
from threading import Event
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.media import Scraper
from app.plugins import EventHandler
from app.plugins.modules._base import _IPluginModule
from app.utils.types import EventType
from config import Config
class LibraryScraper(_IPluginModule):
    """Periodically scrape the media library to fill in missing metadata and images."""
    # Plugin name
    module_name = "媒体库刮削"
    # Plugin description
    module_desc = "定时对媒体库进行刮削,补齐缺失元数据和图片。"
    # Plugin icon
    module_icon = "scraper.png"
    # Theme color
    module_color = "#FF7D00"
    # Plugin version
    module_version = "1.0"
    # Plugin author
    module_author = "jxxghp"
    # Author homepage
    author_url = "https://github.com/jxxghp"
    # Prefix for plugin config item IDs
    module_config_prefix = "libraryscraper_"
    # Load order
    module_order = 7
    # Required user level
    user_level = 1
    # Private attributes
    _scheduler = None
    _scraper = None
    # Configuration switches
    _cron = None
    _onlyonce = False
    _mode = None
    _scraper_path = None
    _exclude_path = None
    # Exit event used to abort a running scrape on shutdown
    _event = Event()

    @staticmethod
    def get_fields():
        """Return the plugin's configuration form definition."""
        movie_path = Config().get_config('media').get('movie_path') or []
        tv_path = Config().get_config('media').get('tv_path') or []
        anime_path = Config().get_config('media').get('anime_path') or []
        path = {p: {'name': p} for p in (movie_path + tv_path + anime_path)}
        return [
            # Same section
            {
                'type': 'div',
                'content': [
                    # Same row
                    [
                        {
                            'title': '刮削周期',
                            'required': "required",
                            'tooltip': '支持5位cron表达式;需要在基础设置中配置好刮削内容;刮削时间根据媒体库中的文件数量及网络状况而定,耗时可能会非常长,建议合理设置刮削周期,留空则不启用定期刮削',
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'cron',
                                    'placeholder': '0 0 0 ? *',
                                }
                            ]
                        },
                        {
                            'title': '刮削模式',
                            'required': "required",
                            'type': 'select',
                            'content': [
                                {
                                    'id': 'mode',
                                    'default': 'no_force',
                                    'options': {
                                        "no_force": "仅刮削缺失的元数据和图片",
                                        "force_nfo": "覆盖所有元数据",
                                        "force_all": "覆盖所有元数据和图片"
                                    },
                                }
                            ]
                        }
                    ],
                ]
            },
            {
                'type': 'details',
                'summary': '刮削媒体库',
                'tooltip': '请选择需要刮削的媒体库',
                'content': [
                    # Same row
                    [
                        {
                            'id': 'scraper_path',
                            'type': 'form-selectgroup',
                            'content': path
                        },
                    ]
                ]
            },
            {
                'type': 'details',
                'summary': '排除路径',
                'tooltip': '需要排除的媒体库路径,多个用英文逗号分割',
                'content': [
                    [
                        {
                            'required': "",
                            'type': 'text',
                            'content': [
                                {
                                    'id': 'exclude_path',
                                    'placeholder': '多个路径用,分割'
                                }
                            ]
                        }
                    ]
                ]
            },
            {
                'type': 'div',
                'content': [
                    [
                        {
                            'title': '立即运行一次',
                            'required': "",
                            'tooltip': '打开后立即运行一次(点击此对话框的确定按钮后即会运行,周期未设置也会运行),关闭后将仅按照刮削周期运行(同时上次触发运行的任务如果在运行中也会停止)',
                            'type': 'switch',
                            'id': 'onlyonce',
                        }
                    ],
                ]
            }
        ]

    def init_config(self, config=None):
        """Parse configuration and (re)schedule the scraping job."""
        self._scraper = Scraper()
        # Read configuration
        if config:
            self._onlyonce = config.get("onlyonce")
            self._cron = config.get("cron")
            self._mode = config.get("mode")
            self._scraper_path = config.get("scraper_path")
            self._exclude_path = config.get("exclude_path")
        # Stop any existing job before rescheduling
        self.stop_service()
        # Schedule the cron job and/or a one-off immediate run
        if self.get_state() or self._onlyonce:
            self._scheduler = BackgroundScheduler(timezone=Config().get_timezone())
            if self._cron:
                self.info(f"刮削服务启动,周期:{self._cron}")
                self._scheduler.add_job(self.__libraryscraper,
                                        CronTrigger.from_crontab(self._cron))
            if self._onlyonce:
                self.info(f"刮削服务启动,立即运行一次")
                self._scheduler.add_job(self.__libraryscraper, 'date',
                                        run_date=datetime.now(tz=pytz.timezone(Config().get_timezone())) + timedelta(
                                            seconds=3))
                # Reset the one-shot switch so it does not fire again
                self._onlyonce = False
                self.update_config({
                    "onlyonce": self._onlyonce,
                    "cron": self._cron,
                    "mode": self._mode,
                    "scraper_path": self._scraper_path,
                    "exclude_path": self._exclude_path
                })
            if self._scheduler.get_jobs():
                # Start the scheduler only when a job was actually added
                self._scheduler.print_jobs()
                self._scheduler.start()

    def get_state(self):
        """Plugin is active only when a cron period is configured."""
        return True if self._cron else False

    @EventHandler.register(EventType.MediaScrapStart)
    def start_scrap(self, event):
        """
        Respond to an externally triggered scrape event for one path.
        :param event: carries 'path' and an optional 'force' flag
        :return:
        """
        event_info = event.event_data
        if not event_info:
            return
        path = event_info.get("path")
        force = event_info.get("force")
        if force:
            mode = 'force_all'
        else:
            mode = 'no_force'
        self._scraper.folder_scraper(path, mode=mode)

    def __libraryscraper(self):
        """
        Scrape every selected library path, honoring the exit event.
        """
        # Selected directories
        self.info(f"开始刮削媒体库:{self._scraper_path} ...")
        for path in self._scraper_path:
            if not path:
                continue
            if self._event.is_set():
                self.info(f"媒体库刮削服务停止")
                return
            # Scrape this directory
            self._scraper.folder_scraper(path=path,
                                         exclude_path=self._exclude_path,
                                         mode=self._mode)
        self.info(f"媒体库刮削完成")

    def stop_service(self):
        """
        Shut down the scheduler on plugin exit, signalling a running scrape to stop.
        """
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._event.set()
                    self._scheduler.shutdown()
                    self._event.clear()
                self._scheduler = None
        except Exception as e:
            print(str(e))
| 8,574 | Python | .py | 221 | 18.520362 | 123 | 0.400268 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,109 | autosub.py | demigody_nas-tools/app/plugins/modules/autosub.py | import copy
import os
import re
import subprocess
import tempfile
import time
import traceback
from datetime import timedelta
import iso639
import psutil
import srt
from lxml import etree
from app.helper import FfmpegHelper
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._base import _IPluginModule
from app.utils import SystemUtils
from config import RMT_MEDIAEXT
class AutoSub(_IPluginModule):
# 插件名称
module_name = "AI字幕自动生成"
# 插件描述
module_desc = "使用whisper自动生成视频文件字幕。"
# 插件图标
module_icon = "autosubtitles.jpeg"
# 主题色
module_color = "#2C4F7E"
# 插件版本
module_version = "1.0"
# 插件作者
module_author = "olly"
# 作者主页
author_url = "https://github.com/lightolly"
# 插件配置项ID前缀
module_config_prefix = "autosub"
# 加载顺序
module_order = 14
# 可使用的用户级别
auth_level = 2
# 私有属性
_running = False
# 语句结束符
_end_token = ['.', '!', '?', '。', '!', '?', '。"', '!"', '?"', '."', '!"', '?"']
_noisy_token = [('(', ')'), ('[', ']'), ('{', '}'), ('【', '】'), ('♪', '♪'), ('♫', '♫'), ('♪♪', '♪♪')]
def __init__(self):
self.additional_args = '-t 4 -p 1'
self.translate_zh = False
self.translate_only = False
self.whisper_model = None
self.whisper_main = None
self.file_size = None
self.process_count = 0
self.skip_count = 0
self.fail_count = 0
self.success_count = 0
self.send_notify = False
self.asr_engine = 'whisper.cpp'
self.faster_whisper_model = 'base'
self.faster_whisper_model_path = None
@staticmethod
def get_fields():
return [
# 同一板块
{
'type': 'div',
'content': [
[
{
'title': '媒体路径',
'required': '',
'tooltip': '要进行字幕生成的路径,每行一个路径,请确保路径正确',
'type': 'textarea',
'content':
{
'id': 'path_list',
'placeholder': '文件路径',
'rows': 5
}
}
],
# asr 引擎
[
{
'title': '文件大小(MB)',
'required': "required",
'tooltip': '单位 MB, 大于该大小的文件才会进行字幕生成',
'type': 'text',
'content':
[{
'id': 'file_size',
'placeholder': '文件大小, 单位MB'
}]
},
{
'title': 'ASR引擎',
'required': "required",
'tooltip': '自动语音识别引擎选择',
'type': 'select',
'content': [
{
'id': 'asr_engine',
'options': {
'whisper.cpp': 'whisper.cpp',
'faster-whisper': 'faster-whisper'
},
'default': 'whisper.cpp',
'onchange': 'AutoSub_asr_engine_change(this)'
}
]
}
]
]
},
{
'type': 'details',
'id': 'whisper_config',
'summary': 'whisper.cpp 配置',
'tooltip': '使用 whisper.cpp 引擎时的配置',
'hidden': False,
'content': [
[
{
'title': 'whisper.cpp路径',
'required': "",
'tooltip': '填写whisper.cpp主程序路径,如/config/plugin/autosub/main \n'
'推荐教程 https://ddsrem.com/autosub',
'type': 'text',
'content': [
{
'id': 'whisper_main',
'placeholder': 'whisper.cpp主程序路径'
}
]
}
],
[
{
'title': 'whisper.cpp模型路径',
'required': "",
'tooltip': '填写whisper.cpp模型路径,如/config/plugin/autosub/models/ggml-base.en.bin\n'
'可从https://github.com/ggerganov/whisper.cpp/tree/master/models处下载',
'type': 'text',
'content':
[{
'id': 'whisper_model',
'placeholder': 'whisper.cpp模型路径'
}]
}
],
[
{
'title': '高级参数',
'tooltip': 'whisper.cpp的高级参数,请勿随意修改',
'required': "",
'type': 'text',
'content': [
{
'id': 'additional_args',
'placeholder': '-t 4 -p 1'
}
]
}
]
]
},
{
'type': 'details',
'id': 'faster_whisper_config',
'summary': 'faster-whisper 配置',
'tooltip': '使用 faster-whisper 引擎时的配置,安装参考 https://github.com/guillaumekln/faster-whisper',
'content': [
[
{
'title': '模型',
'required': "",
'tooltip': '选择模型后第一次运行会从Hugging Face Hub下载模型,可能需要一段时间',
'type': 'select',
'content': [
{
'id': 'faster_whisper_model',
'options': {
# tiny, tiny.en, base, base.en,
# small, small.en, medium, medium.en,
# large-v1, or large-v2
'tiny': 'tiny',
'tiny.en': 'tiny.en',
'base': 'base',
'base.en': 'base.en',
'small': 'small',
'small.en': 'small.en',
'medium': 'medium',
'medium.en': 'medium.en',
'large-v1': 'large-v1',
'large-v2': 'large-v2',
},
'default': 'base'
}
]
}
],
[
{
'title': '模型保存路径',
'required': "",
'tooltip': '配置模型保存路径,如/config/plugin/autosub/faster-whisper/models',
'type': 'text',
'content': [
{
'id': 'faster_whisper_model_path',
'placeholder': 'faster-whisper配置模型保存路径'
}
]
}
]
]
},
{
'type': 'div',
'content': [
[
{
'title': '立即运行一次',
'required': "",
'tooltip': '打开后立即运行一次',
'type': 'switch',
'id': 'run_now',
},
{
'title': '翻译为中文',
'required': "",
'tooltip': '打开后将自动翻译非中文字幕,生成双语字幕,关闭后只生成英文字幕,需要配置OpenAI API Key',
'type': 'switch',
'id': 'translate_zh',
},
{
'title': '仅已有字幕翻译',
'required': "",
'tooltip': '打开后仅翻译已有字幕,不做语音识别,关闭后将自动识别语音并生成字幕',
'type': 'switch',
'id': 'translate_only',
}
],
[
{
'title': '运行时通知',
'required': "",
'tooltip': '打开后将在单个字幕生成开始和完成后发送通知, 需要开启插件消息推送通知',
'type': 'switch',
'id': 'send_notify',
}
]
]
}
]
@staticmethod
def get_script():
"""
返回插件额外的JS代码
"""
return """
function AutoSub_asr_engine_change(obj) {
if ($(obj).val() == 'faster-whisper') {
$('#autosubwhisper_config').hide();
$('#autosubfaster_whisper_config').show();
}else{
$('#autosubwhisper_config').show();
$('#autosubfaster_whisper_config').hide();
}
}
// 初始化完成后执行的方法
function AutoSub_PluginInit(){
AutoSub_asr_engine_change('#autosubasr_engine');
}
"""
def init_config(self, config=None):
# 如果没有配置信息, 则不处理
if not config:
return
# config.get('path_list') 用 \n 分割为 list 并去除重复值和空值
path_list = list(set(config.get('path_list').split('\n')))
# file_size 转成数字
self.file_size = config.get('file_size')
self.whisper_main = config.get('whisper_main')
self.whisper_model = config.get('whisper_model')
self.translate_zh = config.get('translate_zh', False)
self.translate_only = config.get('translate_only', False)
self.additional_args = config.get('additional_args', '-t 4 -p 1')
self.send_notify = config.get('send_notify', False)
self.asr_engine = config.get('asr_engine', 'whisper.cpp')
self.faster_whisper_model = config.get('faster_whisper_model', 'base')
self.faster_whisper_model_path = config.get('faster_whisper_model_path')
run_now = config.get('run_now')
if not run_now:
return
config['run_now'] = False
self.update_config(config)
# 如果没有配置信息, 则不处理
if not path_list or not self.file_size:
self.warn(f"配置信息不完整,不进行处理")
return
# 校验文件大小是否为数字
if not self.file_size.isdigit():
self.warn(f"文件大小不是数字,不进行处理")
return
# asr 配置检查
if not self.translate_only and not self.__check_asr():
return
if self._running:
self.warn(f"上一次任务还未完成,不进行处理")
return
# 依次处理每个目录
try:
self._running = True
self.success_count = self.skip_count = self.fail_count = self.process_count = 0
for path in path_list:
self.info(f"开始处理目录:{path} ...")
# 如果目录不存在, 则不处理
if not os.path.exists(path):
self.warn(f"目录不存在,不进行处理")
continue
# 如果目录不是文件夹, 则不处理
if not os.path.isdir(path):
self.warn(f"目录不是文件夹,不进行处理")
continue
# 如果目录不是绝对路径, 则不处理
if not os.path.isabs(path):
self.warn(f"目录不是绝对路径,不进行处理")
continue
# 处理目录
self.__process_folder_subtitle(path)
except Exception as e:
self.error(f"处理异常: {e}")
finally:
self.info(f"处理完成: "
f"成功{self.success_count} / 跳过{self.skip_count} / 失败{self.fail_count} / 共{self.process_count}")
self._running = False
def __check_asr(self):
if self.asr_engine == 'whisper.cpp':
if not self.whisper_main or not self.whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.whisper_main):
self.warn(f"whisper.cpp主程序不存在,不进行处理")
return False
if not os.path.exists(self.whisper_model):
self.warn(f"whisper.cpp模型文件不存在,不进行处理")
return False
# 校验扩展参数是否包含异常字符
if self.additional_args and re.search(r'[;|&]', self.additional_args):
self.warn(f"扩展参数包含异常字符,不进行处理")
return False
elif self.asr_engine == 'faster-whisper':
if not self.faster_whisper_model_path or not self.faster_whisper_model:
self.warn(f"配置信息不完整,不进行处理")
return
if not os.path.exists(self.faster_whisper_model_path):
self.warn(f"faster-whisper模型文件夹不存在,不进行处理")
return False
try:
from faster_whisper import WhisperModel, download_model
except ImportError:
self.warn(f"faster-whisper 未安装,不进行处理")
return False
return True
else:
self.warn(f"未配置asr引擎,不进行处理")
return False
return True
    def __process_folder_subtitle(self, path):
        """
        Generate (and optionally translate) subtitles for every media file
        under a directory, updating the success/skip/fail/process counters.
        :param path: directory (or single file) to scan
        :return: None
        """
        # iterate the media files found under the directory
        for video_file in self.__get_library_files(path):
            if not video_file:
                continue
            # skip files below the configured size threshold
            # (getsize is bytes; presumably self.file_size is bytes too — confirm against config)
            if os.path.getsize(video_file) < int(self.file_size):
                continue
            self.process_count += 1
            start_time = time.time()
            file_path, file_ext = os.path.splitext(video_file)
            file_name = os.path.basename(video_file)
            try:
                self.info(f"开始处理文件:{video_file} ...")
                # skip when the target-language subtitle (external or embedded) already exists
                if self.__target_subtitle_exists(video_file):
                    self.warn(f"字幕文件已经存在,不进行处理")
                    self.skip_count += 1
                    continue
                # generate (or extract) the subtitle
                if self.send_notify:
                    self.send_message(title="自动字幕生成",
                                      text=f" 媒体: {file_name}\n 开始处理文件 ... ")
                ret, lang = self.__generate_subtitle(video_file, file_path, self.translate_only)
                if not ret:
                    message = f" 媒体: {file_name}\n "
                    if self.translate_only:
                        # translate-only mode with nothing to translate counts as a skip
                        message += "内嵌&外挂字幕不存在,不进行翻译"
                        self.skip_count += 1
                    else:
                        message += "生成字幕失败,跳过后续处理"
                        self.fail_count += 1
                    if self.send_notify:
                        self.send_message(title="自动字幕生成", text=message)
                    continue
                if self.translate_zh:
                    # translate the generated subtitle into Chinese
                    self.info(f"开始翻译字幕为中文 ...")
                    if self.send_notify:
                        self.send_message(title="自动字幕生成",
                                          text=f" 媒体: {file_name}\n 开始翻译字幕为中文 ... ")
                    self.__translate_zh_subtitle(lang, f"{file_path}.{lang}.srt", f"{file_path}.zh.srt")
                    self.info(f"翻译字幕完成:{file_name}.zh.srt")
                end_time = time.time()
                message = f" 媒体: {file_name}\n 处理完成\n 字幕原始语言: {lang}\n "
                if self.translate_zh:
                    message += f"字幕翻译语言: zh\n "
                message += f"耗时:{round(end_time - start_time, 2)}秒"
                self.info(f"自动字幕生成 处理完成:{message}")
                if self.send_notify:
                    self.send_message(title="自动字幕生成", text=message)
                self.success_count += 1
            except Exception as e:
                self.error(f"自动字幕生成 处理异常:{e}")
                end_time = time.time()
                message = f" 媒体: {file_name}\n 处理失败\n 耗时:{round(end_time - start_time, 2)}秒"
                if self.send_notify:
                    self.send_message(title="自动字幕生成", text=message)
                # keep the stack trace in the log for debugging
                traceback.print_exc()
                self.fail_count += 1
    def __do_speech_recognition(self, audio_lang, audio_file):
        """
        Run speech recognition on an audio file and produce an .srt next to it.
        :param audio_lang: expected audio language code, or 'auto' to detect
        :param audio_file: path of the extracted audio file
        :return: (True, detected_language) on success, (False, None) otherwise
        """
        lang = audio_lang
        if self.asr_engine == 'whisper.cpp':
            command = [self.whisper_main] + self.additional_args.split()
            # -osrt writes "<audio_file>.srt"
            command += ['-l', lang, '-m', self.whisper_model, '-osrt', '-of', audio_file, audio_file]
            ret = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            if ret.returncode == 0:
                if lang == 'auto':
                    # parse the detected language from stdout, e.g.
                    # "whisper_full_with_state: auto-detected language: en (p = 0.973642)"
                    output = ret.stdout.decode('utf-8') if ret.stdout else ""
                    lang = re.search(r"auto-detected language: (\w+)", output)
                    if lang and lang.group(1):
                        lang = lang.group(1)
                    else:
                        # fall back to English when detection output is missing
                        lang = "en"
                return True, lang
        elif self.asr_engine == 'faster-whisper':
            try:
                from faster_whisper import WhisperModel, download_model
                # use a cache dir beside the model to avoid cross-device rename errors
                cache_dir = os.path.join(self.faster_whisper_model_path, "cache")
                if not os.path.exists(cache_dir):
                    os.mkdir(cache_dir)
                os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir
                model = WhisperModel(download_model(self.faster_whisper_model),
                                     device="cpu", compute_type="int8", cpu_threads=psutil.cpu_count(logical=False))
                segments, info = model.transcribe(audio_file,
                                                  language=lang if lang != 'auto' else None,
                                                  word_timestamps=True,
                                                  temperature=0,
                                                  beam_size=5)
                if lang == 'auto':
                    lang = info.language
                subs = []
                if lang in ['en', 'eng']:
                    # English: emit word-level cues first, then merge into sentences
                    idx = 0
                    for segment in segments:
                        for word in segment.words:
                            idx += 1
                            subs.append(srt.Subtitle(index=idx,
                                                     start=timedelta(seconds=word.start),
                                                     end=timedelta(seconds=word.end),
                                                     content=word.word))
                    subs = self.__merge_srt(subs)
                else:
                    for i, segment in enumerate(segments):
                        subs.append(srt.Subtitle(index=i,
                                                 start=timedelta(seconds=segment.start),
                                                 end=timedelta(seconds=segment.end),
                                                 content=segment.text))
                self.__save_srt(f"{audio_file}.srt", subs)
                return True, lang
            except ImportError:
                self.warn(f"faster-whisper 未安装,不进行处理")
                return False, None
            except Exception as e:
                traceback.print_exc()
                self.error(f"faster-whisper 处理异常:{e}")
                return False, None
        return False, None
    def __generate_subtitle(self, video_file, subtitle_file, only_extract=False):
        """
        Produce a subtitle for a video: reuse an external one, extract an
        embedded one, or run speech recognition as a last resort.
        :param video_file: video file path
        :param subtitle_file: subtitle path without extension
        :param only_extract: when True, never run speech recognition
        :return: (True, language) on success, (False, None) otherwise
        """
        # probe file metadata with ffprobe
        video_meta = FfmpegHelper().get_video_metadata(video_file)
        if not video_meta:
            self.error(f"获取视频文件元数据失败,跳过后续处理")
            return False, None
        # pick the audio track and its language
        ret, audio_index, audio_lang = self.__get_video_prefer_audio(video_meta)
        if not ret:
            return False, None
        if not iso639.find(audio_lang) or not iso639.to_iso639_1(audio_lang):
            self.info(f"未知语言音轨")
            audio_lang = 'auto'
        # languages to match existing subtitles against (both 2- and 3-letter forms)
        expert_subtitle_langs = ['en', 'eng'] if audio_lang == 'auto' else [audio_lang, iso639.to_iso639_1(audio_lang)]
        self.info(f"使用 {expert_subtitle_langs} 匹配已有外挂字幕文件 ...")
        exist, lang = self.__external_subtitle_exists(video_file, expert_subtitle_langs)
        if exist:
            self.info(f"外挂字幕文件已经存在,字幕语言 {lang}")
            return True, iso639.to_iso639_1(lang)
        self.info(f"外挂字幕文件不存在,使用 {expert_subtitle_langs} 匹配内嵌字幕文件 ...")
        # look for an embedded subtitle stream
        ret, subtitle_index, \
            subtitle_lang, subtitle_count = self.__get_video_prefer_subtitle(video_meta, expert_subtitle_langs)
        if ret and (audio_lang == subtitle_lang or subtitle_count == 1):
            if audio_lang == subtitle_lang:
                # audio and subtitle language agree: extract directly
                self.info(f"内嵌音轨和字幕语言一致,直接提取字幕 ...")
            elif subtitle_count == 1:
                # languages differ but there is only one subtitle stream: extract it anyway
                self.info(f"内嵌音轨和字幕语言不一致,但只有一个字幕,直接提取字幕 ...")
                audio_lang = iso639.to_iso639_1(subtitle_lang) \
                    if (iso639.find(subtitle_lang) and iso639.to_iso639_1(subtitle_lang)) else 'und'
            FfmpegHelper().extract_subtitle_from_video(video_file, f"{subtitle_file}.{audio_lang}.srt", subtitle_index)
            self.info(f"提取字幕完成:{subtitle_file}.{audio_lang}.srt")
            return True, audio_lang
        if audio_lang != 'auto':
            audio_lang = iso639.to_iso639_1(audio_lang)
        if only_extract:
            self.info(f"未开启语音识别,且无已有字幕文件,跳过后续处理")
            return False, None
        # clean up temp files left behind by a previous abnormal exit
        tempdir = tempfile.gettempdir()
        for file in os.listdir(tempdir):
            if file.startswith('autosub-'):
                os.remove(os.path.join(tempdir, file))
        with tempfile.NamedTemporaryFile(prefix='autosub-', suffix='.wav', delete=True) as audio_file:
            # extract the chosen audio track to wav
            self.info(f"提取音频:{audio_file.name} ...")
            FfmpegHelper().extract_wav_from_video(video_file, audio_file.name, audio_index)
            self.info(f"提取音频完成:{audio_file.name}")
            # run speech recognition on the wav
            self.info(f"开始生成字幕, 语言 {audio_lang} ...")
            ret, lang = self.__do_speech_recognition(audio_lang, audio_file.name)
            if ret:
                self.info(f"生成字幕成功,原始语言:{lang}")
                # copy the generated srt next to the video
                SystemUtils.copy(f"{audio_file.name}.srt", f"{subtitle_file}.{lang}.srt")
                self.info(f"复制字幕文件:{subtitle_file}.{lang}.srt")
                # drop the temporary srt
                os.remove(f"{audio_file.name}.srt")
                return ret, lang
            else:
                self.error(f"生成字幕失败")
                return False, None
@staticmethod
def __get_library_files(in_path, exclude_path=None):
"""
获取目录媒体文件列表
"""
if not os.path.isdir(in_path):
yield in_path
return
for root, dirs, files in os.walk(in_path):
if exclude_path and any(os.path.abspath(root).startswith(os.path.abspath(path))
for path in exclude_path.split(",")):
continue
for file in files:
cur_path = os.path.join(root, file)
# 检查后缀
if os.path.splitext(file)[-1].lower() in RMT_MEDIAEXT:
yield cur_path
@staticmethod
def __load_srt(file_path):
"""
加载字幕文件
:param file_path: 字幕文件路径
:return:
"""
with open(file_path, 'r', encoding="utf8") as f:
srt_text = f.read()
return list(srt.parse(srt_text))
@staticmethod
def __save_srt(file_path, srt_data):
"""
保存字幕文件
:param file_path: 字幕文件路径
:param srt_data: 字幕数据
:return:
"""
with open(file_path, 'w', encoding="utf8") as f:
f.write(srt.compose(srt_data))
def __get_video_prefer_audio(self, video_meta, prefer_lang=None):
"""
获取视频的首选音轨,如果有多音轨, 优先指定语言音轨,否则获取默认音轨
:param video_meta
:return:
"""
if type(prefer_lang) == str and prefer_lang:
prefer_lang = [prefer_lang]
# 获取首选音轨
audio_lang = None
audio_index = None
audio_stream = filter(lambda x: x.get('codec_type') == 'audio', video_meta.get('streams', []))
for index, stream in enumerate(audio_stream):
if not audio_index:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
# 获取默认音轨
if stream.get('disposition', {}).get('default'):
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
# 获取指定语言音轨
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
audio_index = index
audio_lang = stream.get('tags', {}).get('language', 'und')
break
# 如果没有音轨, 则不处理
if audio_index is None:
self.warn(f"没有音轨,不进行处理")
return False, None, None
self.info(f"选中音轨信息:{audio_index}, {audio_lang}")
return True, audio_index, audio_lang
def __get_video_prefer_subtitle(self, video_meta, prefer_lang=None):
"""
获取视频的首选字幕,如果有多字幕, 优先指定语言字幕, 否则获取默认字幕
:param video_meta:
:return:
"""
# from https://wiki.videolan.org/Subtitles_codecs/
"""
https://trac.ffmpeg.org/wiki/ExtractSubtitles
ffmpeg -codecs | grep subtitle
DES... ass ASS (Advanced SSA) subtitle (decoders: ssa ass ) (encoders: ssa ass )
DES... dvb_subtitle DVB subtitles (decoders: dvbsub ) (encoders: dvbsub )
DES... dvd_subtitle DVD subtitles (decoders: dvdsub ) (encoders: dvdsub )
D.S... hdmv_pgs_subtitle HDMV Presentation Graphic Stream subtitles (decoders: pgssub )
..S... hdmv_text_subtitle HDMV Text subtitle
D.S... jacosub JACOsub subtitle
D.S... microdvd MicroDVD subtitle
D.S... mpl2 MPL2 subtitle
D.S... pjs PJS (Phoenix Japanimation Society) subtitle
D.S... realtext RealText subtitle
D.S... sami SAMI subtitle
..S... srt SubRip subtitle with embedded timing
..S... ssa SSA (SubStation Alpha) subtitle
D.S... stl Spruce subtitle format
DES... subrip SubRip subtitle (decoders: srt subrip ) (encoders: srt subrip )
D.S... subviewer SubViewer subtitle
D.S... subviewer1 SubViewer v1 subtitle
D.S... vplayer VPlayer subtitle
DES... webvtt WebVTT subtitle
"""
image_based_subtitle_codecs = (
'dvd_subtitle',
'dvb_subtitle',
'hdmv_pgs_subtitle',
)
if type(prefer_lang) == str and prefer_lang:
prefer_lang = [prefer_lang]
# 获取首选字幕
subtitle_lang = None
subtitle_index = None
subtitle_count = 0
subtitle_stream = filter(lambda x: x.get('codec_type') == 'subtitle', video_meta.get('streams', []))
for index, stream in enumerate(subtitle_stream):
# 如果是强制字幕,则跳过
if stream.get('disposition', {}).get('forced'):
continue
# image-based 字幕,跳过
if (
'width' in stream
or stream.get('codec_name') in image_based_subtitle_codecs
):
continue
if not subtitle_index:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
# 获取默认字幕
if stream.get('disposition', {}).get('default'):
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
# 获取指定语言字幕
if prefer_lang and stream.get('tags', {}).get('language') in prefer_lang:
subtitle_index = index
subtitle_lang = stream.get('tags', {}).get('language')
subtitle_count += 1
# 如果没有字幕, 则不处理
if subtitle_index is None:
self.debug(f"没有内嵌字幕")
return False, None, None, None
self.debug(f"命中内嵌字幕信息:{subtitle_index}, {subtitle_lang}")
return True, subtitle_index, subtitle_lang, subtitle_count
def __is_noisy_subtitle(self, content):
"""
判断是否为背景音等字幕
:param content:
:return:
"""
for token in self._noisy_token:
if content.startswith(token[0]) and content.endswith(token[1]):
return True
return False
    def __merge_srt(self, subtitle_data):
        """
        Merge word/fragment cues into whole-sentence cues.
        :param subtitle_data: list of srt.Subtitle items (deep-copied, input untouched)
        :return: merged subtitle list
        """
        subtitle_data = copy.deepcopy(subtitle_data)
        # merged output and the "previous sentence is finished" flag
        merged_subtitle = []
        sentence_end = True
        for index, item in enumerate(subtitle_data):
            # collapse multi-line content to one line, then strip
            content = item.content.replace('\n', ' ').strip()
            # strip html tags via lxml text extraction
            parse = etree.HTML(content)
            if parse is not None:
                content = parse.xpath('string(.)')
            if content == '':
                continue
            item.content = content
            # noise/background cues stay standalone and terminate the sentence
            if self.__is_noisy_subtitle(content):
                merged_subtitle.append(item)
                sentence_end = True
                continue
            if not merged_subtitle or sentence_end:
                merged_subtitle.append(item)
            elif not sentence_end:
                # continue the open sentence: append text, extend the end time
                merged_subtitle[-1].content = f"{merged_subtitle[-1].content} {content}"
                merged_subtitle[-1].end = item.end
            # terminating punctuation closes the sentence
            if content.endswith(tuple(self._end_token)):
                sentence_end = True
            # force-close an over-long sentence
            elif len(merged_subtitle[-1].content) > 350:
                sentence_end = True
            else:
                sentence_end = False
        return merged_subtitle
def __do_translate_with_retry(self, text, retry=3):
# 调用OpenAI翻译
# 免费OpenAI Api Limit: 20 / minute
ret, result = OpenAiHelper().translate_to_zh(text)
for i in range(retry):
if ret and result:
break
if "Rate limit reached" in result:
self.info(f"OpenAI Api Rate limit reached, sleep 60s ...")
time.sleep(60)
else:
self.warn(f"翻译失败,重试第{i + 1}次")
ret, result = OpenAiHelper().translate_to_zh(text)
if not ret or not result:
return None
return result
    def __translate_zh_subtitle(self, source_lang, source_subtitle, dest_subtitle):
        """
        Translate a subtitle file to Chinese via OpenAI and write a bilingual srt.
        :param source_lang: language of the source subtitle
        :param source_subtitle: source .srt path
        :param dest_subtitle: destination .srt path
        :return: None
        """
        # load the source srt
        srt_data = self.__load_srt(source_subtitle)
        # merge cues into sentences; works well for punctuated English,
        # other languages would need NLP for sentence boundaries
        if source_lang in ['en', 'eng']:
            self.info(f"开始合并字幕语句 ...")
            merged_data = self.__merge_srt(srt_data)
            self.info(f"合并字幕语句完成,合并前字幕数量:{len(srt_data)}, 合并后字幕数量:{len(merged_data)}")
            srt_data = merged_data
        batch = []
        max_batch_tokens = 1000
        for srt_item in srt_data:
            # skip empty and noise-only cues
            if not srt_item.content:
                continue
            if self.__is_noisy_subtitle(srt_item.content):
                continue
            # batch cues to reduce API calls
            batch.append(srt_item)
            # character count of the current batch
            batch_tokens = sum([len(x.content) for x in batch])
            # keep batching until the limit is hit or this is the last cue
            if batch_tokens < max_batch_tokens and srt_item != srt_data[-1]:
                continue
            batch_content = '\n'.join([x.content for x in batch])
            result = self.__do_translate_with_retry(batch_content)
            # on failure, drop this batch and carry on
            if not result:
                batch = []
                continue
            translated = result.split('\n')
            if len(translated) != len(batch):
                self.info(
                    f"翻译结果数量不匹配,翻译结果数量:{len(translated)}, 需要翻译数量:{len(batch)}, 退化为单条翻译 ...")
                # counts mismatch: fall back to translating cue by cue
                for index, item in enumerate(batch):
                    result = self.__do_translate_with_retry(item.content)
                    if not result:
                        continue
                    item.content = result + '\n' + item.content
            else:
                self.debug(f"翻译结果数量匹配,翻译结果数量:{len(translated)}")
                for index, item in enumerate(batch):
                    item.content = translated[index].strip() + '\n' + item.content
            batch = []
        # write the bilingual srt
        self.__save_srt(dest_subtitle, srt_data)
@staticmethod
def __external_subtitle_exists(video_file, prefer_langs=None):
"""
外部字幕文件是否存在
:param video_file:
:return:
"""
video_dir, video_name = os.path.split(video_file)
video_name, video_ext = os.path.splitext(video_name)
if type(prefer_langs) == str and prefer_langs:
prefer_langs = [prefer_langs]
for subtitle_lang in prefer_langs:
dest_subtitle = os.path.join(video_dir, f"{video_name}.{subtitle_lang}.srt")
if os.path.exists(dest_subtitle):
return True, subtitle_lang
return False, None
def __target_subtitle_exists(self, video_file):
"""
目标字幕文件是否存在
:param video_file:
:return:
"""
if self.translate_zh:
prefer_langs = ['zh', 'chi']
else:
prefer_langs = ['en', 'eng']
exist, lang = self.__external_subtitle_exists(video_file, prefer_langs)
if exist:
return True
video_meta = FfmpegHelper().get_video_metadata(video_file)
if not video_meta:
return False
ret, subtitle_index, subtitle_lang, _ = self.__get_video_prefer_subtitle(video_meta, prefer_lang=prefer_langs)
if ret and subtitle_lang in prefer_langs:
return True
return False
def get_state(self):
return False
    def stop_service(self):
        """
        Stop the plugin; nothing to tear down for this plugin.
        """
        pass
| 40,232 | Python | .py | 869 | 25.339471 | 119 | 0.454571 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,110 | _base.py | demigody_nas-tools/app/plugins/modules/_base.py | import json
import os
from abc import ABCMeta, abstractmethod
import log
from app.conf import SystemConfig
from app.helper import DbHelper
from app.message import Message
from config import Config
class _IPluginModule(metaclass=ABCMeta):
    """
    Plugin base class; subclass it to implement plugin features.
    Besides the built-in attributes, the following methods can be extended or used:
    - get_fields() config dict used to render the plugin's settings form
    - get_state() plugin enabled state, shown as running status
    - stop_service() stop the plugin service
    - get_config() read the saved configuration
    - update_config() persist the configuration
    - init_config() apply the configuration
    - info(msg) write an INFO log entry
    - warn(msg) write a plugin WARN log entry
    - error(msg) write a plugin ERROR log entry
    - debug(msg) write a plugin DEBUG log entry
    - get_page() extra page data, shown from the bottom-left button of the config dialog
    - get_script() extra Javascript injected into the plugin page; can be bound to
      plugin elements, XX_PluginInit is the init hook
    - send_message() send a notification message
    - get_data_path() plugin data directory
    - history() record plugin run data; key must be unique, value is an object
    - get_history() read plugin run data
    - update_history() update plugin run data
    - delete_history() delete plugin run data
    - get_command() command definition used for remote control via the message channel
    """
    # plugin display name
    module_name = ""
    # plugin description
    module_desc = ""
    # plugin icon
    module_icon = ""
    # theme color
    module_color = ""
    # plugin version
    module_version = "1.0"
    # plugin author
    module_author = ""
    # author homepage
    author_url = ""
    # config-field id prefix: prepended to form element ids to avoid clashes between plugins
    module_config_prefix = "plugin_"
    # display order
    module_order = 0
    # minimum user level allowed to use the plugin
    auth_level = 1

    @staticmethod
    @abstractmethod
    def get_fields():
        """
        Return the config dict used to build the settings form.
        """
        pass

    @abstractmethod
    def get_state(self):
        """
        Return whether the plugin is enabled.
        """
        pass

    @abstractmethod
    def init_config(self, config: dict = None):
        """
        Apply the configuration.
        :param config: config dict
        """
        pass

    @abstractmethod
    def stop_service(self):
        """
        Stop the plugin.
        """
        pass

    @staticmethod
    def __is_obj(obj):
        # treat lists/dicts — or strings that look like JSON containers — as objects
        if isinstance(obj, list) or isinstance(obj, dict):
            return True
        else:
            return str(obj).startswith("{") or str(obj).startswith("[")

    def update_config(self, config: dict, plugin_id=None):
        """
        Persist the configuration.
        :param config: config dict
        :param plugin_id: plugin id; defaults to the subclass name
        """
        if not plugin_id:
            plugin_id = self.__class__.__name__
        return SystemConfig().set("plugin.%s" % plugin_id, config)

    def get_config(self, plugin_id=None):
        """
        Read the saved configuration.
        :param plugin_id: plugin id; defaults to the subclass name
        """
        if not plugin_id:
            plugin_id = self.__class__.__name__
        return SystemConfig().get("plugin.%s" % plugin_id)

    def get_data_path(self, plugin_id=None):
        """
        Return (creating if needed) the plugin's data directory.
        """
        if not plugin_id:
            plugin_id = self.__class__.__name__
        data_path = os.path.join(Config().get_user_plugin_path(), plugin_id)
        if not os.path.exists(data_path):
            os.makedirs(data_path, exist_ok=True)
        return data_path

    def history(self, key, value):
        """
        Record plugin run data; key must be unique, object values are serialized to JSON.
        """
        if not key or not value:
            return
        if self.__is_obj(value):
            value = json.dumps(value)
        DbHelper().insert_plugin_history(plugin_id=self.__class__.__name__,
                                         key=key,
                                         value=value)

    def get_history(self, key=None, plugin_id=None):
        """
        Read plugin run data; with a key a single deserialized value is
        returned, otherwise a list of all values.
        """
        if not plugin_id:
            plugin_id = self.__class__.__name__
        historys = DbHelper().get_plugin_history(plugin_id=plugin_id, key=key)
        if not isinstance(historys, list):
            historys = [historys]
        result = []
        for history in historys:
            if not history:
                continue
            if self.__is_obj(history.VALUE):
                try:
                    if key:
                        return json.loads(history.VALUE)
                    else:
                        result.append(json.loads(history.VALUE))
                    continue
                except Exception as err:
                    print(str(err))
            if key:
                return history.VALUE
            else:
                result.append(history.VALUE)
        return None if key else result

    def update_history(self, key, value, plugin_id=None):
        """
        Update plugin run data.
        """
        if not key or not value:
            return False
        if not plugin_id:
            plugin_id = self.__class__.__name__
        if self.__is_obj(value):
            value = json.dumps(value)
        return DbHelper().update_plugin_history(plugin_id=plugin_id, key=key, value=value)

    def delete_history(self, key, plugin_id=None):
        """
        Delete plugin run data.
        """
        if not key:
            return False
        if not plugin_id:
            plugin_id = self.__class__.__name__
        return DbHelper().delete_plugin_history(plugin_id=plugin_id, key=key)

    @staticmethod
    def send_message(title, text=None, image=None):
        """
        Send a notification message.
        """
        return Message().send_plugin_message(title=title,
                                             text=text,
                                             image=image)

    def info(self, msg):
        """
        Write an INFO log entry.
        :param msg: log text
        """
        log.info(f"【Plugin】{self.module_name} - {msg}")

    def warn(self, msg):
        """
        Write a plugin WARN log entry.
        :param msg: log text
        """
        log.warn(f"【Plugin】{self.module_name} - {msg}")

    def error(self, msg):
        """
        Write a plugin ERROR log entry.
        :param msg: log text
        """
        log.error(f"【Plugin】{self.module_name} - {msg}")

    def debug(self, msg):
        """
        Write a plugin DEBUG log entry.
        :param msg: log text
        """
        log.debug(f"【Plugin】{self.module_name} - {msg}")
| 6,885 | Python | .py | 201 | 20.039801 | 90 | 0.537464 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,111 | zhuque.py | demigody_nas-tools/app/plugins/modules/_autosignin/zhuque.py | import json
from lxml import etree
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class ZhuQue(_ISiteSigninHandler):
    """
    ZHUQUE simulated sign-in handler.
    """
    # site url this handler matches; every handler class sets its own
    site_url = "zhuque.in"

    @classmethod
    def match(cls, url):
        """
        Decide from the site url whether this handler applies; the default
        exact-match implementation is enough for most sites.
        :param url: site url
        :return: True when matched; signin() will then be called
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the simulated login and fire the character skill.
        :param site_info: site info dict (url, cookie, UA, proxy flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # fetch the home page
        html_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url="https://zhuque.in")
        if not html_res or html_res.status_code != 200:
            self.error(f"模拟登录失败,请检查站点连通性")
            return False, f'【{site}】模拟登录失败,请检查站点连通性'
        if "login.php" in html_res.text:
            self.error(f"模拟登录失败,cookie失效")
            return False, f'【{site}】模拟登录失败,cookie失效'
        html = etree.HTML(html_res.text)
        if not html:
            return False, f'【{site}】模拟登录失败'

        # fire the skill; msg stays "失败" unless the API reports a bonus
        msg = '失败'
        # bug fix: the original indexed [0] without checking, raising
        # IndexError when the meta tag is absent
        csrf_tokens = html.xpath("//meta[@name='x-csrf-token']/@content")
        x_csrf_token = csrf_tokens[0] if csrf_tokens else None
        if x_csrf_token:
            data = {
                "all": 1,
                "resetModal": "true"
            }
            headers = {
                "x-csrf-token": str(x_csrf_token),
                "Content-Type": "application/json; charset=utf-8",
                "User-Agent": ua
            }
            skill_res = RequestUtils(headers=headers, cookies=site_cookie,
                                     proxies=proxy).post_res(url="https://zhuque.in/api/gaming/fireGenshinCharacterMagic", json=data)
            if not skill_res or skill_res.status_code != 200:
                self.error(f"模拟登录失败,释放技能失败")
                # bug fix: the original fell through and crashed on
                # json.loads(skill_res.text) when skill_res was None
                return False, f'【{site}】模拟登录失败,释放技能失败'
            # e.g. '{"status":200,"data":{"code":"FIRE_GENSHIN_CHARACTER_MAGIC_SUCCESS","bonus":0}}'
            skill_dict = json.loads(skill_res.text)
            if skill_dict['status'] == 200:
                bonus = int(skill_dict['data']['bonus'])
                msg = f'成功,获得{bonus}魔力'

        self.info(f'【{site}】模拟登录成功,技能释放{msg}')
        return True, f'【{site}】模拟登录成功,技能释放{msg}'
| 3,007 | Python | .py | 64 | 29.859375 | 133 | 0.573836 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,112 | hdarea.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdarea.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDArea(_ISiteSigninHandler):
    """
    HDArea attendance handler.
    """
    # site url this handler matches; every handler class sets its own
    site_url = "hdarea.club"

    # response markers: first-time success / already signed in today
    _success_text = "此次签到您获得"
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url):
        """
        Decide from the site url whether this handler applies.
        :param url: site url
        :return: True when matched; signin() will then be called
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the sign-in request.
        :param site_info: site info dict (url, cookie, UA, proxy flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None

        # post the sign-in action
        response = RequestUtils(headers=ua, cookies=cookie, proxies=proxies).post_res(
            url="https://hdarea.club/sign_in.php", data={'action': 'sign_in'})
        if response is None or response.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        page = response.text
        if "login.php" in page:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        # inspect the response for the result markers
        # e.g. '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in page:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        if self._repeat_text in page:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        self.error(f"签到失败,签到接口返回 {page}")
        return False, f'【{site}】签到失败'
| 2,345 | Python | .py | 52 | 27 | 105 | 0.592018 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,113 | u2.py | demigody_nas-tools/app/plugins/modules/_autosignin/u2.py | import random
import re
import datetime
from lxml import etree
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class U2(_ISiteSigninHandler):
    """
    U2 sign-in handler: answers the daily question with a random choice.
    """
    # site url this handler matches; every handler class sets its own
    site_url = "u2.dmhy.org"

    # markers meaning "already signed in today" (several locales)
    _sign_regex = ['<a href="showup.php">已签到</a>',
                   '<a href="showup.php">Show Up</a>',
                   '<a href="showup.php">Показать</a>',
                   '<a href="showup.php">已簽到</a>',
                   '<a href="showup.php">已簽到</a>']

    # marker returned on a successful submission (a redirect script)
    _success_text = "window.location.href = 'showup.php';</script>"

    @classmethod
    def match(cls, url):
        """
        Decide from the site url whether this handler applies.
        :param url: site url
        :return: True when matched; signin() will then be called
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the sign-in.
        :param site_info: site info dict (url, cookie, UA, proxy flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        now = datetime.datetime.now()
        # never sign in before 9 o'clock
        if now.hour < 9:
            self.error(f"签到失败,9点前不签到")
            return False, f'【{site}】签到失败,9点前不签到'

        # fetch the sign-in page
        html_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).get_res(url="https://u2.dmhy.org/showup.php")
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in html_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        # already signed in today?
        html_res.encoding = "utf-8"
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        # not signed yet: parse the question form
        html = etree.HTML(html_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # bug fixes: the original indexed [0] before validating (IndexError on
        # an empty xpath result) and then tested the `re` module instead of
        # the extracted `req` value
        req_list = html.xpath("//form//td/input[@name='req']/@value")
        hash_list = html.xpath("//form//td/input[@name='hash']/@value")
        form_list = html.xpath("//form//td/input[@name='form']/@value")
        submit_name = html.xpath("//form//td/input[@type='submit']/@name")
        submit_value = html.xpath("//form//td/input[@type='submit']/@value")
        if not req_list or not hash_list or not form_list or not submit_name or not submit_value:
            self.error("签到失败,未获取到相关签到参数")
            return False, f'【{site}】签到失败'
        req = req_list[0]
        hash_str = hash_list[0]
        form = form_list[0]

        # pick a random answer; bound by the options actually present
        # (the fixed randint(0, 3) could overrun a shorter option list)
        answer_num = random.randint(0, len(submit_name) - 1)
        data = {
            'req': req,
            'hash': hash_str,
            'form': form,
            'message': '一切随缘~',
            submit_name[answer_num]: submit_value[answer_num]
        }
        # submit the answer
        sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).post_res(url="https://u2.dmhy.org/showup.php?action=show",
                                                        data=data)
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'

        # success response is a redirect back to showup.php:
        # sign_res.text = "<script type="text/javascript">window.location.href = 'showup.php';</script>"
        if self._success_text in sign_res.text:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        else:
            self.error(f"签到失败,未知原因")
            return False, f'【{site}】签到失败,未知原因'
| 4,596 | Python | .py | 97 | 29.42268 | 105 | 0.553175 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,114 | hdtime.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdtime.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDTime(_ISiteSigninHandler):
    """
    HDTime attendance handler.
    """
    # site url this handler matches; every handler class sets its own
    site_url = "hdtime.org"

    # response markers: first-time success / already signed in today
    _success_text = "签到成功"
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url):
        """
        Decide from the site url whether this handler applies; the default
        exact-match implementation is enough for most sites.
        :param url: site url
        :return: True when matched; signin() will then be called
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the sign-in request.
        :param site_info: site info dict (url, cookie, UA, proxy flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None
        # request the attendance page
        html_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).get_res(url="https://hdtime.org/attendance.php")
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in html_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        # inspect the response for the result markers
        # e.g. '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in html_res.text:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        if self._repeat_text in html_res.text:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
        self.error(f"签到失败,签到接口返回 {html_res.text}")
        return False, f'【{site}】签到失败'
| 2,266 | Python | .py | 49 | 27.795918 | 95 | 0.600923 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,115 | btschool.py | demigody_nas-tools/app/plugins/modules/_autosignin/btschool.py | import time
from app.helper import ChromeHelper
from app.helper.cloudflare_helper import under_challenge
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class BTSchool(_ISiteSigninHandler):
    """
    BTSchool sign-in handler.
    """
    # site url this handler matches; every handler class sets its own
    site_url = "pt.btschool.club"

    # marker shown on the index page only while today's sign-in is still pending
    _sign_text = '每日签到'

    @classmethod
    def match(cls, url):
        """
        Decide from the site url whether this handler applies.
        :param url: site url
        :return: True when matched; signin() will then be called
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the sign-in, either via headless-Chrome emulation (when
        configured and available) or via plain HTTP requests.
        :param site_info: site info dict (url, cookie, UA, proxy flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None
        # index page
        chrome = ChromeHelper()
        if site_info.get("chrome") and chrome.get_status():
            self.info(f"{site} 开始仿真签到")
            msg, html_text = self.__chrome_visit(chrome=chrome,
                                                 url="https://pt.btschool.club/index.php",
                                                 ua=ua,
                                                 site_cookie=site_cookie,
                                                 proxy=proxy,
                                                 site=site)
            # emulated visit failed
            if msg:
                return False, msg
            # pending marker absent: already signed in today
            if self._sign_text not in html_text:
                self.info(f"今日已签到")
                return True, f'【{site}】今日已签到'
            # emulated sign-in
            msg, html_text = self.__chrome_visit(chrome=chrome,
                                                 url="https://pt.btschool.club/index.php?action=addbonus",
                                                 ua=ua,
                                                 site_cookie=site_cookie,
                                                 proxy=proxy,
                                                 site=site)
            if msg:
                return False, msg
            # success when the pending marker disappeared
            if self._sign_text not in html_text:
                self.info(f"签到成功")
                return True, f'【{site}】签到成功'
            # bug fix: the original fell through and implicitly returned None here
            self.error(f"签到失败,未知原因")
            return False, f'【{site}】签到失败,未知原因'
        else:
            self.info(f"{site} 开始签到")
            html_res = RequestUtils(headers=ua, cookies=site_cookie,
                                    proxies=proxy).get_res(url="https://pt.btschool.club")
            if not html_res or html_res.status_code != 200:
                self.error(f"签到失败,请检查站点连通性")
                return False, f'【{site}】签到失败,请检查站点连通性'
            if "login.php" in html_res.text:
                self.error(f"签到失败,cookie失效")
                return False, f'【{site}】签到失败,cookie失效'
            # pending marker absent: already signed in today
            if self._sign_text not in html_res.text:
                self.info(f"今日已签到")
                return True, f'【{site}】今日已签到'
            sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                    proxies=proxy).get_res(url="https://pt.btschool.club/index.php?action=addbonus")
            if not sign_res or sign_res.status_code != 200:
                self.error(f"签到失败,签到接口请求失败")
                return False, f'【{site}】签到失败,签到接口请求失败'
            # success when the pending marker disappeared
            if self._sign_text not in sign_res.text:
                self.info(f"签到成功")
                return True, f'【{site}】签到成功'
            # bug fix: the original fell through and implicitly returned None here
            self.error(f"签到失败,未知原因")
            return False, f'【{site}】签到失败,未知原因'

    def __chrome_visit(self, chrome, url, ua, site_cookie, proxy, site):
        """
        Visit a url with headless Chrome, passing any Cloudflare challenge.
        :return: (error_message, html); error_message is None on success
        """
        if not chrome.visit(url=url, ua=ua, cookie=site_cookie,
                            proxy=proxy):
            self.warn("%s 无法打开网站" % site)
            return f"【{site}】仿真签到失败,无法打开网站!", None
        # give the page time to settle, then check for a Cloudflare challenge
        time.sleep(3)
        if under_challenge(chrome.get_html()):
            # try to pass the challenge
            cloudflare = chrome.pass_cloudflare()
            if not cloudflare:
                self.warn("%s 跳转站点失败" % site)
                return f"【{site}】仿真签到失败,跳转站点失败!", None
        # grab the page html
        html_text = chrome.get_html()
        if not html_text:
            self.warn("%s 获取站点源码失败" % site)
            return f"【{site}】仿真签到失败,获取站点源码失败!", None
        if "魔力值" not in html_text:
            self.error(f"签到失败,站点无法访问")
            return f'【{site}】仿真签到失败,站点无法访问', None
        # page looks sane: return its html
        return None, html_text
| 5,311 | Python | .py | 108 | 26.861111 | 116 | 0.498301 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,116 | 52pt.py | demigody_nas-tools/app/plugins/modules/_autosignin/52pt.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class FWpt(_ISiteSigninHandler):
    """
    52pt signin handler.
    Answers the signin quiz with ChatGPT when an OpenAI key is configured,
    otherwise falls back to a random option. Confirmed answers are cached
    locally so later runs can reuse them without asking ChatGPT again.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "52pt.site"
    # Marker meaning today's signin is already done
    _sign_regex = ['今天已经签过到了']
    # Marker meaning the signin succeeded (to be extended)
    _success_regex = ['\\d+点魔力值']
    # Local cache of known-correct answers, keyed by question text
    _answer_path = os.path.join(Config().get_temp_path(), "signin")
    _answer_file = _answer_path + "/52pt.json"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Make sure the local answer cache directory exists
        if not os.path.exists(os.path.dirname(self._answer_file)):
            os.makedirs(os.path.dirname(self._answer_file), exist_ok=True)

        # Check whether today's signin is already done
        index_res = RequestUtils(headers=ua, cookies=site_cookie,
                                 proxies=proxy).get_res(url='https://52pt.site/bakatest.php')
        if not index_res or index_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        if "login.php" in index_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        sign_status = self.sign_in_result(html_res=index_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        # Not signed in yet: parse the quiz page
        html = etree.HTML(index_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # Question id plus the answer option ids/labels
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
        answers = list(zip(option_ids, option_values))

        # Extract the question body from the "请问:" line
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            self.debug(f"获取到签到问题 {question_str}")
        else:
            self.error(f"未获取到签到问题")
            return False, f"【{site}】签到失败,未获取到签到问题"

        # Look the question up in the local answer cache first
        exits_answers = {}
        try:
            with open(self._answer_file, 'r') as f:
                json_str = f.read()
                exits_answers = json.loads(json_str)
            # Cached answer for this question (a single id or a list of ids)
            question_answer = exits_answers[question_str]
            if not isinstance(question_answer, list):
                question_answer = [question_answer]
            # Map the cached ids back onto the options of the current page
            choice = []
            for q in question_answer:
                for num, answer in answers:
                    if str(q) == str(num):
                        choice.append(int(q))
            if len(choice) > 0:
                # Submit using the cached answer
                return self.__signin(questionid=questionid,
                                     choice=choice,
                                     site_cookie=site_cookie,
                                     ua=ua,
                                     proxy=proxy,
                                     site=site)
        except (FileNotFoundError, IOError, OSError, KeyError, json.JSONDecodeError):
            # No usable cache entry (missing/unreadable file, corrupt JSON or
            # unknown question): fall through to the ChatGPT/random path.
            # KeyError/JSONDecodeError must be caught here too, otherwise a
            # cache miss crashes the whole signin.
            self.debug("查询本地已知答案失败,继续使用ChatGPT查询")

        # Default to a random option; replaced by the ChatGPT answer below
        # when one can be obtained
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]

        # Build the ChatGPT prompt
        gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
        gpt_question = f"题目:{question_str}\n" \
                       f"选项:{gpt_options}"
        self.debug(f"组装chatgpt问题 {gpt_question}")
        # Ask ChatGPT for the answer
        answer = OpenAiHelper().get_question_answer(question=gpt_question)
        self.debug(f"chatpgt返回结果 {answer}")

        # Interpret the ChatGPT reply
        if answer is None:
            self.warn(f"ChatGPT未启用, 开始随机签到")
        elif answer:
            # Pull the numeric ids out of the reply (raw string for the regex)
            answer_nums = list(map(int, re.findall(r"\d+", answer)))
            if not answer_nums:
                self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
            else:
                choice = []
                for answer in answer_nums:
                    # Only keep numbers that are valid option ids
                    if str(answer) in option_ids:
                        choice.append(int(answer))
                        self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")

        # Submit the (ChatGPT or random) answer; pass the cache along so a
        # confirmed answer gets persisted
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site,
                             exits_answers=exits_answers,
                             question=question_str)

    def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
        """
        Submit the signin form.
        Form fields:
            questionid: 450
            choice[]: 8
            choice[]: 4
            usercomment: mood text
            submit: submit
        Multiple selected answers produce repeated choice[] fields.
        :return: (success flag, result message)
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        self.debug(f"签到请求参数 {data}")
        sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).post_res(url='https://52pt.site/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'
        # Check the response for the success marker
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            self.info(f"{site}签到成功")
            if exits_answers and question:
                # Persist the confirmed answer into the local cache
                self.__write_local_answer(exits_answers=exits_answers or {},
                                          question=question,
                                          answer=choice)
            return True, f'【{site}】签到成功'
        else:
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                self.info(f"今日已签到")
                return True, f'【{site}】今日已签到'
            self.error(f"签到失败,请到页面查看")
            return False, f'【{site}】签到失败,请到页面查看'

    def __write_local_answer(self, exits_answers, question, answer):
        """
        Persist a confirmed answer into the local answer cache file.
        """
        try:
            exits_answers[question] = answer
            # Pretty-print so the cache file stays human-editable
            formatted_data = json.dumps(exits_answers, indent=4)
            with open(self._answer_file, 'w') as f:
                f.write(formatted_data)
        except (FileNotFoundError, IOError, OSError):
            self.debug("签到成功写入本地文件失败")
| 9,070 | Python | .py | 192 | 27.619792 | 108 | 0.528001 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,117 | ttg.py | demigody_nas-tools/app/plugins/modules/_autosignin/ttg.py | import re
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class TTG(_ISiteSigninHandler):
    """
    TTG (totheglory) signin handler.
    Extracts the signed_timestamp/signed_token parameters embedded in the
    home page and posts them to the signin endpoint.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "totheglory.im"
    # Markers meaning today's signin is already done
    _sign_regex = ['<b style="color:green;">已签到</b>']
    _sign_text = '亲,您今天已签到过,不要太贪哦'
    # Marker meaning the signin succeeded
    _success_text = '您已连续签到'

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None
        # Fetch the home page
        html_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url="https://totheglory.im")
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in html_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        # Already signed in today?
        html_res.encoding = "utf-8"
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
        # Extract the signin parameters embedded in the page script.
        # Guard both searches: calling .group() on a failed re.search would
        # raise AttributeError when the page layout changes.
        timestamp_match = re.search(r'(?<=signed_timestamp: ")\d{10}', html_res.text)
        token_match = re.search(r'(?<=signed_token: ").*(?=")', html_res.text)
        if not timestamp_match or not token_match:
            self.error(f"签到失败,未获取到签到参数")
            return False, f'【{site}】签到失败,未获取到签到参数'
        signed_timestamp = timestamp_match.group()
        signed_token = token_match.group()
        self.debug(f"signed_timestamp={signed_timestamp} signed_token={signed_token}")

        data = {
            'signed_timestamp': signed_timestamp,
            'signed_token': signed_token
        }
        # Post the signin request
        sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).post_res(url="https://totheglory.im/signed.php",
                                                        data=data)
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'
        sign_res.encoding = "utf-8"
        if self._success_text in sign_res.text:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        if self._sign_text in sign_res.text:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
        self.error(f"签到失败,未知原因")
        return False, f'【{site}】签到失败,未知原因'
| 3,434 | Python | .py | 72 | 29.777778 | 116 | 0.574552 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,118 | hdchina.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdchina.py | import json
from lxml import etree
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDChina(_ISiteSigninHandler):
    """
    HDChina signin handler.
    Fetches the index page to obtain a fresh session cookie and the x-csrf
    token, then posts the token to the signin plugin endpoint.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "hdchina.org"
    # Marker meaning today's signin is already done
    _sign_regex = ['<a class="label label-default" href="#">已签到</a>']

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Work around hdchina cookies expiring after each signin:
        # keep only the "hdchina=" part of the configured cookie
        cookie = ""
        # Split on semicolons and keep matching fragments
        sub_strs = site_cookie.split(";")
        for sub_str in sub_strs:
            if "hdchina=" in sub_str:
                cookie += sub_str + ";"
        if "hdchina=" not in cookie:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        site_cookie = cookie

        # Fetch the index page
        html_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).get_res(url="https://hdchina.org/index.php")
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in html_res.text or "阻断页面" in html_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        # Use the freshly returned cookies for the signin request
        site_cookie = ';'.join(['{}={}'.format(k, v) for k, v in html_res.cookies.get_dict().items()])
        # Already signed in today?
        html_res.encoding = "utf-8"
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
        # Not signed in yet: parse the page for the csrf token
        html = etree.HTML(html_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # Guard the xpath result before indexing: an empty list would raise
        # IndexError before the old "if not x_csrf" check could ever run
        csrf_values = html.xpath("//meta[@name='x-csrf']/@content")
        if not csrf_values:
            self.error("签到失败,获取x-csrf失败")
            return False, f'【{site}】签到失败'
        x_csrf = csrf_values[0]
        self.debug(f"获取到x-csrf {x_csrf}")

        # Post the signin request
        data = {
            'csrf': x_csrf
        }
        sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).post_res(url="https://hdchina.org/plugin_sign-in.php?cmd=signin", data=data)
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'
        # Guard against a non-JSON response body
        try:
            sign_dict = json.loads(sign_res.text)
        except json.JSONDecodeError:
            self.error(f"签到失败,签到接口返回数据异常")
            return False, f'【{site}】签到失败,签到接口返回数据异常'
        self.debug(f"签到返回结果 {sign_dict}")
        if sign_dict['state']:
            # {'state': 'success', 'signindays': 10, 'integral': 20}
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        else:
            # {'state': False, 'msg': '不正确的CSRF / Incorrect CSRF token'}
            self.error(f"签到失败,不正确的CSRF / Incorrect CSRF token")
            return False, f'【{site}】签到失败'
| 4,293 | Python | .py | 91 | 29.076923 | 123 | 0.563437 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,119 | hdsky.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdsky.py | import json
import time
from app.helper import OcrHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDSky(_ISiteSigninHandler):
    """
    HDSky OCR signin handler.
    Requests a captcha image, OCRs the 6-character code and posts it to the
    showup endpoint.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "hdsky.me"
    # Marker meaning today's signin is already done
    _sign_regex = ['已签到']

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Check whether today's signin is already done
        index_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url='https://hdsky.me')
        if not index_res or index_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        if "login.php" in index_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        sign_status = self.sign_in_result(html_res=index_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        # Request a captcha hash; retried a few times to ride out transient
        # network failures
        res_times = 0
        img_hash = None
        while not img_hash and res_times <= 3:
            image_res = RequestUtils(headers=ua, cookies=site_cookie,
                                     proxies=proxy).post_res(url='https://hdsky.me/image_code_ajax.php',
                                                             data={'action': 'new'})
            if image_res and image_res.status_code == 200:
                image_json = json.loads(image_res.text)
                if image_json["success"]:
                    img_hash = image_json["code"]
                    break
            res_times += 1
            self.debug(f"获取{site}验证码失败,正在进行重试,目前重试次数 {res_times}")
            time.sleep(1)

        # Got a captcha hash
        if img_hash:
            # Full captcha image url
            img_get_url = 'https://hdsky.me/image.php?action=regimage&imagehash=%s' % img_hash
            self.debug(f"获取到{site}验证码链接 {img_get_url}")
            # OCR repeatedly until a 6-character code is recognised
            times = 0
            ocr_result = None
            # Retry the OCR a few times
            while times <= 3:
                # OCR the captcha image
                ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                          cookie=site_cookie,
                                                          ua=ua)
                self.debug(f"ocr识别{site}验证码 {ocr_result}")
                if ocr_result:
                    if len(ocr_result) == 6:
                        self.info(f"ocr识别{site}验证码成功 {ocr_result}")
                        break
                times += 1
                self.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}")
                time.sleep(1)
            if ocr_result:
                # Build the signin payload
                data = {
                    'action': 'showup',
                    'imagehash': img_hash,
                    'imagestring': ocr_result
                }
                # Post the signin request
                res = RequestUtils(headers=ua, cookies=site_cookie,
                                   proxies=proxy).post_res(url='https://hdsky.me/showup.php', data=data)
                if res and res.status_code == 200:
                    if json.loads(res.text)["success"]:
                        self.info(f"签到成功")
                        return True, f'【{site}】签到成功'
                    elif str(json.loads(res.text)["message"]) == "date_unmatch":
                        # Already signed in today
                        self.warn(f"重复成功")
                        return True, f'【{site}】今日已签到'
                    elif str(json.loads(res.text)["message"]) == "invalid_imagehash":
                        # Captcha was wrong
                        self.warn(f"签到失败:验证码错误")
                        return False, f'【{site}】签到失败:验证码错误'

        self.error(f'签到失败:未获取到验证码')
        return False, f'【{site}】签到失败:未获取到验证码'
| 5,250 | Python | .py | 106 | 27.207547 | 112 | 0.497821 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,120 | hdfans.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdfans.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDFans(_ISiteSigninHandler):
    """
    Signin handler for hdfans.
    Requesting the attendance page performs the signin; the response text
    tells success from repetition.
    """
    # Site url used by match(); every handler sets its own
    site_url = "hdfans.org"
    # Response markers: fresh success vs. repeated signin
    _success_text = "签到成功"
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url):
        """
        Tell whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when the url belongs to this site; the framework then
                 dispatches signin() to this handler
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Run the signin by requesting the attendance page.
        :param site_info: dict carrying the site's name, cookie, UA and proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None

        # The GET itself performs the signin
        response = RequestUtils(headers=user_agent, cookies=cookie,
                                proxies=proxies).get_res(url="https://hdfans.org/attendance.php")
        if not response or response.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        page = response.text
        if "login.php" in page:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        # Success page reads e.g. '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in page:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        if self._repeat_text in page:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        self.error(f"签到失败,签到接口返回 {page}")
        return False, f'【{site}】签到失败'
| 2,266 | Python | .py | 49 | 27.877551 | 95 | 0.601841 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,121 | hdcity.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdcity.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDCity(_ISiteSigninHandler):
    """
    Signin handler for hdcity.
    Hitting the /sign endpoint performs the signin; the response body tells
    a fresh signin from a repeated one.
    """
    # Site url used by match(); every handler sets its own
    site_url = "hdcity.city"
    # Marker for a fresh successful signin
    _success_text = '本次签到获得魅力'
    # Marker for an already-done signin
    _repeat_text = '已签到'

    @classmethod
    def match(cls, url):
        """
        Check whether the given url belongs to this site.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        matched = StringUtils.url_equal(url, cls.site_url)
        return True if matched else False

    def signin(self, site_info: dict):
        """
        Sign in by requesting the /sign endpoint and inspecting the response.
        :param site_info: dict carrying the site's name, cookie, UA and proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        requester = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy)
        html_res = requester.get_res(url="https://hdcity.city/sign")
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        body = html_res.text
        if "login" in body:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        # Fresh signin first, then the already-signed marker
        if self._success_text in body:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        if self._repeat_text in body:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        self.error(f"签到失败,签到接口返回 {body}")
        return False, f'【{site}】签到失败'
| 2,233 | Python | .py | 49 | 27.673469 | 119 | 0.608824 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,122 | opencd.py | demigody_nas-tools/app/plugins/modules/_autosignin/opencd.py | import json
import time
from lxml import etree
from app.helper import OcrHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class Opencd(_ISiteSigninHandler):
    """
    open.cd OCR signin handler.
    Reads the captcha from the signin plugin page, OCRs the 6-character code
    and posts it back.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "open.cd"
    # Marker meaning today's signin is already done
    _repeat_text = "/plugin_sign-in.php?cmd=show-log"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Check whether today's signin is already done
        index_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url='https://www.open.cd')
        if not index_res or index_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        if "login.php" in index_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'

        if self._repeat_text in index_res.text:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        # Fetch the signin plugin page carrying the captcha parameters
        sign_param_res = RequestUtils(headers=ua, cookies=site_cookie,
                                      proxies=proxy).get_res(url='https://www.open.cd/plugin_sign-in.php')
        if not sign_param_res or sign_param_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'

        # Not signed in yet: parse the page
        html = etree.HTML(sign_param_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # Captcha parameters. Guard the xpath lists before indexing: an empty
        # result would raise IndexError before the old "if not img_url" check
        # could ever run.
        img_urls = html.xpath('//form[@id="frmSignin"]//img/@src')
        img_hashes = html.xpath('//form[@id="frmSignin"]//input[@name="imagehash"]/@value')
        if not img_urls or not img_hashes:
            self.error(f"签到失败,获取签到参数失败")
            return False, f'【{site}】签到失败,获取签到参数失败'
        img_url = img_urls[0]
        img_hash = img_hashes[0]

        # Full captcha image url
        img_get_url = 'https://www.open.cd/%s' % img_url
        self.debug(f"获取到{site}验证码链接 {img_get_url}")

        # OCR repeatedly until a 6-character code is recognised
        times = 0
        ocr_result = None
        # Retry the OCR a few times
        while times <= 3:
            # OCR the captcha image
            ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                      cookie=site_cookie,
                                                      ua=ua)
            self.debug(f"ocr识别{site}验证码 {ocr_result}")
            if ocr_result:
                if len(ocr_result) == 6:
                    self.info(f"ocr识别{site}验证码成功 {ocr_result}")
                    break
            times += 1
            self.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}")
            time.sleep(1)

        if ocr_result:
            # Build the signin payload
            data = {
                'imagehash': img_hash,
                'imagestring': ocr_result
            }
            # Post the signin request
            sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                    proxies=proxy).post_res(url='https://www.open.cd/plugin_sign-in.php?cmd=signin', data=data)
            if sign_res and sign_res.status_code == 200:
                self.debug(f"sign_res返回 {sign_res.text}")
                # e.g. '{"state":"success","signindays":"0","integral":"10"}'
                # Guard against a non-JSON response body
                try:
                    sign_dict = json.loads(sign_res.text)
                except json.JSONDecodeError:
                    self.error(f"签到失败,签到接口返回数据异常")
                    return False, f'【{site}】签到失败'
                if sign_dict['state']:
                    self.info(f"签到成功")
                    return True, f'【{site}】签到成功'
                else:
                    self.error(f"签到失败,签到接口返回 {sign_dict}")
                    return False, f'【{site}】签到失败'

        self.error(f'签到失败:未获取到验证码')
        return False, f'【{site}】签到失败:未获取到验证码'
| 4,913 | Python | .py | 101 | 29.09901 | 127 | 0.546087 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,123 | tjupt.py | demigody_nas-tools/app/plugins/modules/_autosignin/tjupt.py | import json
import os
import time
import zhconv
import re
from io import BytesIO
from PIL import Image
from lxml import etree
from bs4 import BeautifulSoup
from app.helper import ChromeHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class Tjupt(_ISiteSigninHandler):
    """
    TJUPT signin handler.
    The signin page shows a movie poster captcha with several title options.
    Strategy: local answer cache -> douban poster similarity -> Google Lens
    reverse image search.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "tjupt.org"
    # Signin page url
    _sign_in_url = 'https://www.tjupt.org/attendance.php'
    # Marker meaning today's signin is already done
    _sign_regex = ['<a href="attendance.php">今日已签到</a>']
    # Markers meaning the signin succeeded.
    # NOTE: no trailing comma here — the original stray comma turned this
    # into a tuple containing a list, so sign_in_result() received a list
    # where a regex string was expected.
    _succeed_regex = ['这是您的首次签到,本次签到获得\\d+个魔力值。',
                      '签到成功,这是您的第\\d+次签到,已连续签到\\d+天,本次签到获得\\d+个魔力值。',
                      '重新签到成功,本次签到获得\\d+个魔力值']
    # Local cache of known-correct answers, keyed by captcha image hash
    _answer_path = Config().get_config_path() + "/temp/signin"
    _answer_file = _answer_path + "/tjupt.json"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Make sure the local answer cache directory exists
        if not os.path.exists(os.path.dirname(self._answer_file)):
            os.makedirs(os.path.dirname(self._answer_file), exist_ok=True)

        # Fetch the signin page
        html_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url=self._sign_in_url)
        if not html_res or html_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in html_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'

        # Not signed in yet: parse the captcha
        html = etree.HTML(html_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # Guard the xpath result before indexing (empty list -> IndexError)
        img_urls = html.xpath('//table[@class="captcha"]//img/@src')
        if not img_urls:
            self.error(f"签到失败,未获取到签到图片")
            return False, f'【{site}】签到失败,未获取到签到图片'
        # Captcha image url
        img_url = "https://www.tjupt.org" + img_urls[0]
        self.info(f"获取到签到图片 {img_url}")
        # Download the captcha image and hash it
        captcha_img_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url=img_url)
        if not captcha_img_res or captcha_img_res.status_code != 200:
            self.error(f"签到图片 {img_url} 请求失败")
            return False, f'【{site}】签到失败,未获取到签到图片'
        captcha_img = Image.open(BytesIO(captcha_img_res.content))
        captcha_img_hash = self._tohash(captcha_img)
        self.debug(f"签到图片hash {captcha_img_hash}")

        # Answer options: form values plus their visible labels
        values = html.xpath("//input[@name='answer']/@value")
        options = html.xpath("//input[@name='answer']/following-sibling::text()")
        if not values or not options:
            self.error(f"签到失败,未获取到答案选项")
            return False, f'【{site}】签到失败,未获取到答案选项'
        # (form value, label) pairs
        answers = list(zip(values, options))
        self.debug(f"获取到所有签到选项 {answers}")

        # Look this captcha hash up in the local answer cache first
        exits_answers = {}
        try:
            with open(self._answer_file, 'r') as f:
                json_str = f.read()
                exits_answers = json.loads(json_str)
            # Cached answer for this captcha hash
            captcha_answer = exits_answers[captcha_img_hash]
            # When a cached answer exists, match it against the options
            if captcha_answer:
                for value, answer in answers:
                    if str(captcha_answer) == str(answer):
                        # Confirmed answer: submit it
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site)
        except (FileNotFoundError, IOError, OSError, KeyError, json.JSONDecodeError):
            # No usable cache entry (missing/unreadable file, corrupt JSON or
            # unknown hash): fall through to the douban lookup. KeyError and
            # JSONDecodeError must be caught too, otherwise a cache miss
            # crashes the whole signin.
            self.debug("查询本地已知答案失败,继续请求豆瓣查询")

        # No local answer: query douban for each option's poster and compare
        for value, answer in answers:
            if answer:
                # Douban title suggestion lookup
                db_res = RequestUtils().get_res(url=f'https://movie.douban.com/j/subject_suggest?q={answer}')
                if not db_res or db_res.status_code != 200:
                    self.debug(f"签到选项 {answer} 未查询到豆瓣数据")
                    continue
                # Douban results
                db_answers = json.loads(db_res.text)
                if not isinstance(db_answers, list):
                    db_answers = [db_answers]
                if len(db_answers) == 0:
                    self.debug(f"签到选项 {answer} 查询到豆瓣数据为空")
                for db_answer in db_answers:
                    answer_img_url = db_answer['img']
                    # Hash the candidate poster
                    answer_img_res = RequestUtils().get_res(url=answer_img_url)
                    if not answer_img_res or answer_img_res.status_code != 200:
                        self.debug(f"签到答案 {answer} {answer_img_url} 请求失败")
                        continue
                    answer_img = Image.open(BytesIO(answer_img_res.content))
                    answer_img_hash = self._tohash(answer_img)
                    self.debug(f"签到答案图片hash {answer} {answer_img_hash}")
                    # Similarity between captcha and candidate; > 0.9 is
                    # treated as the correct answer
                    score = self._comparehash(captcha_img_hash, answer_img_hash)
                    self.info(f"签到图片与选项 {answer} 豆瓣图片相似度 {score}")
                    if score > 0.9:
                        # Confirmed answer: submit and cache it
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site,
                                             exits_answers=exits_answers,
                                             captcha_img_hash=captcha_img_hash)
                # Pause 5s between options so douban does not block the ip
                time.sleep(5)
        self.error(f"豆瓣图片匹配,未获取到匹配答案")

        # Douban found nothing: fall back to Google Lens reverse image search
        image_search_url = f"https://lens.google.com/uploadbyurl?url={img_url}"
        chrome = ChromeHelper()
        chrome.visit(url=image_search_url, proxy=Config().get_proxies())
        # Wait for the page to render
        time.sleep(3)
        # Collect the recognition results
        html_text = chrome.get_html()
        search_results = BeautifulSoup(html_text, "lxml").find_all("div", class_="UAiK1e")
        if not search_results:
            self.info(f'Google识图失败,未获取到识图结果')
        else:
            res_count = len(search_results)
            # Convert traditional to simplified Chinese and join the texts
            search_results = "@".join(
                [zhconv.convert(result.text, "zh-hans") for result in search_results if result.text]
            )
            # Count how often each option label appears in the results.
            # re.escape makes the label match literally: raw titles may
            # contain regex metacharacters that would otherwise mis-match
            # or fail to compile.
            count_results = []
            count_flag = False
            for value, answer in answers:
                answer_re = re.compile(re.escape(re.sub(r"\d$", "", answer)))
                count = len(re.findall(answer_re, search_results))
                if count >= min(res_count, 3):
                    count_flag = True
                count_results.append((count, value, answer))
            if count_flag:
                log_content = f'Google识图结果共{res_count}条,各选项出现次数:'
                count_results.sort(key=lambda x: x[0], reverse=True)
                for result in count_results:
                    count, value, answer = result
                    log_content += f'{answer} {count}次;'
                log_content += f'其中选项 {count_results[0][2]} 出现次数最多,认为是正确答案'
                self.info(log_content)
                return self.__signin(answer=count_results[0][1],
                                     site_cookie=site_cookie,
                                     ua=ua,
                                     proxy=proxy,
                                     site=site,
                                     exits_answers=exits_answers,
                                     captcha_img_hash=captcha_img_hash)
            else:
                self.info(f'Google识图结果中未有选项符合条件')
        # No strategy produced an answer: fail
        return False, f'【{site}】签到失败,未获取到匹配答案'

    def __signin(self, answer, site_cookie, ua, proxy, site, exits_answers=None, captcha_img_hash=None):
        """
        Submit the signin form with the chosen answer and, on success,
        persist the answer into the local cache.
        :return: (success flag, result message)
        """
        data = {
            'answer': answer,
            'submit': '提交'
        }
        self.debug(f"提交data {data}")
        sign_in_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).post_res(url=self._sign_in_url, data=data)
        if not sign_in_res or sign_in_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'
        # Check the response for a success marker
        sign_status = self.sign_in_result(html_res=sign_in_res.text,
                                          regexs=self._succeed_regex)
        if sign_status:
            self.info(f"签到成功")
            if exits_answers and captcha_img_hash:
                # Persist the confirmed answer into the local cache
                self.__write_local_answer(exits_answers=exits_answers or {},
                                          captcha_img_hash=captcha_img_hash,
                                          answer=answer)
            return True, f'【{site}】签到成功'
        else:
            self.error(f"签到失败,请到页面查看")
            return False, f'【{site}】签到失败,请到页面查看'

    def __write_local_answer(self, exits_answers, captcha_img_hash, answer):
        """
        Persist a confirmed answer into the local answer cache file.
        """
        try:
            exits_answers[captcha_img_hash] = answer
            # Pretty-print so the cache file stays human-editable
            formatted_data = json.dumps(exits_answers, indent=4)
            with open(self._answer_file, 'w') as f:
                f.write(formatted_data)
        except (FileNotFoundError, IOError, OSError):
            self.debug("签到成功写入本地文件失败")

    @staticmethod
    def _tohash(img, shape=(10, 10)):
        """
        Average-hash an image: downscale, grayscale, then emit one bit per
        pixel depending on whether it is above the mean brightness.
        """
        img = img.resize(shape)
        gray = img.convert('L')
        s = 0
        hash_str = ''
        for i in range(shape[1]):
            for j in range(shape[0]):
                s = s + gray.getpixel((j, i))
        avg = s / (shape[0] * shape[1])
        for i in range(shape[1]):
            for j in range(shape[0]):
                if gray.getpixel((j, i)) > avg:
                    hash_str = hash_str + '1'
                else:
                    hash_str = hash_str + '0'
        return hash_str

    @staticmethod
    def _comparehash(hash1, hash2, shape=(10, 10)):
        """
        Compare two image hashes.
        :return: similarity in [0, 1], or -1 when the lengths differ
        """
        n = 0
        if len(hash1) != len(hash2):
            return -1
        for i in range(len(hash1)):
            if hash1[i] == hash2[i]:
                n = n + 1
        return n / (shape[0] * shape[1])
| 13,144 | Python | .py | 269 | 28.215613 | 125 | 0.516307 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,124 | hares.py | demigody_nas-tools/app/plugins/modules/_autosignin/hares.py | import json
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class Hares(_ISiteSigninHandler):
    """
    Hares (白兔) signin handler.
    Visits the home page to validate the cookie, then calls the JSON
    attendance endpoint.
    """
    # Site url matched by this handler; every handler sets its own
    site_url = "club.hares.top"
    # Marker meaning today's signin is already done
    _sign_text = '已签到'

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler matches the given site url.
        :param url: site url
        :return: True when matched; the framework then calls signin()
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: site info containing url, cookie, UA, proxy flag
        :return: (success flag, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None

        # Visit the home page to make sure the cookie is still valid
        html_res = RequestUtils(headers=ua, cookies=site_cookie, proxies=proxy).get_res(url="https://club.hares.top")
        if not html_res or html_res.status_code != 200:
            self.error(f"模拟访问失败,请检查站点连通性")
            return False, f'【{site}】模拟访问失败,请检查站点连通性'

        if "login.php" in html_res.text:
            self.error(f"模拟访问失败,cookie失效")
            return False, f'【{site}】模拟访问失败,cookie失效'

        # if self._sign_text in html_res.text:
        #     self.info(f"今日已签到")
        #     return True, f'【{site}】今日已签到'

        # The attendance endpoint answers JSON when asked to
        headers = {
            'Accept': 'application/json',
            "User-Agent": ua
        }
        sign_res = RequestUtils(headers=headers, cookies=site_cookie,
                                proxies=proxy).get_res(url="https://club.hares.top/attendance.php?action=sign")
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'

        # {"code":1,"msg":"您今天已经签到过了"}
        # {"code":0,"msg":"签到成功"}
        # Guard against a non-JSON response body
        try:
            sign_dict = json.loads(sign_res.text)
        except json.JSONDecodeError:
            self.error(f"签到失败,签到接口返回数据异常")
            return False, f'【{site}】签到失败,签到接口返回数据异常'
        if sign_dict['code'] == 0:
            self.info(f"签到成功")
            return True, f'【{site}】签到成功'
        else:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
| 2,705 | Python | .py | 59 | 28.59322 | 117 | 0.584844 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,125 | chdbits.py | demigody_nas-tools/app/plugins/modules/_autosignin/chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
    """
    CHDBits (彩虹岛) signin handler.
    The site's daily signin is a quiz: if an OpenAI key is configured the
    ChatGPT helper is asked for the answer, otherwise a random option is
    submitted. Confirmed answers are cached locally so later signins for
    the same question can be answered directly.
    """
    # Site url used by match() to pick this handler
    site_url = "ptchdbits.co"
    # Pattern present on the page when today's signin was already done
    _sign_regex = ['今天已经签过到了']
    # Pattern present in the response when the signin succeeded
    _success_regex = ['\\d+点魔力值']
    # Local cache of known-correct quiz answers (question text -> option ids)
    _answer_path = os.path.join(Config().get_temp_path(), "signin")
    _answer_file = _answer_path + "/chdbits.json"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when the url belongs to this site; signin() is then used
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict):
        """
        Run the signin flow: load the quiz page, try a cached answer first,
        otherwise ask ChatGPT (falling back to a random option) and submit.
        :param site_info: dict with the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = Config().get_proxies() if site_info.get("proxy") else None
        # Make sure the answer-cache directory exists
        if not os.path.exists(os.path.dirname(self._answer_file)):
            os.makedirs(os.path.dirname(self._answer_file), exist_ok=True)
        # Fetch the quiz page; it also tells us whether we already signed in
        index_res = RequestUtils(headers=ua, cookies=site_cookie,
                                 proxies=proxy).get_res(url='https://ptchdbits.co/bakatest.php')
        if not index_res or index_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{site}】签到失败,请检查站点连通性'
        if "login.php" in index_res.text:
            self.error(f"签到失败,cookie失效")
            return False, f'【{site}】签到失败,cookie失效'
        sign_status = self.sign_in_result(html_res=index_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            self.info(f"今日已签到")
            return True, f'【{site}】今日已签到'
        # Not signed in yet - parse the quiz out of the page
        html = etree.HTML(index_res.text)
        if not html:
            return False, f'【{site}】签到失败'
        # Question id, option ids/labels and the question text
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
        answers = list(zip(option_ids, option_values))
        # Strip the "请问:" prefix from the question text
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            self.debug(f"获取到签到问题 {question_str}")
        else:
            self.error(f"未获取到签到问题")
            return False, f"【{site}】签到失败,未获取到签到问题"
        # Try the local cache of previously confirmed answers first
        exits_answers = {}
        try:
            with open(self._answer_file, 'r') as f:
                json_str = f.read()
            exits_answers = json.loads(json_str)
            # Cached value is a list of option ids (normalise older scalars)
            question_answer = exits_answers[question_str]
            if not isinstance(question_answer, list):
                question_answer = [question_answer]
            # Only submit cached ids that still exist among today's options
            choice = []
            for q in question_answer:
                for num, answer in answers:
                    if str(q) == str(num):
                        choice.append(int(q))
            if len(choice) > 0:
                # Cached answer usable - submit it directly
                return self.__signin(questionid=questionid,
                                     choice=choice,
                                     site_cookie=site_cookie,
                                     ua=ua,
                                     proxy=proxy,
                                     site=site)
        except (FileNotFoundError, IOError, OSError, KeyError, json.JSONDecodeError):
            # Missing/corrupt cache file or an unseen question: fall through
            # to the ChatGPT/random path instead of crashing the signin.
            # (KeyError/JSONDecodeError were previously uncaught.)
            self.debug("查询本地已知答案失败,继续请求豆瓣查询")
        # Default to a random option; replaced if ChatGPT returns usable ids
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Build the prompt for ChatGPT
        gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
        gpt_question = f"题目:{question_str}\n" \
                       f"选项:{gpt_options}"
        self.debug(f"组装chatgpt问题 {gpt_question}")
        # Ask ChatGPT for the answer
        answer = OpenAiHelper().get_question_answer(question=gpt_question)
        self.debug(f"chatpgt返回结果 {answer}")
        # Interpret the ChatGPT reply
        if answer is None:
            # ChatGPT not configured - keep the random choice
            self.warn(f"ChatGPT未启用, 开始随机签到")
        elif answer:
            # Pull the numeric option ids out of the reply
            answer_nums = list(map(int, re.findall(r"\d+", answer)))
            if not answer_nums:
                self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
            else:
                choice = []
                for answer in answer_nums:
                    # Accept only numbers that are valid option ids
                    if str(answer) in option_ids:
                        choice.append(int(answer))
                        self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
        # Submit (and on success, cache the confirmed answer)
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site,
                             exits_answers=exits_answers,
                             question=question_str)

    def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
        """
        Post the quiz answer to the site.
        Form fields:
            questionid: 450
            choice[]: 8
            choice[]: 4
            usercomment: mood text
            submit: 提交
        Multi-answer questions repeat the choice[] field.
        :return: (success flag, human readable message)
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        self.debug(f"签到请求参数 {data}")
        sign_res = RequestUtils(headers=ua, cookies=site_cookie,
                                proxies=proxy).post_res(url='https://ptchdbits.co/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{site}】签到失败,签到接口请求失败'
        # Check the response for the success pattern
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            self.info(f"签到成功")
            if exits_answers and question:
                # Remember the confirmed answer for future signins
                self.__write_local_answer(exits_answers=exits_answers or {},
                                          question=question,
                                          answer=choice)
            return True, f'【{site}】签到成功'
        else:
            # Not a success page - maybe we were already signed in
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                self.info(f"今日已签到")
                return True, f'【{site}】今日已签到'
            self.error(f"签到失败,请到页面查看")
            return False, f'【{site}】签到失败,请到页面查看'

    def __write_local_answer(self, exits_answers, question, answer):
        """
        Persist a confirmed answer into the local cache file.
        Failures are logged and swallowed - caching is best-effort.
        """
        try:
            exits_answers[question] = answer
            formatted_data = json.dumps(exits_answers, indent=4)
            with open(self._answer_file, 'w') as f:
                f.write(formatted_data)
        except (FileNotFoundError, IOError, OSError):
            self.debug("签到成功写入本地文件失败")
| 9,090 | Python | .py | 192 | 27.671875 | 108 | 0.528885 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,126 | hhanclub.py | demigody_nas-tools/app/plugins/modules/_autosignin/hhanclub.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class Hhanclub(_ISiteSigninHandler):
    """
    Signin handler for hhanclub.top (海胆).
    """
    # Site url used by match() to pick this handler
    site_url = "hhanclub.top"
    # Pattern present on the page once today's signin is done
    _succeed_regex = ["今日签到排名:(\\d+) / \\d+"]

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the signin request for this site.
        :param site_info: dict holding the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None
        # Requesting the attendance page is the whole signin action
        response = RequestUtils(headers=user_agent,
                                cookies=cookie,
                                proxies=proxies).get_res(url="https://hhanclub.top/attendance.php")
        if not response or response.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{name}】签到失败,请检查站点连通性'
        if "rule-tips" not in response.text:
            # Logged-in pages carry this marker; its absence means the cookie expired
            self.error(f"签到失败,cookie失效")
            return False, f'【{name}】签到失败,cookie失效'
        if self.sign_in_result(html_res=response.text, regexs=self._succeed_regex):
            self.info(f"签到成功")
            return True, f'【{name}】签到成功'
        self.error(f"签到失败,签到接口返回 {response.text}")
        return False, f'【{name}】签到失败'
| 2,096 | Python | .py | 45 | 28.755556 | 97 | 0.591947 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,127 | pterclub.py | demigody_nas-tools/app/plugins/modules/_autosignin/pterclub.py | import json
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class PTerClub(_ISiteSigninHandler):
    """
    Signin handler for pterclub.com (猫站).
    """
    # Site url used by match() to pick this handler
    site_url = "pterclub.com"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the signin request against the site's JSON endpoint.
        :param site_info: dict holding the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None
        response = RequestUtils(headers=user_agent,
                                cookies=cookie,
                                proxies=proxies).get_res(url="https://pterclub.com/attendance-ajax.php")
        if not response or response.status_code != 200:
            self.error(f"签到失败,签到接口请求失败")
            return False, f'【{name}】签到失败,请检查cookie是否失效'
        result = json.loads(response.text)
        if result['status'] == '1':
            # e.g. {"status":"1","data":" (签到已成功300)","message":"<p>这是您的第<b>237</b>次签到,
            # 已连续签到<b>237</b>天。</p><p>本次签到获得<b>300</b>克猫粮。</p>"}
            self.info(f"签到成功")
            return True, f'【{name}】签到成功'
        # e.g. {"status":"0","data":"抱歉","message":"您今天已经签到过了,请勿重复刷新。"}
        self.info(f"今日已签到")
        return True, f'【{name}】今日已签到'
| 2,098 | Python | .py | 44 | 29.431818 | 102 | 0.587156 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,128 | haidan.py | demigody_nas-tools/app/plugins/modules/_autosignin/haidan.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HaiDan(_ISiteSigninHandler):
    """
    Signin handler for haidan.video (海胆).
    """
    # Site url used by match() to pick this handler
    site_url = "haidan.video"
    # Pattern present on the page once today's signin is done
    _succeed_regex = ['(?<=value=")已经打卡(?=")']

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the signin request for this site.
        :param site_info: dict holding the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None
        # Requesting the signin page performs the signin itself
        response = RequestUtils(headers=user_agent,
                                cookies=cookie,
                                proxies=proxies).get_res(url="https://www.haidan.video/signin.php")
        if not response or response.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{name}】签到失败,请检查站点连通性'
        if "login.php" in response.text:
            # Redirected to the login page - cookie expired
            self.error(f"签到失败,cookie失效")
            return False, f'【{name}】签到失败,cookie失效'
        if self.sign_in_result(html_res=response.text, regexs=self._succeed_regex):
            self.info(f"签到成功")
            return True, f'【{name}】签到成功'
        self.error(f"签到失败,签到接口返回 {response.text}")
        return False, f'【{name}】签到失败'
| 2,085 | Python | .py | 45 | 28.644444 | 97 | 0.590717 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,129 | carpt.py | demigody_nas-tools/app/plugins/modules/_autosignin/carpt.py | from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CarPT(_ISiteSigninHandler):
    """
    Signin handler for carpt.net (车站).
    """
    # Site url used by match() to pick this handler
    site_url = "carpt.net"
    # Fragment shown when the signin just succeeded
    _success_text = "签到成功"
    # Fragment shown when today's signin was already done
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the signin request for this site.
        :param site_info: dict holding the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None
        # Requesting the attendance page performs the signin
        response = RequestUtils(headers=user_agent,
                                cookies=cookie,
                                proxies=proxies).get_res(url="https://carpt.net/attendance.php")
        if not response or response.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{name}】签到失败,请检查站点连通性'
        if "login.php" in response.text:
            # Redirected to the login page - cookie expired
            self.error(f"签到失败,cookie失效")
            return False, f'【{name}】签到失败,cookie失效'
        # Success page reads e.g. '已连续签到278天,此次签到您获得了100魔力值奖励!'
        if self._success_text in response.text:
            self.info(f"签到成功")
            return True, f'【{name}】签到成功'
        if self._repeat_text in response.text:
            self.info(f"今日已签到")
            return True, f'【{name}】今日已签到'
        self.error(f"签到失败,签到接口返回 {response.text}")
        return False, f'【{name}】签到失败'
| 2,263 | Python | .py | 49 | 27.734694 | 94 | 0.600231 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,130 | hdupt.py | demigody_nas-tools/app/plugins/modules/_autosignin/hdupt.py | import re
import log
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class HDUpt(_ISiteSigninHandler):
    """
    Signin handler for pt.hdupt.com.
    """
    # Site url used by match() to pick this handler
    site_url = "pt.hdupt.com"
    # Pattern present on the index page once today's signin is done
    _sign_regex = ['<span id="yiqiandao">']
    # Fragment expected on a successful signin (kept for reference)
    _success_text = '本次签到获得魅力'

    @classmethod
    def match(cls, url):
        """
        Decide whether this handler serves the given site url.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: dict):
        """
        Perform the signin: check the index page first, then hit the
        signin endpoint when needed.
        :param site_info: dict holding the site name, cookie, UA and proxy flag
        :return: (success flag, human readable message)
        """
        name = site_info.get("name")
        cookie = site_info.get("cookie")
        user_agent = site_info.get("ua")
        proxies = Config().get_proxies() if site_info.get("proxy") else None
        # The index page tells us whether we already signed in today
        index_res = RequestUtils(headers=user_agent, cookies=cookie,
                                 proxies=proxies).get_res(url="https://pt.hdupt.com")
        if not index_res or index_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{name}】签到失败,请检查站点连通性'
        if "login.php" in index_res.text:
            # Redirected to the login page - cookie expired
            self.error(f"签到失败,cookie失效")
            return False, f'【{name}】签到失败,cookie失效'
        if self.sign_in_result(html_res=index_res.text, regexs=self._sign_regex):
            self.info(f"今日已签到")
            return True, f'【{name}】今日已签到'
        # Perform the actual signin
        sign_res = RequestUtils(headers=user_agent, cookies=cookie,
                                proxies=proxies).post_res(url="https://pt.hdupt.com/added.php?action=qiandao")
        if not sign_res or sign_res.status_code != 200:
            self.error(f"签到失败,请检查站点连通性")
            return False, f'【{name}】签到失败,请检查站点连通性'
        log.debug(f"签到接口返回 {sign_res.text}")
        # On success the endpoint answers with a number, e.g. ".23"
        if re.findall(r"\d+", sign_res.text):
            self.info(f"签到成功")
            return True, f'【{name}】签到成功'
        self.error(f"签到失败,签到接口返回 {sign_res.text}")
        return False, f'【{name}】签到失败'
| 2,797 | Python | .py | 59 | 29.745763 | 116 | 0.593091 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,131 | _base.py | demigody_nas-tools/app/plugins/modules/_autosignin/_base.py | # -*- coding: utf-8 -*-
import re
from abc import ABCMeta, abstractmethod
import log
from app.utils import StringUtils
class _ISiteSigninHandler(metaclass=ABCMeta):
    """
    Base class for site signin handlers. Every concrete handler must
    implement match() and signin(); implementation classes placed in the
    sitesignin directory are discovered and loaded automatically.
    """
    # Each implementation overrides this with its own site url
    site_url = ""

    @abstractmethod
    def match(self, url):
        """
        Decide whether this handler serves the given site url; comparing
        against site_url, as done here, suffices for most handlers.
        :param url: site url
        :return: True when matched - signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, self.site_url))

    @abstractmethod
    def signin(self, site_info: dict):
        """
        Perform the signin.
        :param site_info: dict with the site url, cookie, UA, etc.
        :return: True|False plus a result message
        """
        pass

    @staticmethod
    def sign_in_result(html_res, regexs):
        """
        Return True when any of the given patterns matches the html after
        '<digits>px' and '#<digits>' substrings have been removed (so style
        sizes and numeric fragments cannot trigger the patterns).
        """
        cleaned = re.sub(r"\d+px", "", html_res)
        cleaned = re.sub(r"#\d+", "", cleaned)
        return any(re.search(str(pattern), cleaned) for pattern in regexs)

    def info(self, msg):
        """Log an INFO message tagged with the handler class name."""
        log.info(f"【Sites】{self.__class__.__name__} - {msg}")

    def warn(self, msg):
        """Log a WARN message tagged with the handler class name."""
        log.warn(f"【Sites】{self.__class__.__name__} - {msg}")

    def error(self, msg):
        """Log an ERROR message tagged with the handler class name."""
        log.error(f"【Sites】{self.__class__.__name__} - {msg}")

    def debug(self, msg):
        """Log a DEBUG message tagged with the handler class name."""
        log.debug(f"【Sites】{self.__class__.__name__} - {msg}")
| 2,086 | Python | .py | 62 | 19.806452 | 75 | 0.549877 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,132 | iyuu_helper.py | demigody_nas-tools/app/plugins/modules/iyuu/iyuu_helper.py | import hashlib
import json
import time
from app.utils import RequestUtils
from app.utils.commons import singleton
@singleton
class IyuuHelper(object):
    # Client version reported with every api call
    _version = "2.0.0"
    # Api endpoint template; %s is replaced with the method name, e.g. App.Api.Sites
    _api_base = "http://api.bolahg.cn/%s"
    # Lazily-filled cache of supported sites keyed by site id
    _sites = {}
    # IYUU token used to sign all requests
    _token = None
    def __init__(self, token):
        self._token = token
        if self._token:
            self.init_config()
    def init_config(self):
        # No additional initialisation is needed at the moment
        pass
    def __request_iyuu(self, url, method="get", params=None):
        """
        Send a request to the IYUU api.
        :param url: full request url
        :param method: "get" for GET, anything else for POST
        :param params: request parameters; sign/version are filled in when absent
        :return: (data, error message) - data is None when the call failed
        """
        if params:
            if not params.get("sign"):
                params.update({"sign": self._token})
            if not params.get("version"):
                params.update({"version": self._version})
        else:
            params = {"sign": self._token, "version": self._version}
        # Issue the request
        if method == "get":
            ret = RequestUtils(accept_type="application/json").get_res(f"{url}", params=params)
        else:
            ret = RequestUtils(accept_type="application/json").post_res(f"{url}", data=params)
        if ret:
            result = ret.json()
            if result.get('ret') == 200:
                return result.get('data'), ""
            else:
                # Api-level failure reported inside the JSON body
                return None, f"请求IYUU失败,状态码:{result.get('ret')},返回信息:{result.get('msg')}"
        elif ret is not None:
            # HTTP-level failure (non-success response)
            return None, f"请求IYUU失败,状态码:{ret.status_code},错误原因:{ret.reason}"
        else:
            # No response at all (e.g. connection error)
            return None, f"请求IYUU失败,未获取到返回信息"
    def get_torrent_url(self, sid):
        """
        Return (base_url, download_page) of the site with the given id,
        or (None, None) when the id is falsy or unknown.
        """
        if not sid:
            return None, None
        if not self._sites:
            self._sites = self.__get_sites()
        if not self._sites.get(sid):
            return None, None
        site = self._sites.get(sid)
        return site.get('base_url'), site.get('download_page')
    def __get_sites(self):
        """
        Return every site that supports cross-seed assistance, keyed by
        site id ({} on failure). Raw response shape:
        {
            "ret": 200,
            "data": {
                "sites": [
                    {
                        "id": 1,
                        "site": "keepfrds",
                        "nickname": "朋友",
                        "base_url": "pt.keepfrds.com",
                        "download_page": "download.php?id={}&passkey={passkey}",
                        "reseed_check": "passkey",
                        "is_https": 2
                    },
                ]
            }
        }
        """
        result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.Sites')
        if result:
            ret_sites = {}
            sites = result.get('sites') or []
            for site in sites:
                ret_sites[site.get('id')] = site
            return ret_sites
        else:
            print(msg)
            return {}
    def get_seed_info(self, info_hashs: list):
        """
        Return the site id and torrent id for each given info_hash.
        NOTE: sorts the caller's list in place. Example response:
        {
            "ret": 200,
            "data": [
                {
                    "sid": 3,
                    "torrent_id": 377467,
                    "info_hash": "a444850638e7a6f6220e2efdde94099c53358159"
                },
                {
                    "sid": 7,
                    "torrent_id": 35538,
                    "info_hash": "cf7d88fd656d10fe5130d13567aec27068b96676"
                }
            ],
            "msg": "",
            "version": "1.0.0"
        }
        """
        info_hashs.sort()
        # Compact JSON plus its sha1 signature, as the api expects
        json_data = json.dumps(info_hashs, separators=(',', ':'), ensure_ascii=False)
        sha1 = self.get_sha1(json_data)
        result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.Infohash',
                                          method="post",
                                          params={
                                              "timestamp": time.time(),
                                              "hash": json_data,
                                              "sha1": sha1
                                          })
        return result, msg
    @staticmethod
    def get_sha1(json_str) -> str:
        """Return the sha1 hex digest of the utf-8 encoded string."""
        return hashlib.sha1(json_str.encode('utf-8')).hexdigest()
    def get_auth_sites(self):
        """
        Return the list of sites that support authentication binding
        ([] on failure). Example response data:
        [
            {
                "id": 2,
                "site": "pthome",
                "bind_check": "passkey,uid"
            }
        ]
        """
        result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.GetRecommendSites')
        if result:
            return result.get('recommend') or []
        else:
            print(msg)
            return []
    def bind_site(self, site, passkey, uid):
        """
        Bind a site to the IYUU account.
        :param site: site name
        :param passkey: passkey (transmitted as its sha1 digest)
        :param uid: user id
        :return: (result, error message)
        """
        result, msg = self.__request_iyuu(url=self._api_base % 'App.Api.Bind',
                                          method="get",
                                          params={
                                              "token": self._token,
                                              "site": site,
                                              "passkey": self.get_sha1(passkey),
                                              "id": uid
                                          })
        return result, msg
| 5,472 | Python | .py | 152 | 20.105263 | 95 | 0.42621 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,133 | moduleconf.py | demigody_nas-tools/app/conf/moduleconf.py | # coding: utf-8
from app.utils.types import *
class ModuleConf(object):
# 菜单对应关系,配置WeChat应用中配置的菜单ID与执行命令的对应关系,需要手工修改
# 菜单序号在https://work.weixin.qq.com/wework_admin/frame#apps 应用自定义菜单中维护,然后看日志输出的菜单序号是啥(按顺利能猜到的)....
# 命令对应关系:/ptt 下载文件转移;/ptr 删种;/pts 站点签到;/rst 目录同步;/db 豆瓣同步;/utf 重新识别;
# /ssa 订阅搜索;/tbl 清理转移缓存;/trh 清理RSS缓存;/rss RSS下载;/udt 系统更新;/sta 数据统计
WECHAT_MENU = {
'_0_0': '/ptt', # 下载->下载文件转移
'_0_1': '/ptr', # 下载->删种
'_0_2': '/rss', # 下载->RSS下载
'_0_3': '/ssa', # 下载->订阅搜索
'_1_0': '/rst', # 同步->目录同步
'_1_1': '/db', # 同步->豆瓣同步
'_1_2': '/utf', # 同步->重新识别
'_2_0': '/pts', # 管理->站点签到
#'_2_1': '/udt', 管理->系统更新
'_2_2': '/tbl', # 管理->清理转移缓存
'_2_3': '/trh', # 管理->清理RSS缓存
'_2_4': '/sta' # 管理->数据统计
}
# 全量转移模式
RMT_MODES = {
"copy": RmtMode.COPY,
"link": RmtMode.LINK,
"softlink": RmtMode.SOFTLINK,
"move": RmtMode.MOVE,
"rclone": RmtMode.RCLONE,
"rclonecopy": RmtMode.RCLONECOPY,
"minio": RmtMode.MINIO,
"miniocopy": RmtMode.MINIOCOPY
}
# 精简版转移模式
RMT_MODES_LITE = {
"copy": RmtMode.COPY,
"link": RmtMode.LINK,
"softlink": RmtMode.SOFTLINK,
"move": RmtMode.MOVE
}
# 远程转移模式
REMOTE_RMT_MODES = [RmtMode.RCLONE, RmtMode.RCLONECOPY, RmtMode.MINIO, RmtMode.MINIOCOPY]
# 消息通知类型
MESSAGE_CONF = {
"client": {
"telegram": {
"name": "Telegram",
"img_url": "../static/img/message/telegram.png",
"color": "#22A7E7",
"search_type": SearchType.TG,
"config": {
"token": {
"id": "telegram_token",
"required": True,
"title": "Bot Token",
"tooltip": "telegram机器人的Token,关注BotFather创建机器人",
"type": "text"
},
"chat_id": {
"id": "telegram_chat_id",
"required": True,
"title": "Chat ID",
"tooltip": "接受消息通知的用户、群组或频道Chat ID,关注@getidsbot获取",
"type": "text"
},
"thread_id": {
"id": "telegram_thread_id",
"required": False,
"title": "Message Thread ID",
"tooltip": "接受消息通知的群组话题 ID,https://api.telegram.org/bot${BOT_TOKEN}/getUpdates 获取",
"type": "text"
},
"user_ids": {
"id": "telegram_user_ids",
"required": False,
"title": "User IDs",
"tooltip": "允许使用交互的用户Chat ID,留空则只允许管理用户使用,关注@getidsbot获取",
"type": "text",
"placeholder": "使用,分隔多个Id"
},
"admin_ids": {
"id": "telegram_admin_ids",
"required": False,
"title": "Admin IDs",
"tooltip": "允许使用管理命令的用户Chat ID,关注@getidsbot获取",
"type": "text",
"placeholder": "使用,分隔多个Id"
},
"webhook": {
"id": "telegram_webhook",
"required": False,
"title": "Webhook",
"tooltip": "Telegram机器人消息有两种模式:Webhook或消息轮循;开启后将使用Webhook方式,需要在基础设置中正确配置好外网访问地址,同时受Telegram官方限制,外网访问地址需要设置为以下端口之一:443, 80, 88, 8443,且需要有公网认证的可信SSL证书;关闭后将使用消息轮循方式,使用该方式需要在基础设置->安全处将Telegram ipv4源地址设置为127.0.0.1,如同时使用了内置的SSL证书功能,消息轮循方式可能无法正常使用",
"type": "switch"
}
}
},
"wechat": {
"name": "微信",
"img_url": "../static/img/message/wechat.png",
"color": "#00D20B",
"search_type": SearchType.WX,
"max_length": 2048,
"config": {
"corpid": {
"id": "wechat_corpid",
"required": True,
"title": "企业ID",
"tooltip": "每个企业都拥有唯一的corpid,获取此信息可在管理后台“我的企业”-“企业信息”下查看“企业ID”(需要有管理员权限)",
"type": "text"
},
"corpsecret": {
"id": "wechat_corpsecret",
"required": True,
"title": "应用Secret",
"tooltip": "每个应用都拥有唯一的secret,获取此信息可在管理后台“应用与小程序”-“自建”下查看“Secret”(需要有管理员权限)",
"type": "text",
"placeholder": "Secret"
},
"agentid": {
"id": "wechat_agentid",
"required": True,
"title": "应用ID",
"tooltip": "每个应用都拥有唯一的agentid,获取此信息可在管理后台“应用与小程序”-“自建”下查看“AgentId”(需要有管理员权限)",
"type": "text",
"placeholder": "AgentId",
},
"default_proxy": {
"id": "wechat_default_proxy",
"required": False,
"title": "消息推送代理",
"tooltip": "由于微信官方限制,2022年6月20日后创建的企业微信应用需要有固定的公网IP地址并加入IP白名单后才能发送消息,使用有固定公网IP的代理服务器转发可解决该问题;代理服务器需自行搭建,搭建方法可参考项目主页说明",
"type": "text",
"placeholder": "消息推送代理的网址"
},
"token": {
"id": "wechat_token",
"required": False,
"title": "Token",
"tooltip": "需要交互功能时才需要填写,在微信企业应用管理后台-接收消息设置页面生成,填入完成后重启本应用,然后再在微信页面输入地址确定",
"type": "text",
"placeholder": "API接收消息Token"
},
"encodingAESKey": {
"id": "wechat_encodingAESKey",
"required": False,
"title": "EncodingAESKey",
"tooltip": "需要交互功能时才需要填写,在微信企业应用管理后台-接收消息设置页面生成,填入完成后重启本应用,然后再在微信页面输入地址确定",
"type": "text",
"placeholder": "API接收消息EncodingAESKey"
},
"adminUser": {
"id": "wechat_adminUser",
"required": False,
"title": "AdminUser",
"tooltip": "需要交互功能时才需要填写,可执行交互菜单命令的用户名,为空则不限制,多个;号分割。可在企业微信后台查看成员的Account ID",
"type": "text",
"placeholder": "可执行交互菜单的用户名"
}
}
},
"serverchan": {
"name": "Server酱",
"img_url": "../static/img/message/serverchan.png",
"color": "#FEE6DB",
"config": {
"sckey": {
"id": "serverchan_sckey",
"required": True,
"title": "SCKEY",
"tooltip": "填写ServerChan的API Key,SCT类型,在https://sct.ftqq.com/中申请",
"type": "text",
"placeholder": "SCT..."
}
}
},
"bark": {
"name": "Bark",
"img_url": "../static/img/message/bark.webp",
"color": "#FF3B30",
"config": {
"server": {
"id": "bark_server",
"required": True,
"title": "Bark服务器地址",
"tooltip": "自己搭建Bark服务端请实际配置,否则可使用:https://api.day.app",
"type": "text",
"placeholder": "https://api.day.app",
"default": "https://api.day.app"
},
"apikey": {
"id": "bark_apikey",
"required": True,
"title": "API Key",
"tooltip": "在Bark客户端中点击右上角的“...”按钮,选择“生成Bark Key”,然后将生成的KEY填入此处",
"type": "text"
},
"params": {
"id": "bark_params",
"required": False,
"title": "附加参数",
"tooltip": "添加到Bark通知中的附加参数,可用于自定义通知特性",
"type": "text",
"placeholder": "group=xxx&sound=xxx&url=xxx"
}
}
},
"pushdeer": {
"name": "PushDeer",
"img_url": "../static/img/message/pushdeer.png",
"color": "#444E98",
"config": {
"server": {
"id": "pushdeer_server",
"required": True,
"title": "PushDeer服务器地址",
"tooltip": "自己搭建pushdeer服务端请实际配置,否则可使用:https://api2.pushdeer.com",
"type": "text",
"placeholder": "https://api2.pushdeer.com",
"default": "https://api2.pushdeer.com"
},
"apikey": {
"id": "pushdeer_apikey",
"required": True,
"title": "API Key",
"tooltip": "pushdeer客户端生成的KEY",
"type": "text"
}
}
},
"pushplus": {
"name": "PushPlus",
"img_url": "../static/img/message/pushplus.jpg",
"color": "#047AEB",
"config": {
"token": {
"id": "pushplus_token",
"required": True,
"title": "Token",
"tooltip": "在PushPlus官网中申请,申请地址:http://pushplus.plus/",
"type": "text"
},
"channel": {
"id": "pushplus_channel",
"required": True,
"title": "推送渠道",
"tooltip": "使用PushPlus中配置的发送渠道,具体参考pushplus.plus官网文档说明,支持第三方webhook、钉钉、飞书、邮箱等",
"type": "select",
"options": {
"wechat": "微信",
"mail": "邮箱",
"webhook": "第三方Webhook"
},
"default": "wechat"
},
"topic": {
"id": "pushplus_topic",
"required": False,
"title": "群组编码",
"tooltip": "PushPlus中创建的群组,如未设置可为空",
"type": "text"
},
"webhook": {
"id": "pushplus_webhook",
"required": False,
"title": "Webhook编码",
"tooltip": "PushPlus中创建的webhook编码,发送渠道为第三方webhook时需要填入",
}
}
},
"iyuu": {
"name": "爱语飞飞",
"img_url": "../static/img/message/iyuu.png",
"color": "#F5BD08",
"config": {
"token": {
"id": "iyuumsg_token",
"required": True,
"title": "令牌Token",
"tooltip": "在爱语飞飞官网中申请,申请地址:https://iyuu.cn/",
"type": "text",
"placeholder": "登录https://iyuu.cn获取"
}
}
},
"slack": {
"name": "Slack",
"img_url": "../static/img/message/slack.png",
"color": "#E01D5A",
"search_type": SearchType.SLACK,
"config": {
"bot_token": {
"id": "slack_bot_token",
"required": True,
"title": "Bot User OAuth Token",
"tooltip": "在Slack中创建应用,获取Bot User OAuth Token",
"type": "text",
"placeholder": "xoxb-xxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx"
},
"app_token": {
"id": "slack_app_token",
"required": True,
"title": "App-Level Token",
"tooltip": "在Slack中创建应用,获取App-Level Token",
"type": "text",
"placeholder": "xapp-xxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxx"
},
"channel": {
"id": "slack_channel",
"required": False,
"title": "频道名称",
"tooltip": "Slack中的频道名称,默认为全体;需要将机器人添加到该频道,以接收非交互类的通知消息",
"type": "text",
"placeholder": "全体"
}
}
},
"gotify": {
"name": "Gotify",
"img_url": "../static/img/message/gotify.png",
"color": "#72CAEE",
"config": {
"server": {
"id": "gotify_server",
"required": True,
"title": "Gotify服务器地址",
"tooltip": "自己搭建gotify服务端地址",
"type": "text",
"placeholder": "http://localhost:8800"
},
"token": {
"id": "gotify_token",
"required": True,
"title": "令牌Token",
"tooltip": "Gotify服务端APPS下创建的token",
"type": "text"
},
"priority": {
"id": "gotify_priority",
"required": False,
"title": "消息Priority",
"tooltip": "消息通知优先级, 请填写数字(1-8), 默认: 8",
"type": "text",
"placeholder": "8"
}
}
},
"ntfy": {
"name": "ntfy",
"img_url": "../static/img/message/ntfy.webp",
"color": "#409D8A",
"config": {
"server": {
"id": "ntfy_server",
"required": True,
"title": "ntfy服务器地址",
"tooltip": "自己搭建ntfy服务端地址",
"type": "text",
"placeholder": "http://localhost:8800"
},
"token": {
"id": "ntfy_token",
"required": True,
"title": "令牌Token",
"tooltip": "ntfy服务端创建的token",
"type": "text"
},
"topic": {
"id": "ntfy_topic",
"required": True,
"title": "topic",
"tooltip": "ntfy创建的topic",
"type": "text"
},
"priority": {
"id": "ntfy_priority",
"required": False,
"title": "消息Priority",
"tooltip": "消息通知优先级, 请填写数字(1-5), 默认: 4",
"type": "text",
"placeholder": "4"
},
"tags": {
"id": "ntfy_tags",
"required": False,
"title": "消息tags",
"tooltip": "消息tags,以逗号分隔, 请参阅ntfy官网, 默认: rotating_light",
"type": "text",
"placeholder": "rotating_light"
}
}
},
"chanify": {
"name": "Chanify",
"img_url": "../static/img/message/chanify.png",
"color": "#0B84FF",
"config": {
"server": {
"id": "chanify_server",
"required": True,
"title": "Chanify服务器地址",
"tooltip": "自己搭建Chanify服务端地址或使用https://api.chanify.net",
"type": "text",
"placeholder": "https://api.chanify.net",
"default": "https://api.chanify.net"
},
"token": {
"id": "chanify_token",
"required": True,
"title": "令牌",
"tooltip": "在Chanify客户端频道中获取",
"type": "text"
},
"params": {
"id": "chanify_params",
"required": False,
"title": "附加参数",
"tooltip": "添加到Chanify通知中的附加参数,可用于自定义通知特性",
"type": "text",
"placeholder": "sound=0&interruption-level=active"
}
}
},
"synologychat": {
"name": "Synology Chat",
"img_url": "../static/img/message/synologychat.png",
"color": "#26C07A",
"search_type": SearchType.SYNOLOGY,
"config": {
"webhook_url": {
"id": "synologychat_webhook_url",
"required": True,
"title": "机器人传入URL",
"tooltip": "在Synology Chat中创建机器人,获取机器人传入URL",
"type": "text",
"placeholder": "https://xxx/webapi/entry.cgi?api=xxx"
},
"token": {
"id": "synologychat_token",
"required": True,
"title": "令牌",
"tooltip": "在Synology Chat中创建机器人,获取机器人令牌",
"type": "text",
"placeholder": ""
}
}
},
},
"switch": {
"download_start": {
"name": "新增下载",
"fuc_name": "download_start"
},
"download_fail": {
"name": "下载失败",
"fuc_name": "download_fail"
},
"transfer_finished": {
"name": "入库完成",
"fuc_name": "transfer_finished"
},
"transfer_fail": {
"name": "入库失败",
"fuc_name": "transfer_fail"
},
"rss_added": {
"name": "新增订阅",
"fuc_name": "rss_added"
},
"rss_finished": {
"name": "订阅完成",
"fuc_name": "rss_finished"
},
"site_signin": {
"name": "站点签到",
"fuc_name": "site_signin"
},
"site_message": {
"name": "站点消息",
"fuc_name": "site_message"
},
"brushtask_added": {
"name": "刷流下种",
"fuc_name": "brushtask_added"
},
"brushtask_remove": {
"name": "刷流删种",
"fuc_name": "brushtask_remove"
},
"auto_remove_torrents": {
"name": "自动删种",
"fuc_name": "auto_remove_torrents"
},
"ptrefresh_date_message": {
"name": "数据统计",
"fuc_name": "ptrefresh_date_message"
},
"mediaserver_message": {
"name": "媒体服务",
"fuc_name": "mediaserver_message"
},
"custom_message": {
"name": "插件消息",
"fuc_name": "custom_message"
}
}
}
# 自动删种配置
TORRENTREMOVER_DICT = {
"qbittorrent": {
"name": "Qbittorrent",
"img_url": "../static/img/downloader/qbittorrent.png",
"downloader_type": DownloaderType.QB,
"torrent_state": {
"downloading": "正在下载_传输数据",
"stalledDL": "正在下载_未建立连接",
"uploading": "正在上传_传输数据",
"stalledUP": "正在上传_未建立连接",
"error": "暂停_发生错误",
"pausedDL": "暂停_下载未完成",
"pausedUP": "暂停_下载完成",
"missingFiles": "暂停_文件丢失",
"checkingDL": "检查中_下载未完成",
"checkingUP": "检查中_下载完成",
"checkingResumeData": "检查中_启动时恢复数据",
"forcedDL": "强制下载_忽略队列",
"queuedDL": "等待下载_排队",
"forcedUP": "强制上传_忽略队列",
"queuedUP": "等待上传_排队",
"allocating": "分配磁盘空间",
"metaDL": "获取元数据",
"moving": "移动文件",
"unknown": "未知状态",
}
},
"transmission": {
"name": "Transmission",
"img_url": "../static/img/downloader/transmission.png",
"downloader_type": DownloaderType.TR,
"torrent_state": {
"downloading": "正在下载",
"seeding": "正在上传",
"download_pending": "等待下载_排队",
"seed_pending": "等待上传_排队",
"checking": "正在检查",
"check_pending": "等待检查_排队",
"stopped": "暂停",
}
}
}
# 搜索种子过滤属性
TORRENT_SEARCH_PARAMS = {
"restype": {
"BLURAY": r"Blu-?Ray|BD|BDRIP",
"REMUX": r"REMUX",
"DOLBY": r"DOLBY|DOVI|\s+DV$|\s+DV\s+",
"WEB": r"WEB-?DL|WEBRIP",
"HDTV": r"U?HDTV",
"UHD": r"UHD",
"HDR": r"HDR",
"3D": r"3D"
},
"pix": {
"8k": r"8K",
"4k": r"4K|2160P|X2160",
"1080p": r"1080[PIX]|X1080",
"720p": r"720P"
}
}
# 网络测试对象,TMDB API除外
NETTEST_TARGETS = [
"www.themoviedb.org",
"image.tmdb.org",
"webservice.fanart.tv",
"api.telegram.org",
"qyapi.weixin.qq.com",
"www.opensubtitles.org"
]
# 下载器
DOWNLOADER_CONF = {
"qbittorrent": {
"name": "Qbittorrent",
"img_url": "../static/img/downloader/qbittorrent.png",
"color": "#3872C2",
"monitor_enable": True,
"speedlimit_enable": True,
"config": {
"host": {
"id": "qbittorrent_host",
"required": True,
"title": "地址",
"tooltip": "配置IP地址或域名,如为https则需要增加https://前缀",
"type": "text",
"placeholder": "127.0.0.1"
},
"port": {
"id": "qbittorrent_port",
"required": True,
"title": "端口",
"type": "text",
"placeholder": "8080"
},
"username": {
"id": "qbittorrent_username",
"required": True,
"title": "用户名",
"type": "text",
"placeholder": "admin"
},
"password": {
"id": "qbittorrent_password",
"required": False,
"title": "密码",
"type": "password",
"placeholder": "password"
},
"torrent_management": {
"id": "qbittorrent_torrent_management",
"required": False,
"title": "种子管理模式",
"tooltip": """【默认】将使用Qbittorrent客户端中的设置,NAStool不进行修改;<br>
【手动】强制开启手动管理模式,下载目录由NAStool传递的下载目录决定;<br>
【自动】强制开启自动管理模式,下载目录由NAStool传递的分类标签决定,没有分类标签的将使用下载器中的默认保存路径;<br>
【注意】自动管理模式下,NAStool将在启动时根据下载目录设置自动为下载器创建相应分类(需设置下载保存目录和分类标签),下载器中已存在该分类且其保存目录与NAStool中设置的不一致时,将会覆盖下载器的设置。
""",
"type": "select",
"options": {
"default": "默认",
"manual": "手动",
"auto": "自动"
},
"default": "manual"
}
}
},
"transmission": {
"name": "Transmission",
"img_url": "../static/img/downloader/transmission.png",
"color": "#B30100",
"monitor_enable": True,
"speedlimit_enable": True,
"config": {
"host": {
"id": "transmission_host",
"required": True,
"title": "地址",
"tooltip": "配置IP地址或域名,如为https则需要增加https://前缀",
"type": "text",
"placeholder": "127.0.0.1"
},
"port": {
"id": "transmission_port",
"required": True,
"title": "端口",
"type": "text",
"placeholder": "9091"
},
"username": {
"id": "transmission_username",
"required": True,
"title": "用户名",
"type": "text",
"placeholder": "admin"
},
"password": {
"id": "transmission_password",
"required": False,
"title": "密码",
"type": "password",
"placeholder": "password"
}
}
},
"aria2": {
"name": "Aria2",
"img_url": "../static/img/downloader/aria2.png",
"color": "#B30100",
"monitor_enable": True,
"config": {
"host": {
"id": "aria2_host",
"required": True,
"title": "IP地址",
"tooltip": "配置IP地址,如为https则需要增加https://前缀",
"type": "text",
"placeholder": "127.0.0.1"
},
"port": {
"id": "aria2_port",
"required": True,
"title": "端口",
"type": "text",
"placeholder": "6800"
},
"secret": {
"id": "aria2_secret",
"required": True,
"title": "令牌",
"type": "text",
"placeholder": ""
}
}
},
"pan115": {
"name": "115网盘",
"img_url": "../static/img/downloader/115.jpg",
"background": "bg-azure",
"test_command": "app.downloader.client.pan115|Pan115",
"config": {
"cookie": {
"id": "pan115_cookie",
"required": True,
"title": "Cookie",
"tooltip": "115网盘Cookie,通过115网盘网页端抓取Cookie",
"type": "text",
"placeholder": "USERSESSIONID=xxx;115_lang=zh;UID=xxx;CID=xxx;SEID=xxx"
}
}
},
"pikpak": {
"name": "PikPak",
"img_url": "../static/img/downloader/pikpak.png",
"background": "bg-indigo",
"test_command": "app.downloader.client.pikpak|PikPak",
"config": {
"username": {
"id": "pikpak_username",
"required": True,
"title": "账号",
"tooltip": "PikPak的账号一般是手机号或者邮箱",
"type": "text",
"placeholder": ""
},
"password": {
"id": "pikpak_password",
"required": True,
"title": "密码",
"tooltip": "密码",
"type": "password",
"placeholder": ""
},
"proxy": {
"id": "pikpak_proxy",
"required": False,
"title": "代理",
"tooltip": "如果需要代理才能访问pikpak可以在此处填入代理地址",
"type": "text",
"placeholder": "127.0.0.1:7890"
}
}
}
}
# Media server definitions (Emby / Jellyfin / Plex). Same structure as
# DOWNLOADER_CONF: per-server metadata plus a "config" mapping that drives
# the settings form. "test_command" is a "module.path|ClassName" locator
# used to run the connection test.
MEDIASERVER_CONF = {
    "emby": {
        "name": "Emby",
        "img_url": "../static/img/mediaserver/emby.png",
        "background": "bg-green",
        "test_command": "app.mediaserver.client.emby|Emby",
        "config": {
            "host": {
                "id": "emby.host",
                "required": True,
                "title": "服务器地址",
                "tooltip": "配置IP地址和端口,如为https则需要增加https://前缀",
                "type": "text",
                "placeholder": "http://127.0.0.1:8096"
            },
            "api_key": {
                "id": "emby.api_key",
                "required": True,
                "title": "Api Key",
                "tooltip": "在Emby设置->高级->API密钥处生成,注意不要复制到了应用名称",
                "type": "text",
                "placeholder": ""
            },
            "play_host": {
                "id": "emby.play_host",
                "required": False,
                "title": "媒体播放地址",
                "tooltip": "配置播放设备的访问地址,用于媒体详情页跳转播放页面;如为https则需要增加https://前缀,留空则默认与服务器地址一致",
                "type": "text",
                "placeholder": "http://127.0.0.1:8096"
            }
        }
    },
    "jellyfin": {
        "name": "Jellyfin",
        "img_url": "../static/img/mediaserver/jellyfin.jpg",
        "background": "bg-purple",
        "test_command": "app.mediaserver.client.jellyfin|Jellyfin",
        "config": {
            "host": {
                "id": "jellyfin.host",
                "required": True,
                "title": "服务器地址",
                "tooltip": "配置IP地址和端口,如为https则需要增加https://前缀",
                "type": "text",
                "placeholder": "http://127.0.0.1:8096"
            },
            "api_key": {
                "id": "jellyfin.api_key",
                "required": True,
                "title": "Api Key",
                "tooltip": "在Jellyfin设置->高级->API密钥处生成",
                "type": "text",
                "placeholder": ""
            },
            "play_host": {
                "id": "jellyfin.play_host",
                "required": False,
                "title": "媒体播放地址",
                "tooltip": "配置播放设备的访问地址,用于媒体详情页跳转播放页面;如为https则需要增加https://前缀,留空则默认与服务器地址一致",
                "type": "text",
                "placeholder": "http://127.0.0.1:8096"
            }
        }
    },
    "plex": {
        "name": "Plex",
        "img_url": "../static/img/mediaserver/plex.png",
        "background": "bg-yellow",
        "test_command": "app.mediaserver.client.plex|Plex",
        "config": {
            "host": {
                "id": "plex.host",
                "required": True,
                "title": "服务器地址",
                "tooltip": "配置IP地址和端口,如为https则需要增加https://前缀",
                "type": "text",
                "placeholder": "http://127.0.0.1:32400"
            },
            # Plex supports two auth styles: token, or servername+username+password.
            "token": {
                "id": "plex.token",
                "required": False,
                "title": "X-Plex-Token",
                "tooltip": "Plex网页Url中的X-Plex-Token,通过浏览器F12->网络从请求URL中获取,如填写将优先使用;Token与服务器名称、用户名及密码 二选一,推荐使用Token,连接速度更快",
                "type": "text",
                "placeholder": "X-Plex-Token与其它认证信息二选一"
            },
            "servername": {
                "id": "plex.servername",
                "required": False,
                "title": "服务器名称",
                "tooltip": "配置Plex设置->左侧下拉框中看到的服务器名称;如填写了Token则无需填写服务器名称、用户名及密码",
                "type": "text",
                "placeholder": ""
            },
            "username": {
                "id": "plex.username",
                "required": False,
                "title": "用户名",
                "type": "text",
                "placeholder": ""
            },
            "password": {
                "id": "plex.password",
                "required": False,
                "title": "密码",
                "type": "password",
                "placeholder": ""
            },
            "play_host": {
                "id": "plex.play_host",
                "required": False,
                "title": "媒体播放地址",
                "tooltip": "配置播放设备的访问地址,用于媒体详情页跳转播放页面;如为https则需要增加https://前缀,留空则默认与服务器地址一致",
                "type": "text",
                "placeholder": "https://app.plex.tv"
            }
        }
    },
}
# Indexer definitions — intentionally empty here (presumably populated at
# runtime or by site configuration; verify against callers).
INDEXER_CONF = {}
# Discovery-page filter definitions. Keyed by data source (TMDB / Douban,
# movie / TV); each filter has a display name, widget type and the dropdown
# options (value = API query parameter value, name = UI label).
DISCOVER_FILTER_CONF = {
    "tmdb_movie": {
        "sort_by": {
            "name": "排序",
            "type": "dropdown",
            "options": [{'value': '', 'name': '默认'},
                        {'value': 'popularity.desc', 'name': '近期热度'},
                        {'value': 'vote_average.desc', 'name': '高分优先'},
                        {'value': 'release_date.desc', 'name': '首播时间'}]
        },
        "with_genres": {
            "name": "类型",
            "type": "dropdown",
            # Numeric values are TMDB genre IDs.
            "options": [{'value': '', 'name': '全部'},
                        {'value': '12', 'name': '冒险'},
                        {'value': '16', 'name': '动画'},
                        {'value': '35', 'name': '喜剧'},
                        {'value': '80', 'name': '犯罪'},
                        {'value': '18', 'name': '剧情'},
                        {'value': '14', 'name': '奇幻'},
                        {'value': '27', 'name': '恐怖'},
                        {'value': '9648', 'name': '悬疑'},
                        {'value': '10749', 'name': '爱情'},
                        {'value': '878', 'name': '科幻'},
                        {'value': '53', 'name': '惊悚'},
                        {'value': '10752', 'name': '战争'}]
        },
        "with_original_language": {
            "name": "语言",
            "type": "dropdown",
            "options": [{'value': '', 'name': '全部'},
                        {'value': 'zh', 'name': '中文'},
                        {'value': 'en', 'name': '英语'},
                        {'value': 'ja', 'name': '日语'},
                        {'value': 'ko', 'name': '韩语'},
                        {'value': 'fr', 'name': '法语'},
                        {'value': 'de', 'name': '德语'},
                        {'value': 'ru', 'name': '俄语'},
                        {'value': 'hi', 'name': '印地语'}]
        }
    },
    "tmdb_tv": {
        "sort_by": {
            "name": "排序",
            "type": "dropdown",
            "options": [{'value': '', 'name': '默认'},
                        {'value': 'popularity.desc', 'name': '近期热度'},
                        {'value': 'vote_average.desc', 'name': '高分优先'},
                        {'value': 'first_air_date.desc', 'name': '首播时间'}]
        },
        "with_genres": {
            "name": "类型",
            "type": "dropdown",
            "options": [{'value': '', 'name': '全部'},
                        {'value': '10759', 'name': '动作冒险'},
                        {'value': '16', 'name': '动画'},
                        {'value': '35', 'name': '喜剧'},
                        {'value': '80', 'name': '犯罪'},
                        {'value': '99', 'name': '纪录'},
                        {'value': '18', 'name': '剧情'},
                        {'value': '10762', 'name': '儿童'},
                        {'value': '9648', 'name': '悬疑'},
                        {'value': '10764', 'name': '真人秀'},
                        {'value': '10765', 'name': '科幻'}]
        },
        "with_original_language": {
            "name": "语言",
            "type": "dropdown",
            "options": [{'value': '', 'name': '全部'},
                        {'value': 'zh', 'name': '中文'},
                        {'value': 'en', 'name': '英语'},
                        {'value': 'ja', 'name': '日语'},
                        {'value': 'ko', 'name': '韩语'},
                        {'value': 'fr', 'name': '法语'},
                        {'value': 'de', 'name': '德语'},
                        {'value': 'ru', 'name': '俄语'},
                        {'value': 'hi', 'name': '印地语'}]
        }
    },
    "douban_movie": {
        "sort": {
            "name": "排序",
            "type": "dropdown",
            # Single-letter values are Douban's sort codes.
            "options": [{'value': '', 'name': '默认'},
                        {'value': 'U', 'name': '综合排序'},
                        {'value': 'T', 'name': '近期热度'},
                        {'value': 'S', 'name': '高分优先'},
                        {'value': 'R', 'name': '首播时间'}]
        },
        "tags": {
            "name": "类型",
            "type": "dropdown",
            "options": [{"value": "", "name": "全部"},
                        {"value": "喜剧", "name": "喜剧"},
                        {"value": "爱情", "name": "爱情"},
                        {"value": "动作", "name": "动作"},
                        {"value": "科幻", "name": "科幻"},
                        {"value": "动画", "name": "动画"},
                        {"value": "悬疑", "name": "悬疑"},
                        {"value": "犯罪", "name": "犯罪"},
                        {"value": "惊悚", "name": "惊悚"},
                        {"value": "冒险", "name": "冒险"},
                        {"value": "奇幻", "name": "奇幻"},
                        {"value": "恐怖", "name": "恐怖"},
                        {"value": "战争", "name": "战争"},
                        {"value": "武侠", "name": "武侠"},
                        {"value": "灾难", "name": "灾难"}]
        }
    },
    "douban_tv": {
        "sort": {
            "name": "排序",
            "type": "dropdown",
            "options": [{'value': '', 'name': '默认'},
                        {'value': 'U', 'name': '综合排序'},
                        {'value': 'T', 'name': '近期热度'},
                        {'value': 'S', 'name': '高分优先'},
                        {'value': 'R', 'name': '首播时间'}]
        },
        "tags": {
            "name": "地区",
            "type": "dropdown",
            "options": [{"value": "", "name": "全部"},
                        {"value": "华语", "name": "华语"},
                        {"value": "中国大陆", "name": "中国大陆"},
                        {"value": "中国香港", "name": "中国香港"},
                        {"value": "中国台湾", "name": "中国台湾"},
                        {"value": "欧美", "name": "欧美"},
                        {"value": "韩国", "name": "韩国"},
                        {"value": "日本", "name": "日本"},
                        {"value": "印度", "name": "印度"},
                        {"value": "泰国", "name": "泰国"}]
        }
    }
}
@staticmethod
def get_enum_name(enum, value):
"""
根据Enum的value查询name
:param enum: 枚举
:param value: 枚举值
:return: 枚举名或None
"""
for e in enum:
if e.value == value:
return e.name
return None
@staticmethod
def get_enum_item(enum, value):
"""
根据Enum的value查询name
:param enum: 枚举
:param value: 枚举值
:return: 枚举项
"""
for e in enum:
if e.value == value:
return e
return None
@staticmethod
def get_dictenum_key(dictenum, value):
"""
根据Enum dict的value查询key
:param dictenum: 枚举字典
:param value: 枚举类(字典值)的值
:return: 字典键或None
"""
for k, v in dictenum.items():
if v.value == value:
return k
return None
| 46,886 | Python | .py | 1,046 | 20.6826 | 266 | 0.347014 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,134 | systemconfig.py | demigody_nas-tools/app/conf/systemconfig.py | import json
from app.helper import DictHelper
from app.utils.commons import singleton
from app.utils.types import SystemConfigKey
@singleton
class SystemConfig:
    """In-memory cache over the "SystemConfig" dictionary table.

    Reads are served from the ``systemconfig`` dict; writes update the cache
    and are persisted through DictHelper. Object-like values are stored as
    JSON text in the database.
    """
    # Cached system settings: key -> value (JSON-decoded when object-like).
    systemconfig = {}
    def __init__(self):
        self.dicthelper = DictHelper()
        self.init_config()
    def init_config(self):
        """
        Load all "SystemConfig" rows from the database into the cache,
        JSON-decoding values that look like objects/arrays.
        """
        for item in self.dicthelper.list("SystemConfig"):
            if not item:
                continue
            if self.__is_obj(item.VALUE):
                self.systemconfig[item.KEY] = json.loads(item.VALUE)
            else:
                self.systemconfig[item.KEY] = item.VALUE
    @staticmethod
    def __is_obj(obj):
        # Heuristic: real list/dict instances, or strings that look like
        # serialized JSON objects/arrays ("{"/"[" prefix).
        if isinstance(obj, list) or isinstance(obj, dict):
            return True
        else:
            return str(obj).startswith("{") or str(obj).startswith("[")
    def set(self, key: [SystemConfigKey, str], value):
        """
        Set a system setting: update the in-memory cache, then persist to
        the database (object-like values are JSON-encoded first).
        """
        if isinstance(key, SystemConfigKey):
            key = key.value
        # Update the in-memory cache first.
        self.systemconfig[key] = value
        # Persist to the database; serialize object-like values as JSON.
        if self.__is_obj(value):
            if value is not None:
                value = json.dumps(value)
            else:
                value = ''
        self.dicthelper.set("SystemConfig", key, value)
    def get(self, key: [SystemConfigKey, str] = None):
        """
        Get a system setting by key; with no key, return the whole cache dict.
        """
        if not key:
            return self.systemconfig
        if isinstance(key, SystemConfigKey):
            key = key.value
        return self.systemconfig.get(key)
| 1,637 | Python | .py | 52 | 21.134615 | 71 | 0.558416 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,135 | script.py.mako | demigody_nas-tools/scripts/script.py.mako | """${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}
| 510 | Python | .py | 17 | 28.117647 | 43 | 0.718107 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,136 | env.py | demigody_nas-tools/scripts/env.py | from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from app.db.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        # batch mode is required for ALTER TABLE support on SQLite
        render_as_batch=True
    )

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()
# Alembic sets offline mode for the `--sql` (script generation) invocation.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 2,168 | Python | .py | 60 | 31.333333 | 69 | 0.721264 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,137 | 69508d1aed24_1_2_1.py | demigody_nas-tools/scripts/versions/69508d1aed24_1_2_1.py | """1.2.1
Revision ID: 69508d1aed24
Revises: 6abeaa9ece15
Create Date: 2023-03-24 11:12:51.646014
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '69508d1aed24'
down_revision = '6abeaa9ece15'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.1: add SITE_BRUSH_TASK.RSSURL (Text); errors are swallowed so
    re-running on an already-migrated schema is harmless."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 1.2.1
    try:
        with op.batch_alter_table("SITE_BRUSH_TASK") as batch_op:
            batch_op.add_column(sa.Column('RSSURL', sa.Text, nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 655 | Python | .py | 23 | 24.869565 | 76 | 0.690705 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,138 | ae61cfa6ada6_1_2_4.py | demigody_nas-tools/scripts/versions/ae61cfa6ada6_1_2_4.py | """1.2.4
Revision ID: ae61cfa6ada6
Revises: 1f5cc26cdd3d
Create Date: 2023-04-11 10:24:45.522668
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ae61cfa6ada6'
down_revision = '1f5cc26cdd3d'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.4: add DOWNLOAD_HISTORY.SE and SAVE_PATH plus an index on
    SAVE_PATH; errors are swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table("DOWNLOAD_HISTORY") as batch_op:
            batch_op.add_column(sa.Column('SE', sa.Text, nullable=True))
            batch_op.add_column(sa.Column('SAVE_PATH', sa.Text, nullable=True))
            batch_op.create_index('ix_DOWNLOAD_HISTORY_SAVE_PATH', ['SAVE_PATH'])
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 801 | Python | .py | 24 | 29.083333 | 81 | 0.684416 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,139 | 13a58bd5311f_1_2_2.py | demigody_nas-tools/scripts/versions/13a58bd5311f_1_2_2.py | """1.2.2
Revision ID: 13a58bd5311f
Revises: 69508d1aed24
Create Date: 2023-04-04 08:49:43.453901
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '13a58bd5311f'
down_revision = '69508d1aed24'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.2: add FILTER_INCLUDE/FILTER_EXCLUDE to RSS_TVS and RSS_MOVIES;
    errors are swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 1.2.2
    try:
        with op.batch_alter_table("RSS_TVS") as batch_op:
            batch_op.add_column(sa.Column('FILTER_INCLUDE', sa.Text, nullable=True))
            batch_op.add_column(sa.Column('FILTER_EXCLUDE', sa.Text, nullable=True))
    except Exception as e:
        pass
    try:
        with op.batch_alter_table("RSS_MOVIES") as batch_op:
            batch_op.add_column(sa.Column('FILTER_INCLUDE', sa.Text, nullable=True))
            batch_op.add_column(sa.Column('FILTER_EXCLUDE', sa.Text, nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 1,020 | Python | .py | 30 | 29 | 84 | 0.672098 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,140 | 6abeaa9ece15_1_2_0.py.py | demigody_nas-tools/scripts/versions/6abeaa9ece15_1_2_0.py.py | """1.2.0
Revision ID: 6abeaa9ece15
Revises: None
Create Date: 2023-03-15 10:07:19.965255
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6abeaa9ece15'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Cumulative schema upgrade covering releases 1.0.0 through 1.1.2.

    Every step is wrapped so that failures (e.g. a column that already
    exists on a re-run) are silently ignored, keeping the migration
    effectively idempotent — same behavior as the original per-step
    try/except blocks, now factored into helpers.
    """

    def _batch(table, apply_fn):
        # Run *apply_fn* against a batch_alter_table context for *table*,
        # swallowing any error exactly like the original per-step try/except.
        try:
            with op.batch_alter_table(table) as batch_op:
                apply_fn(batch_op)
        except Exception:
            pass

    def _add_columns(table, *columns):
        # Add the given sa.Column objects to *table* in one batch.
        def _apply(batch_op):
            for column in columns:
                batch_op.add_column(column)
        _batch(table, _apply)

    # 1.0.0 — drop obsolete word tables, widen CUSTOM_WORDS.OFFSET to Text
    op.execute('DROP TABLE IF EXISTS IGNORED_WORDS')
    op.execute('DROP TABLE IF EXISTS REPLACED_WORDS')
    op.execute('DROP TABLE IF EXISTS OFFSET_WORDS')
    _batch("CUSTOM_WORDS",
           lambda b: b.alter_column('OFFSET', type_=sa.Text, existing_type=sa.Integer))

    # 1.0.1
    _add_columns("CONFIG_USER_RSS",
                 sa.Column('SAVE_PATH', sa.Text),
                 sa.Column('DOWNLOAD_SETTING', sa.Integer))

    # 1.0.2 — extended subscription attributes for movies and TV
    _add_columns("RSS_MOVIES",
                 sa.Column('RSS_SITES', sa.Text),
                 sa.Column('SEARCH_SITES', sa.Text),
                 sa.Column('OVER_EDITION', sa.Integer),
                 sa.Column('FILTER_RESTYPE', sa.Text),
                 sa.Column('FILTER_PIX', sa.Text),
                 sa.Column('FILTER_RULE', sa.Integer),
                 sa.Column('FILTER_TEAM', sa.Text),
                 sa.Column('SAVE_PATH', sa.Text),
                 sa.Column('DOWNLOAD_SETTING', sa.Integer),
                 sa.Column('FUZZY_MATCH', sa.Integer),
                 sa.Column('NOTE', sa.Text))
    _add_columns("RSS_TVS",
                 sa.Column('RSS_SITES', sa.Text),
                 sa.Column('SEARCH_SITES', sa.Text),
                 sa.Column('OVER_EDITION', sa.Integer),
                 sa.Column('FILTER_RESTYPE', sa.Text),
                 sa.Column('FILTER_PIX', sa.Text),
                 sa.Column('FILTER_RULE', sa.Integer),
                 sa.Column('FILTER_TEAM', sa.Text),
                 sa.Column('SAVE_PATH', sa.Text),
                 sa.Column('DOWNLOAD_SETTING', sa.Integer),
                 sa.Column('FUZZY_MATCH', sa.Integer),
                 sa.Column('TOTAL_EP', sa.Integer),
                 sa.Column('CURRENT_EP', sa.Integer),
                 sa.Column('NOTE', sa.Text))

    # 1.0.3 — rename TRANSFER_HISTORY columns, add destination fields
    def _transfer_history(b):
        b.alter_column('FILE_PATH', new_column_name="SOURCE_PATH", existing_type=sa.Text)
        b.alter_column('FILE_NAME', new_column_name="SOURCE_FILENAME", existing_type=sa.Text)
        b.alter_column('SE', new_column_name="SEASON_EPISODE", existing_type=sa.Text)
        b.add_column(sa.Column('TMDBID', sa.Integer))
        b.add_column(sa.Column('DEST_PATH', sa.Text))
        b.add_column(sa.Column('DEST_FILENAME', sa.Text))
    _batch("TRANSFER_HISTORY", _transfer_history)
    _add_columns("DOWNLOAD_SETTING", sa.Column('DOWNLOADER', sa.Text))

    # 1.0.7
    _add_columns("TRANSFER_UNKNOWN", sa.Column('MODE', sa.Text, nullable=True))

    # 1.0.8
    _add_columns("CONFIG_USER_RSS",
                 sa.Column('RECOGNIZATION', sa.Text, nullable=True),
                 sa.Column('MEDIAINFOS', sa.Text, nullable=True))

    # 1.0.9
    _batch("SITE_USER_INFO_STATS", lambda b: b.drop_column('FAVICON'))
    _add_columns("DOUBAN_MEDIAS", sa.Column('ADD_TIME', sa.Text, nullable=True))
    _add_columns("SITE_BRUSH_TASK", sa.Column('SENDMESSAGE', sa.Text, nullable=True))

    # 1.0.10
    _add_columns("RSS_MOVIES", sa.Column('FILTER_ORDER', sa.Integer, nullable=True))
    _add_columns("RSS_TVS", sa.Column('FILTER_ORDER', sa.Integer, nullable=True))

    # 1.0.11
    _add_columns("RSS_MOVIES", sa.Column('KEYWORD', sa.Text, nullable=True))
    _add_columns("RSS_TVS", sa.Column('KEYWORD', sa.Text, nullable=True))

    # 1.0.12
    _add_columns("CONFIG_USER_RSS",
                 sa.Column('OVER_EDITION', sa.Integer, nullable=True),
                 sa.Column('SITES', sa.Text, nullable=True),
                 sa.Column('FILTER_ARGS', sa.Text, nullable=True))

    # 1.1.1
    _add_columns("DOWNLOAD_HISTORY",
                 sa.Column('DOWNLOADER', sa.Text),
                 sa.Column('DOWNLOAD_ID', sa.Text))
    _add_columns("SITE_BRUSH_TASK", sa.Column('LABEL', sa.Text))

    def _brush_task_counters(b):
        # Convert the brush-task counters/sizes from Text to Integer.
        b.alter_column('DOWNLOAD_COUNT', type_=sa.Integer, existing_type=sa.Text)
        b.alter_column('REMOVE_COUNT', type_=sa.Integer, existing_type=sa.Text)
        b.alter_column('DOWNLOAD_SIZE', type_=sa.Integer, existing_type=sa.Text)
        b.alter_column('UPLOAD_SIZE', type_=sa.Integer, existing_type=sa.Text)
    _batch("SITE_BRUSH_TASK", _brush_task_counters)

    # 1.1.2
    _add_columns("DOWNLOADER", sa.Column('MATCH_PATH', sa.Integer))


def downgrade() -> None:
    """Downgrade is intentionally a no-op (original behavior)."""
    pass
| 7,031 | Python | .py | 166 | 34.042169 | 104 | 0.625893 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,141 | 532e45e74cc0_3_3_17.py | demigody_nas-tools/scripts/versions/532e45e74cc0_3_3_17.py | """3.3.18
Revision ID: 532e45e74cc0
Revises: eb3437042cc8
Create Date: 2024-04-15 05:13:43.753754
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '532e45e74cc0'
down_revision = 'eb3437042cc8'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v3.3.18: add CONFIG_SITE.APIKEY (Text); errors are swallowed for
    idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('CONFIG_SITE') as batch_op:
            batch_op.add_column(sa.Column('APIKEY', sa.TEXT(), nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 743 | Python | .py | 24 | 27.291667 | 78 | 0.683544 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,142 | 702b7666a634_1_2_5.py | demigody_nas-tools/scripts/versions/702b7666a634_1_2_5.py | """1.2.5
Revision ID: 702b7666a634
Revises: ae61cfa6ada6
Create Date: 2023-04-14 13:08:44.689878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '702b7666a634'
down_revision = 'ae61cfa6ada6'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.5: drop DOWNLOAD_SETTING.CONTENT_LAYOUT; errors are swallowed
    for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table("DOWNLOAD_SETTING") as batch_op:
            batch_op.drop_column('CONTENT_LAYOUT')
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 719 | Python | .py | 24 | 26.291667 | 66 | 0.687045 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,143 | a19a48dbb41b_1_2_7.py | demigody_nas-tools/scripts/versions/a19a48dbb41b_1_2_7.py | """1.2.7
Revision ID: a19a48dbb41b
Revises: d68a85a8f10d
Create Date: 2023-05-09 14:44:03.251571
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a19a48dbb41b'
down_revision = 'd68a85a8f10d'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.7: create an index on TRANSFER_HISTORY.DATE; errors are
    swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        op.create_index(op.f('ix_TRANSFER_HISTORY_DATE'), 'TRANSFER_HISTORY', ['DATE'], unique=False)
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 602 | Python | .py | 21 | 25.571429 | 101 | 0.705061 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,144 | d68a85a8f10d_1_2_6.py | demigody_nas-tools/scripts/versions/d68a85a8f10d_1_2_6.py | """1.2.6
Revision ID: d68a85a8f10d
Revises: 702b7666a634
Create Date: 2023-04-16 14:03:56.871650
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd68a85a8f10d'
down_revision = '702b7666a634'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.6: add CONFIG_SYNC_PATHS.COMPATIBILITY (Integer); errors are
    swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table("CONFIG_SYNC_PATHS") as batch_op:
            batch_op.add_column(sa.Column('COMPATIBILITY', sa.Integer, nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 756 | Python | .py | 24 | 27.833333 | 86 | 0.690608 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,145 | 7c14267ffbe4_1_2_8.py | demigody_nas-tools/scripts/versions/7c14267ffbe4_1_2_8.py | """1.2.8
Revision ID: 7c14267ffbe4
Revises: a19a48dbb41b
Create Date: 2023-05-13 11:42:58.215215
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7c14267ffbe4'
down_revision = 'a19a48dbb41b'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.8: add SITE_BRUSH_TASK.UP_LIMIT and DL_LIMIT (Text); errors are
    swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table("SITE_BRUSH_TASK") as batch_op:
            batch_op.add_column(sa.Column('UP_LIMIT', sa.Text, nullable=True))
            batch_op.add_column(sa.Column('DL_LIMIT', sa.Text, nullable=True))
    except Exception as e:
        pass


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 689 | Python | .py | 22 | 27.409091 | 78 | 0.698027 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,146 | 1f5cc26cdd3d_1_2_3.py | demigody_nas-tools/scripts/versions/1f5cc26cdd3d_1_2_3.py | """1.2.3
Revision ID: 1f5cc26cdd3d
Revises: 13a58bd5311f
Create Date: 2023-04-07 08:23:05.282129
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1f5cc26cdd3d'
down_revision = '13a58bd5311f'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.2.3: add SITE_BRUSH_TASK.SAVEPATH (Text); errors are swallowed
    for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 1.2.2
    try:
        with op.batch_alter_table("SITE_BRUSH_TASK") as batch_op:
            batch_op.add_column(sa.Column('SAVEPATH', sa.Text, nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    pass
| 657 | Python | .py | 23 | 24.956522 | 78 | 0.691693 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,147 | eb3437042cc8_1_3_1.py | demigody_nas-tools/scripts/versions/eb3437042cc8_1_3_1.py | """1.3.1
Revision ID: eb3437042cc8
Revises: ff1b04a637f8
Create Date: 2023-11-22 17:07:42.765426
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eb3437042cc8'
down_revision = 'ff1b04a637f8'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.3.1: add ONLY_NASTOOL (Integer) to DOWNLOADER and
    TORRENT_REMOVE_TASK; errors are swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('DOWNLOADER') as batch_op:
            batch_op.add_column(sa.Column('ONLY_NASTOOL', sa.Integer(), nullable=True))
    except Exception as e:
        pass
    try:
        with op.batch_alter_table('TORRENT_REMOVE_TASK') as batch_op:
            batch_op.add_column(sa.Column('ONLY_NASTOOL', sa.Integer(), nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 957 | Python | .py | 29 | 28.551724 | 87 | 0.675 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,148 | ff1b04a637f8_1_3_0.py | demigody_nas-tools/scripts/versions/ff1b04a637f8_1_3_0.py | """1.3.0
Revision ID: ff1b04a637f8
Revises: 7c14267ffbe4
Create Date: 2023-09-17 09:35:42.773359
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ff1b04a637f8'
down_revision = '7c14267ffbe4'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """v1.3.0: add CONFIG_SYNC_PATHS.LOCATING (Integer); errors are
    swallowed for idempotent re-runs."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        with op.batch_alter_table('CONFIG_SYNC_PATHS') as batch_op:
            batch_op.add_column(sa.Column('LOCATING', sa.Integer(), nullable=True))
    except Exception as e:
        pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade is intentionally a no-op."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 753 | Python | .py | 24 | 27.708333 | 83 | 0.686546 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,149 | trayicon.py | demigody_nas-tools/package/trayicon.py | import os
import sys
import webbrowser
import wx
import wx.adv
class Balloon(wx.adv.TaskBarIcon):
    """Windows system-tray icon: double-click opens the home page, the
    context menu offers opening the log file and force-quitting the app."""
    # Icon path is derived from this file's location with "package" stripped.
    ICON = os.path.dirname(__file__).replace("package", "") + "nas-tools.ico"

    def __init__(self, homepage, log_path):
        wx.adv.TaskBarIcon.__init__(self)
        self.SetIcon(wx.Icon(self.ICON))
        self.Bind(wx.adv.EVT_TASKBAR_LEFT_DCLICK, self.OnTaskBarLeftDClick)
        self.homepage = homepage
        self.log_path = log_path

    # Menu data: (label, handler) pairs for the popup menu.
    def setMenuItemData(self):
        return ("Log", self.Onlog), ("Close", self.OnClose)

    # Build the tray popup menu from setMenuItemData().
    def CreatePopupMenu(self):
        menu = wx.Menu()
        for itemName, itemHandler in self.setMenuItemData():
            if not itemName:  # an empty name inserts a separator
                menu.AppendSeparator()
                continue
            menuItem = wx.MenuItem(None, wx.ID_ANY, text=itemName, kind=wx.ITEM_NORMAL)  # create the menu item
            menu.Append(menuItem)  # attach the item to the menu
            self.Bind(wx.EVT_MENU, itemHandler, menuItem)
        return menu

    def OnTaskBarLeftDClick(self, event):
        # Double-click on the tray icon opens the web UI.
        webbrowser.open(self.homepage)

    def Onlog(self, event):
        # Open the log file with the default associated application (Windows only).
        os.startfile(self.log_path)

    @staticmethod
    def OnClose(event):
        # Force-kill this process by executable name (Windows taskkill).
        exe_name = os.path.basename(sys.executable)
        os.system('taskkill /F /IM ' + exe_name)
class TrayIcon(wx.Frame):
    """Hidden frame that hosts the Balloon tray icon, opens the home page
    once, then runs the wx main loop (blocks until the app exits)."""
    def __init__(self, homepage, log_path):
        app = wx.App()
        wx.Frame.__init__(self, None)
        self.taskBarIcon = Balloon(homepage, log_path)
        webbrowser.open(homepage)
        self.Hide()
        app.MainLoop()
class NullWriter:
    """A do-nothing stdout/stderr replacement for windowed (no-console) builds.

    Note: write/flush deliberately take ``*args`` (the bound instance lands
    in ``args``), so they work as instance methods that discard everything.
    """
    softspace = 0
    encoding = 'UTF-8'

    def write(*args):
        pass

    def flush(*args):
        pass

    # Some packages are checking if stdout/stderr is available (e.g., youtube-dl). For details, see #1883.
    def isatty(self):
        return False
2,289,150 | _pypan115.py | demigody_nas-tools/app/downloader/client/_pypan115.py | # -*- coding: utf-8 -*-
import re
import time
from urllib import parse
import requests
import log
from app.utils import RequestUtils, ExceptionUtils
class PyPan115:
cookie = None
user_agent = None
req = None
space_info = None
err = None
def __init__(self, cookie):
self.cookie = cookie
self.req = RequestUtils(cookies=self.cookie, session=requests.Session())
# 登录
    # Log in: the cookie is validated implicitly by fetching the space info.
    def login(self):
        if not self.getSpaceInfo():
            return False
        return True
    # Fetch storage-space info; on success caches it in self.space_info and
    # logs a usage summary, on failure records the reason in self.err.
    def getSpaceInfo(self):
        try:
            self.space_info = {}
            url = "https://webapi.115.com/files/index_info"
            p = self.req.get_res(url=url)
            if p:
                rootobject = p.json()
                if not rootobject.get("state"):
                    # API signals failure via "state"; keep its error text.
                    self.err = "获取 SpaceInfo 错误:{}".format(rootobject.get("error"))
                    return False
                self.space_info = rootobject.get('data', {}).get('space_info', {})
                all_total = self.space_info.get('all_total', {}).get('size_format', '未知')
                all_remain = self.space_info.get('all_remain', {}).get('size_format', '未知')
                all_use = self.space_info.get('all_use', {}).get('size_format', '未知')
                log.info(f"115空间统计: [总计可用]: {all_total} | [当前剩余]: {all_remain} | [已使用]: {all_use}")
            return True
        except Exception as result:
            ExceptionUtils.exception_traceback(result)
            self.err = "异常错误:{}".format(result)
        return False
# 获取目录ID
def getdirid(self, tdir):
try:
url = "https://webapi.115.com/files/getid?path=" + parse.quote(tdir or '/')
p = self.req.get_res(url=url)
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取目录 [{}]ID 错误:{}".format(tdir, rootobject["error"])
return False, ''
return True, rootobject.get("id")
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, ''
# 获取任务列表
def gettasklist(self, page=1):
try:
tasks = []
url = "https://115.com/web/lixian/?ct=lixian&ac=task_lists"
while True:
postdata = "page={}".format(page)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取任务列表错误:{}".format(rootobject["error"])
return False, tasks
if rootobject.get("count") == 0:
break
tasks += rootobject.get("tasks") or []
if page >= rootobject.get("page_count"):
break
return True, tasks
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, []
# 添加任务
def addtask(self, tdir, content):
try:
ret, dirid = self.getdirid(tdir)
if not ret:
return False, ''
# 转换为磁力
if re.match("^https*://", content):
try:
p = self.req.get_res(url=content)
if p and p.headers.get("Location"):
content = p.headers.get("Location")
except Exception as result:
ExceptionUtils.exception_traceback(result)
content = str(result).replace("No connection adapters were found for '", "").replace("'", "")
url = "https://115.com/web/lixian/?ct=lixian&ac=add_task_urls"
postdata = "url[0]={}&savepath=&wp_path_id={}".format(parse.quote(content), dirid)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = rootobject.get("error")
return False, ''
return True, rootobject.get('result', [{}])[0].get('info_hash', '未知')
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, ''
# 删除任务
def deltask(self, thash):
try:
url = "https://115.com/web/lixian/?ct=lixian&ac=task_del"
postdata = "hash[0]={}".format(thash)
p = self.req.post_res(url=url, params=postdata.encode('utf-8'))
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = rootobject.get("error_msg")
return False
return True
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False
# 根据ID获取文件夹路径
def getiddir(self, tid):
try:
path = '/'
url = "https://aps.115.com/natsort/files.php?aid=1&cid={}&o=file_name&asc=1&offset=0&show_dir=1&limit=40&code=&scid=&snap=0&natsort=1&record_open_time=1&source=&format=json&fc_mix=0&type=&star=&is_share=&suffix=&custom_order=0".format(
tid)
p = self.req.get_res(url=url)
if p:
rootobject = p.json()
if not rootobject.get("state"):
self.err = "获取 ID[{}]路径 错误:{}".format(id, rootobject["error"])
return False, path
patharray = rootobject["path"]
for pathobject in patharray:
if pathobject.get("cid") == 0:
continue
path += pathobject.get("name") + '/'
if path == "/":
self.err = "文件路径不存在"
return False, path
return True, path
except Exception as result:
ExceptionUtils.exception_traceback(result)
self.err = "异常错误:{}".format(result)
return False, '/' | 6,562 | Python | .pyp | 149 | 29.067114 | 247 | 0.512762 | demigody/nas-tools | 8 | 1 | 0 | AGPL-3.0 | 9/5/2024, 10:48:34 PM (Europe/Amsterdam) |
2,289,151 | user.cpython-310-darwin.so | demigody_nas-tools/web/backend/user.cpython-310-darwin.so | Êşº¾ @ ш @ · Ïúíş … ( __TEXT € € __text __TEXT À1 X™ À1 € __stubs __TEXT Ë 4 Ë € __stub_helper __TEXT LÍ ¾ LÍ € __const __TEXT Ñ Ÿ Ñ __cstring __TEXT %p n %p __unwind_info __TEXT ”~ ` ”~ x __DATA € À € À __nl_symbol_ptr __DATA € € ^ __got __DATA € ğ € _ __la_symbol_ptr __DATA ø€ ğ ø€ } __const __DATA ğƒ �j ğƒ __data __DATA €î ˜ €î __common __DATA __bss __DATA ( H __LINKEDIT @ À @ ˆ‘ " €0 @ F P pH x èQ H 0S * @I
Hˆ P ¬ ¬ ® | ĞE
Û f�&ÄÒø=¨ºhÓÚ·p$
* 8 d' /usr/lib/libSystem.B.dylib & |