rogerxavier committed
Commit 762894e · verified · 1 Parent(s): 385567f

Upload 10 files
getNode.py CHANGED
@@ -1,11 +1,10 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-import requests
+
 from server.decoder import decode_url_to_configs
-from server.utils import config,headers
 import random
 from fastapi import APIRouter,BackgroundTasks
-from randomSubscribeUrl import subscription_link_list,getRandomSubscribeUrl,delete_invalid_url_in_txt
+from server.utils import subscription_link_list,getRandomSubscribeUrl,delete_invalid_url_in_txt
 
 
 router = APIRouter()
@@ -22,24 +21,11 @@ def getNode(background_tasks: BackgroundTasks)->str:
     return NodeStr
 
 def RandomNode(NodeList:list)->str:
-    # Return one node (prefer trojan and vmess, since ss nodes rarely stay alive)
-    # Check whether the list contains entries starting with 'trojan' or 'vmess'
-    trojan_vmess_nodes = [node for node in NodeList if node.startswith('trojan') or node.startswith('vmess')]
-
-    # If such entries exist, pick one of them at random
-    if trojan_vmess_nodes:
-        return random.choice(trojan_vmess_nodes)
-    else:
-        # Otherwise pick at random from the whole list
-        return random.choice(NodeList)
-
+    # Return one node
+    return random.choice(NodeList)
 
 
 def dump_configs(url:str)->list:
     # Return the list of all node configs
     configs = decode_url_to_configs(url)
-    return configs
-
-
-
-
+    return configs
getServerList.py CHANGED
@@ -3,7 +3,7 @@
 from server.decoder import decode_url_to_configs
 import base64
 from fastapi import APIRouter,BackgroundTasks
-from randomSubscribeUrl import subscription_link_list,getRandomSubscribeUrl
+from server.utils import subscription_link_list
 
 
 router = APIRouter()
@@ -27,7 +27,6 @@ def getServerList(background_tasks: BackgroundTasks)->str:
 
     # Join all node links into one string, separated by newlines
     combined_links = "\n".join(all_node_lists)
-    # Convert the string to bytes
     combined_links_bytes = combined_links.encode("utf-8")
     # Base64-encode the bytes
     encoded_links = base64.b64encode(combined_links_bytes)
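
The second hunk builds the usual subscription payload: join all node links with newlines, encode to UTF-8 bytes, then Base64-encode. A self-contained sketch of that round trip, with invented example links:

import base64

all_node_lists = ["trojan://node-1@example.com:443", "vmess://bm9kZS0y"]  # invented example links

combined_links = "\n".join(all_node_lists)                 # one link per line
combined_links_bytes = combined_links.encode("utf-8")      # string -> bytes
encoded_links = base64.b64encode(combined_links_bytes)     # what the endpoint returns

# a subscription client reverses it the same way
assert base64.b64decode(encoded_links).decode("utf-8") == combined_links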
randomSubscribeUrl.py CHANGED
@@ -1,108 +1,33 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import requests
-import random
-from fastapi import APIRouter, BackgroundTasks
-
-
-
-
-# Add headers that mimic browser access (needed for special subscriptions such as juzi)
-headers = {
-    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
-    'accept-language': 'zh-CN,zh;q=0.9',
-    'cache-control': 'max-age=0',
-    'priority': 'u=0, i',
-    'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Google Chrome";v="128"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"macOS"',
-    'sec-fetch-dest': 'document',
-    'sec-fetch-mode': 'navigate',
-    'sec-fetch-site': 'none',
-    'sec-fetch-user': '?1',
-    'upgrade-insecure-requests': '1',
-    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'
-}
-
-
-def is_subscription_link_valid(subscribeUrl: str) -> bool:
-    try:
-        result = requests.get(subscribeUrl,headers=headers).text
-        if "error" in result:
-            return False
-        return True
-    except Exception as e:
-        # If the request merely timed out, treat the link as valid and return True (e.g. the juzi sub times out during the check)
-        print('判断sub link是否有效时候发生错误:',e)
-        return True
-
-
-def delete_invalid_url_in_txt(fileName: str) -> "void":
-    valid_lines = []
-    with open(fileName, "r") as file:
-        lines = file.readlines()
-        for line in lines:
-            email = line.strip().split(",")[0]
-            subscription_url = line.strip().split(",")[1]
-            if is_subscription_link_valid(subscription_url):
-                valid_lines.append(line)
-            else:
-                print(email + "订阅已经不可用")
-    # Write the valid lines back to the file
-    with open(fileName, "w") as file:
-        file.writelines(valid_lines)
-
-
-def subscription_link_list(fileName: str) -> list:
-    SubscribeUrlList = []
-    with open(fileName, "r") as f:
-        lines = f.readlines()
-        for line in lines:
-            subscription_url = line.strip().split(",")[1]
-            SubscribeUrlList.append(subscription_url)
-    return SubscribeUrlList
-
-
-def subscription_link_valid_list(SubscribeUrlList: list) -> list:
-    valid_link_list = list(filter(lambda f: is_subscription_link_valid(f), SubscribeUrlList))
-    ## Before returning the usable subscription links, remove the unavailable ones from the original file:
-    return valid_link_list
-
-
-def read_random_line(fileName: str) -> str:
-    with open(fileName, "r") as file:
-        lines = file.readlines()
-        return random.choice(lines)
-
-
-def getRandomSubscribeUrl(validSubscribeUrlList: list) -> str:
-    # Return one usable subscription link
-    return random.choice(validSubscribeUrlList)
-
-
-router = APIRouter()
-
-# At the moment this endpoint only exists to make it easy to inspect the remaining subscriptions while debugging
-@router.get('/')
-def returnRandomSubscribeUrl(background_tasks: BackgroundTasks) -> str:
-    # Return one usable subscription link
-    SubscribeUrlList = subscription_link_list(router.fileName)
-    # Assume the existing links are usable and return one directly; unavailable ones are deleted afterwards
-    result = getRandomSubscribeUrl(SubscribeUrlList)
-    background_tasks.add_task(delete_invalid_url_in_txt, router.fileName)
-    return result
-
-    # return hf_test()
-
-
-
-
-if __name__ == "__main__":
-    print()
-
-## Idea: pick a random subscription line and check it; if it is fine, return it
-# 1. If the subscription is broken, then
-##   1 -> scan all subscriptions, delete the unavailable ones, then pick another at random and return it without re-checking
-##   2 -> if nothing is usable after the scan, do nothing and let vercel raise the error
-
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from fastapi import APIRouter, BackgroundTasks
+from server.utils import *
+
+
+
+router = APIRouter()
+
+# At the moment this endpoint only exists to make it easy to inspect the remaining subscriptions while debugging
+@router.get('/')
+def returnRandomSubscribeUrl(background_tasks: BackgroundTasks) -> str:
+    # Return one usable subscription link
+    SubscribeUrlList = subscription_link_list(router.fileName)
+    # Assume the existing links are usable and return one directly; unavailable ones are deleted afterwards
+    result = getRandomSubscribeUrl(SubscribeUrlList)
+    background_tasks.add_task(delete_invalid_url_in_txt, router.fileName)
+    return result
+
+    # return hf_test()
+
+
+
+
+if __name__ == "__main__":
+    print()
+
+## Idea: pick a random subscription line and check it; if it is fine, return it
+# 1. If the subscription is broken, then
+##   1 -> scan all subscriptions, delete the unavailable ones, then pick another at random and return it without re-checking
+##   2 -> if nothing is usable after the scan, do nothing and let vercel raise the error
+
 # 1. If there are no subscriptions at all, vercel reports the error (no handling needed)
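
returnRandomSubscribeUrl reads router.fileName, an attribute the including application is expected to attach to the router; that wiring is not part of this commit, so the sketch below only illustrates the assumed setup (module path and route prefix are guesses):

from fastapi import FastAPI
import randomSubscribeUrl

app = FastAPI()
# assumed wiring: point the router at the txt file it should read
randomSubscribeUrl.router.fileName = "subscribeLink.txt"
app.include_router(randomSubscribeUrl.router, prefix="/randomSubscribeUrl")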
server/decoder.py CHANGED
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-import re
-import json
-from base64 import b64decode
 from typing import Dict, Iterator, List, Union
 from .resource import Resource
 from dataclasses import dataclass
 from urllib import parse
+import json,re
+from base64 import b64decode,b64encode
+from .utils import decode_base64_with_filter,truncate_encoded_string
+
 
 
 class BaseDecoder:
@@ -30,23 +31,7 @@ class EncodedCfg:
     nameinfo: str = ""
 
 
-def decode_base64_with_filter(encoded_str):
-    # Define the set of legal Base64 characters (letters, digits, plus, slash and equals sign)
-    base64_pattern = re.compile(r'[^A-Za-z0-9+/=]')
-
-    # Filter out non-Base64 characters
-    filtered_str = base64_pattern.sub('', encoded_str)
-
-    # Make sure the string length is a multiple of 4 (required by Base64)
-    while len(filtered_str) % 4:
-        filtered_str += '='
-
-    try:
-        # Decode
-        decoded_bytes = b64decode(filtered_str)
-        return decoded_bytes.decode('utf-8')
-    except Exception as e:
-        return f"解码失败: {e}"
+
 
 class ListDecoder(BaseDecoder):
     def iter_encode_config(self) -> Iterator[EncodedCfg]:
@@ -65,20 +50,29 @@ class ListDecoder(BaseDecoder):
         # Use a regular expression to strip the type=... parameter, including the part before the '#'
         # Match &type= followed by any characters up to the next '&', '#' or end of string, removing leftovers such as type=tcp== or type=tcp= that break the link
         _encoded_config_str_without_type = re.sub(r'([&?]type=[^&#]*)', '',_encoded_config_str)
+
         if "allowInsecure" in config_str:
-            yield _encoded_config_str_without_type.replace('allowInsecure=0', 'allowInsecure=1')+"#"+nameinfo
+            yield _encoded_config_str_without_type.replace('allowInsecure=0', 'allowInsecure=1')+"#"+truncate_encoded_string(nameinfo)  # shorten the name a bit
         else:
-            yield _encoded_config_str_without_type+"&allowInsecure=1"+"#"+nameinfo
+            yield _encoded_config_str_without_type+"&allowInsecure=1"+"#"+truncate_encoded_string(nameinfo)  # shorten the name a bit
 
+        # vmess links have no nameinfo here, because the value of the 'ps' key inside the payload serves as the remark
        if ("vmess" in config_str):
             vmess_decoded_data = decode_base64_with_filter(_config_str)
             vmess_info_json = json.loads(vmess_decoded_data)
             vmess_info_str = json.dumps(vmess_info_json,ensure_ascii=False)  # do not escape Chinese characters
             if ('倍率提示' not in vmess_info_str) and ('导航' not in vmess_info_str) and ('443' not in vmess_info_str):
-                yield config_str.replace('allowInsecure=0', 'allowInsecure=1')
-        if ("ss://" in config_str) and ("套餐" not in nameinfo) and ('到期' not in nameinfo) and ('流量' not in nameinfo) and ('剩余' not in nameinfo) and ('专线' not in nameinfo):
+                # Get the remark value stored under the 'ps' key
+                ps_value = vmess_info_json.get('ps', '')  # default to an empty string if the 'ps' key is missing
+                # If ps_value is not empty, truncate it; if it is empty, keep the original value (the empty string)
+                vmess_info_json['ps'] = truncate_encoded_string(ps_value) if ps_value else ps_value
+                vmess_node_return = 'vmess://'+b64encode(json.dumps(vmess_info_json,ensure_ascii=False).encode("utf-8")).decode("utf-8")
+                yield vmess_node_return
+
+        # A plain "ss://" substring check is not enough, because vmess:// links can also contain "ss://"
+        if (config_str.startswith("ss://")) and ("套餐" not in nameinfo) and ('到期' not in nameinfo) and ('流量' not in nameinfo) and ('剩余' not in nameinfo) and ('专线' not in nameinfo):
             # keep the shadowsocks node
-            yield config_str
+            yield 'ss://'+_config_str+'#'+truncate_encoded_string(nameinfo)
 
 class ConfigDecoder(BaseDecoder):
     def get_json(self) -> Dict:
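
The vmess branch above decodes the Base64 payload, shortens the ps remark, and re-encodes the JSON. A standalone sketch of that transformation; the payload is invented and truncate is a local stand-in for truncate_encoded_string:

import json
from base64 import b64decode, b64encode
from urllib import parse

def truncate(name, max_length=8):
    # same idea as truncate_encoded_string: URL-decode, cut to max_length, URL-encode again
    decoded = parse.unquote(name)
    if len(decoded) > max_length:
        decoded = decoded[:max_length] + "..."
    return parse.quote(decoded)

# an invented vmess payload, only to show the shape of the transformation
payload = {"v": "2", "ps": "a deliberately long node remark", "add": "example.com", "port": "8443"}
encoded = b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")

info = json.loads(b64decode(encoded).decode("utf-8"))
info["ps"] = truncate(info["ps"]) if info.get("ps") else info.get("ps", "")
node = "vmess://" + b64encode(json.dumps(info, ensure_ascii=False).encode("utf-8")).decode("utf-8")
print(node)  # vmess:// followed by the re-encoded JSON with the shortened remark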
server/resource.py CHANGED
@@ -1,3 +1,4 @@
+
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 import requests
@@ -8,24 +9,11 @@ class Resource:
     def __init__(self, url: str):
         self.url = url
         self.cutted_str: str
-        self.cutted_str_save_file = 'subscribe_data_encoded.txt'  # a file path that stores the previously fetched cutted_str
-        self.previous_cutted_str:str
         self._get_resource()
 
     def _get_resource(self) -> None:
-        try:
-            request = requests.get(self.url, headers=headers)
-            self.cutted_str = request.text
-            with open(self.cutted_str_save_file,'w')as f:
-                f.write(self.cutted_str)
-        except Exception as e:
-            with open(self.cutted_str_save_file,'r')as f:
-                self.previous_cutted_str=f.read()
-            print(f"请求失败,使用之前的cutted_str",self.previous_cutted_str)
-
+        request = requests.get(self.url,headers=headers)
+        self.cutted_str = request.text
 
     def get_encoded_data(self) -> str:
-        try:
-            return self.cutted_str + ((4 - len(self.cutted_str) % 4) * "=")
-        except Exception as e:
-            return self.previous_cutted_str + ((4 - len(self.previous_cutted_str) % 4) * "=")
+        return self.cutted_str + ((4 - len(self.cutted_str) % 4) * "=")
server/utils.py CHANGED
@@ -1,7 +1,12 @@
 import json
 import os
-import hashlib
-import secrets
+import random
+import re
+import urllib.parse
+from base64 import b64decode
+
+import requests
+
 # Get the directory this file lives in
 current_dir = os.path.dirname(os.path.realpath(__file__))
 # Read the config file
@@ -24,3 +29,101 @@ headers = {
     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'
 }
 
+def decode_base64_with_filter(encoded_str):
+    # Define the set of legal Base64 characters (letters, digits, plus, slash and equals sign)
+    base64_pattern = re.compile(r'[^A-Za-z0-9+/=]')
+    # Filter out non-Base64 characters
+    filtered_str = base64_pattern.sub('', encoded_str)
+    # Make sure the string length is a multiple of 4 (required by Base64)
+    while len(filtered_str) % 4:
+        filtered_str += '='
+    try:
+        # Decode
+        decoded_bytes = b64decode(filtered_str)
+        return decoded_bytes.decode('utf-8')
+    except Exception as e:
+        return f"解码失败: {e}"
+
+
+def truncate_encoded_string(encoded_string, max_length=8):
+    """
+    Decode a URL-encoded string, truncate it to the given maximum length,
+    and re-encode the result back into URL-encoded form.
+    If the decoded string is longer than max_length, keep the first max_length characters and append '...'.
+
+    Args:
+        encoded_string (str): the URL-encoded string.
+        max_length (int): maximum length to keep, 8 by default.
+
+    Returns:
+        str: the processed, re-encoded string.
+    """
+    # Decode the string
+    decoded_string = urllib.parse.unquote(encoded_string)
+
+    # Check the length of the decoded string
+    if len(decoded_string) > max_length:
+        # If it is longer than max_length, keep the first max_length characters and append '...'
+        truncated_string = decoded_string[:max_length] + "..."
+    else:
+        truncated_string = decoded_string
+
+    # Re-encode the processed string into URL-encoded form
+    processed_encoded_string = urllib.parse.quote(truncated_string)
+
+    # Return the processed (re-encoded) string
+    return processed_encoded_string
+
+def subscription_link_list(fileName: str) -> list:
+    SubscribeUrlList = []
+    with open(fileName, "r") as f:
+        lines = f.readlines()
+        for line in lines:
+            subscription_url = line.strip().split(",")[1]
+            SubscribeUrlList.append(subscription_url)
+    return SubscribeUrlList
+
+
+
+## Less important part
+def is_subscription_link_valid(subscribeUrl: str) -> bool:
+    try:
+        result = requests.get(subscribeUrl,headers=headers).text
+        if "error" in result:
+            return False
+        return True
+    except Exception as e:
+        # If the request merely timed out, treat the link as valid and return True (e.g. the juzi sub times out during the check)
+        print('判断sub link是否有效时候发生错误:',e)
+        return True
+
+def delete_invalid_url_in_txt(fileName: str) -> "void":
+    valid_lines = []
+    with open(fileName, "r") as file:
+        lines = file.readlines()
+        for line in lines:
+            email = line.strip().split(",")[0]
+            subscription_url = line.strip().split(",")[1]
+            if is_subscription_link_valid(subscription_url):
+                valid_lines.append(line)
+            else:
+                print(email + "订阅已经不可用")
+    # Write the valid lines back to the file
+    with open(fileName, "w") as file:
+        file.writelines(valid_lines)
+
+
+def subscription_link_valid_list(SubscribeUrlList: list) -> list:
+    valid_link_list = list(filter(lambda f: is_subscription_link_valid(f), SubscribeUrlList))
+    ## Before returning the usable subscription links, remove the unavailable ones from the original file:
+    return valid_link_list
+
+def read_random_line(fileName: str) -> str:
+    with open(fileName, "r") as file:
+        lines = file.readlines()
+        return random.choice(lines)
+
+
+def getRandomSubscribeUrl(validSubscribeUrlList: list) -> str:
+    # Return one usable subscription link
+    return random.choice(validSubscribeUrlList)
+## Less important part
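
A quick look at what the new truncate_encoded_string helper does to a URL-encoded node name; the sample name below is made up:

from urllib import parse
from server.utils import truncate_encoded_string

name = parse.quote("香港IPLC专线节点01")   # a made-up node name, 12 characters once decoded
short = truncate_encoded_string(name)      # default max_length=8
print(parse.unquote(short))                # 香港IPLC专线... : first 8 characters plus "..."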
server/uu.json CHANGED
@@ -1,6 +1,7 @@
-{
-  "baseUrl":"https://rogerxavier-v2b-proxy.hf.space",
-  "purchaseLinkBase":"https://afdian.com/order/create?product_type=1",
-  "planID": "ef3c43c6daa411ee965e5254001e7c00",
-  "skuID": "ef440afcdaa411ee9e165254001e7c00"
-}
+
+{
+  "baseUrl":"https://rogerxavier-v2b-proxy.hf.space",
+  "purchaseLinkBase":"https://afdian.com/order/create?product_type=1",
+  "planID": "ef3c43c6daa411ee965e5254001e7c00",
+  "skuID": "ef440afcdaa411ee9e165254001e7c00"
+}
subscribeLink.txt CHANGED
@@ -1 +1,2 @@
-335,https://sublink.cute-cloud.de/link?token=176ba293ad7f4e3c8ebd53cb6d7eadbc
+335,https://sublink.cute-cloud.de/link?token=176ba293ad7f4e3c8ebd53cb6d7eadbc
+jms from git,https://jmssub.net/members/getsub.php?service=131783&id=a35aa5ea-5893-41bc-86c9-5d283bd9cd68&usedomains=1
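
Each line of subscribeLink.txt has the form label,subscription_url; subscription_link_list in server/utils.py takes the second comma-separated field as the URL. A tiny parsing sketch using the line added above:

line = "jms from git,https://jmssub.net/members/getsub.php?service=131783&id=a35aa5ea-5893-41bc-86c9-5d283bd9cd68&usedomains=1"
label = line.strip().split(",")[0]              # "jms from git"
subscription_url = line.strip().split(",")[1]   # the URL that gets requested
print(label, subscription_url)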
validSubCount.py CHANGED
@@ -4,7 +4,6 @@ import requests
 from fastapi import APIRouter,BackgroundTasks
 
 
-
 def ValidSubCount(fileName:str)->int:
     with open(fileName, "r") as f:
         lines = f.readlines()