import json
import os
import random
import re
import urllib.parse
from base64 import b64decode

import requests

# Directory containing the current file
current_dir = os.path.dirname(os.path.realpath(__file__))

# Load the configuration file
with open(os.path.join(current_dir, 'uu.json'), 'r') as f:
    config = json.load(f)

headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'max-age=0',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Google Chrome";v="128"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'
}


def decode_base64_with_filter(encoded_str):
    # Valid Base64 characters: letters, digits, '+', '/' and '='
    base64_pattern = re.compile(r'[^A-Za-z0-9+/=]')
    # Strip out any non-Base64 characters
    filtered_str = base64_pattern.sub('', encoded_str)
    # Pad the string to a multiple of 4, as Base64 requires
    while len(filtered_str) % 4:
        filtered_str += '='
    try:
        decoded_bytes = b64decode(filtered_str)
        return decoded_bytes.decode('utf-8')
    except Exception as e:
        return f"Decoding failed: {e}"


def truncate_encoded_string(encoded_string, max_length=8):
    """
    Decode a URL-encoded string, truncate it to the given maximum length,
    and re-encode the result back into URL-encoded form.

    If the decoded string is longer than max_length, only the first
    max_length characters are kept and '...' is appended.

    Args:
        encoded_string (str): The URL-encoded string.
        max_length (int): Maximum number of characters to keep. Defaults to 8.

    Returns:
        str: The truncated string, re-encoded in URL-encoded form.
    """
    # Decode the string
    decoded_string = urllib.parse.unquote(encoded_string)
    # Truncate if the decoded string exceeds max_length
    if len(decoded_string) > max_length:
        truncated_string = decoded_string[:max_length] + "..."
    else:
        truncated_string = decoded_string
    # Re-encode the processed string into URL-encoded form
    processed_encoded_string = urllib.parse.quote(truncated_string)
    return processed_encoded_string


def subscription_link_list(fileName: str) -> list:
    SubscribeUrlList = []
    with open(fileName, "r") as f:
        lines = f.readlines()
    # Each line is expected to look like "email,subscription_url"
    for line in lines:
        subscription_url = line.strip().split(",")[1]
        SubscribeUrlList.append(subscription_url)
    return SubscribeUrlList


## Less useful part
def is_subscription_link_valid(subscribeUrl: str) -> bool:
    try:
        result = requests.get(subscribeUrl, headers=headers).text
        if "error" in result:
            return False
        return True
    except Exception as e:
        # If the request merely fails (e.g. the juzi sub times out during this
        # check), treat the link as still valid and return True.
        print('Error while checking whether the sub link is valid:', e)
        return True


def delete_invalid_url_in_txt(fileName: str) -> None:
    valid_lines = []
    with open(fileName, "r") as file:
        lines = file.readlines()
    for line in lines:
        email = line.strip().split(",")[0]
        subscription_url = line.strip().split(",")[1]
        if is_subscription_link_valid(subscription_url):
            valid_lines.append(line)
        else:
            print(email + " subscription is no longer available")
    # Write the valid lines back to the file
    with open(fileName, "w") as file:
        file.writelines(valid_lines)


def subscription_link_valid_list(SubscribeUrlList: list) -> list:
    valid_link_list = list(filter(lambda f: is_subscription_link_valid(f), SubscribeUrlList))
    ## Before returning the usable subscription links, the invalid links
    ## should be removed from the original file:
    return valid_link_list


def read_random_line(fileName: str) -> str:
    with open(fileName, "r") as file:
        lines = file.readlines()
    return random.choice(lines)


def getRandomSubscribeUrl(validSubscribeUrlList: list) -> str:
    # Return one usable subscription link
    return random.choice(validSubscribeUrlList)
## Less useful part
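

# A minimal usage sketch showing how the functions above fit together.
# Assumptions: the file name "subscriptions.txt" is hypothetical (the real
# file name is not given in this script), and each line in it is expected
# to be "email,subscription_url".
if __name__ == "__main__":
    SUBSCRIPTION_FILE = "subscriptions.txt"  # hypothetical file name

    # Remove entries whose subscription links no longer respond correctly.
    delete_invalid_url_in_txt(SUBSCRIPTION_FILE)

    # Collect the remaining links, keep only the ones that still validate,
    # and pick one at random.
    url_list = subscription_link_list(SUBSCRIPTION_FILE)
    valid_urls = subscription_link_valid_list(url_list)
    if valid_urls:
        print("Using subscription link:", getRandomSubscribeUrl(valid_urls))
    else:
        print("No usable subscription links found")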