import os
import re
import sys
import json
import codecs
import shutil
import yt_dlp
import warnings
import requests
from bs4 import BeautifulSoup
sys.path.append(os.getcwd())
from main.tools import huggingface, gdown, meganz, mediafire, pixeldrain
from main.app.core.ui import gr_info, gr_warning, gr_error, process_output
from main.app.variables import logger, translations, model_options, configs
from main.app.core.process import move_files_from_directory, fetch_pretrained_data, extract_name_model
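
# Download helpers behind the app's UI: audio from media URLs, voice models and
# pretrained weights from several hosts, plus a remote model-name search.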
def download_url(url):
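    """Download the audio track of a media URL as a WAV file via yt_dlp.

    The video title is sanitized (word characters plus CJK, Hangul and Cyrillic
    ranges are kept, whitespace becomes "-") and used as the output filename.
    Returns [path, path, status]; the path is presumably duplicated to feed two
    UI components at once.
    """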
    if not url: return gr_warning(translations["provide_url"])
    if not os.path.exists(configs["audios_path"]): os.makedirs(configs["audios_path"], exist_ok=True)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")

        ydl_opts = {
            "format": "bestaudio/best",
            "postprocessors": [{
                "key": "FFmpegExtractAudio",
                "preferredcodec": "wav",
                "preferredquality": "192"
            }],
            "quiet": True,
            "no_warnings": True,
            "noplaylist": True,
            "verbose": False
        }

        gr_info(translations["start"].format(start=translations["download_music"]))

        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            audio_output = os.path.join(configs["audios_path"], re.sub(r'\s+', '-', re.sub(r'[^\w\s\u4e00-\u9fff\uac00-\ud7af\u0400-\u04FF\u1100-\u11FF]', '', ydl.extract_info(url, download=False).get('title', 'video')).strip()))
            if os.path.exists(audio_output): shutil.rmtree(audio_output, ignore_errors=True)

        ydl_opts['outtmpl'] = audio_output

        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            audio_output = process_output(audio_output + ".wav")
            ydl.download([url])

        gr_info(translations["success"])
        return [audio_output, audio_output, translations["success"]]

def move_file(file, download_dir, model):
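    """Unpack `file` into `download_dir` if it is a .zip archive, then sort the
    downloaded files into the weights and logs directories under `model`."""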
    weights_dir = configs["weights_path"]
    logs_dir = configs["logs_path"]

    if not os.path.exists(weights_dir): os.makedirs(weights_dir, exist_ok=True)
    if not os.path.exists(logs_dir): os.makedirs(logs_dir, exist_ok=True)

    if file.endswith(".zip"): shutil.unpack_archive(file, download_dir)
    move_files_from_directory(download_dir, weights_dir, logs_dir, model)

def download_model(url=None, model=None):
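    """Download a voice model from a supported host (HuggingFace, Google Drive,
    Mediafire, Pixeldrain or Mega), derive and sanitize the model name if none
    is given, and install the files. Returns a translated status string; the
    temporary download directory is always removed in the `finally` block."""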
    if not url: return gr_warning(translations["provide_url"])

    url = url.replace("/blob/", "/resolve/").replace("?download=true", "").strip()
    download_dir = "download_model"
    os.makedirs(download_dir, exist_ok=True)

    try:
        gr_info(translations["start"].format(start=translations["download"]))

        if "huggingface.co" in url: file = huggingface.HF_download_file(url, download_dir)
        elif "google.com" in url: file = gdown.gdown_download(url, download_dir)
        elif "mediafire.com" in url: file = mediafire.Mediafire_Download(url, download_dir)
        elif "pixeldrain.com" in url: file = pixeldrain.pixeldrain(url, download_dir)
        elif "mega.nz" in url: file = meganz.mega_download_url(url, download_dir)
        else:
            gr_warning(translations["not_support_url"])
            return translations["not_support_url"]

        if not model:
            modelname = os.path.basename(file)
            model = extract_name_model(modelname) if modelname.endswith(".index") else os.path.splitext(modelname)[0]
            if model is None: model = os.path.splitext(modelname)[0]

        model = model.replace(".onnx", "").replace(".pth", "").replace(".index", "").replace(".zip", "").replace(" ", "_").replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("{", "").replace("}", "").replace(",", "").replace('"', "").replace("'", "").replace("|", "").strip()
        move_file(file, download_dir, model)

        gr_info(translations["success"])
        return translations["success"]
    except Exception as e:
        gr_error(message=translations["error_occurred"].format(e=e))
        return translations["error_occurred"].format(e=e)
    finally:
        shutil.rmtree(download_dir, ignore_errors=True)

def download_pretrained_model(choices, model, sample_rate):
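    """Download pretrained weights in one of two modes: picked from the built-in
    list (`choices == translations["list_model"]`, with `model`/`sample_rate`
    indexing fetch_pretrained_data()), or from two direct URLs
    (`choices == translations["download_url"]`, where `model` is the D-network
    URL and `sample_rate` the G-network URL)."""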
    pretraineds_custom_path = configs["pretrained_custom_path"]

    if choices == translations["list_model"]:
        paths = fetch_pretrained_data()[model][sample_rate]
        if not os.path.exists(pretraineds_custom_path): os.makedirs(pretraineds_custom_path, exist_ok=True)

        url = codecs.decode("uggcf://uhttvatsnpr.pb/NauC/Ivrganzrfr-EIP-Cebwrpg/erfbyir/znva/cergenvarq_phfgbz/", "rot13") + paths

        gr_info(translations["download_pretrain"])
        file = huggingface.HF_download_file(url.replace("/blob/", "/resolve/").replace("?download=true", "").strip(), os.path.join(pretraineds_custom_path, paths))

        if file.endswith(".zip"):
            shutil.unpack_archive(file, pretraineds_custom_path)
            os.remove(file)

        gr_info(translations["success"])
        return translations["success"], None
    elif choices == translations["download_url"]:
        if not model: return gr_warning(translations["provide_pretrain"].format(dg="D"))
        if not sample_rate: return gr_warning(translations["provide_pretrain"].format(dg="G"))

        gr_info(translations["download_pretrain"])

        for url in [model, sample_rate]:
            url = url.replace("/blob/", "/resolve/").replace("?download=true", "").strip()

            if "huggingface.co" in url: huggingface.HF_download_file(url, pretraineds_custom_path)
            elif "google.com" in url: gdown.gdown_download(url, pretraineds_custom_path)
            elif "mediafire.com" in url: mediafire.Mediafire_Download(url, pretraineds_custom_path)
            elif "pixeldrain.com" in url: pixeldrain.pixeldrain(url, pretraineds_custom_path)
            elif "mega.nz" in url: meganz.mega_download_url(url, pretraineds_custom_path)
            else:
                gr_warning(translations["not_support_url"])
                return translations["not_support_url"], translations["not_support_url"]

        gr_info(translations["success"])
        return translations["success"], translations["success"]

def fetch_models_data(search):
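    """Query the (ROT13-obfuscated) model search endpoint page by page and
    collect the returned HTML table fragments until an empty page, a non-200
    status or an exception ends the loop."""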
    all_table_data = []
    page = 1

    while True:
        try:
            response = requests.post(url=codecs.decode("uggcf://ibvpr-zbqryf.pbz/srgpu_qngn.cuc", "rot13"), data={"page": page, "search": search})

            if response.status_code == 200:
                table_data = response.json().get("table", "")
                if not table_data.strip(): break

                all_table_data.append(table_data)
                page += 1
            else:
                logger.debug(f"{translations['code_error']} {response.status_code}")
                break
        except json.JSONDecodeError:
            logger.debug(translations["json_error"])
            break
        except requests.RequestException as e:
            logger.debug(translations["requests_error"].format(e=e))
            break

    return all_table_data

def search_models(name):
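    """Search voice models by name, rebuild the shared model_options mapping of
    sanitized names to HuggingFace URLs, and return Gradio-style update dicts
    for the results dropdown and the download button."""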
    if not name: return gr_warning(translations["provide_name"])
    gr_info(translations["start"].format(start=translations["search"]))

    tables = fetch_models_data(name)

    if len(tables) == 0:
        gr_info(translations["not_found"].format(name=name))
        return [None]*2
    else:
        model_options.clear()

        for table in tables:
            for row in BeautifulSoup(table, "html.parser").select("tr"):
                name_tag, url_tag = row.find("a", {"class": "fs-5"}), row.find("a", {"class": "btn btn-sm fw-bold btn-light ms-0 p-1 ps-2 pe-2"})
                if not (name_tag and url_tag): continue  # skip rows missing either link instead of subscripting None

                url = url_tag["href"].replace("https://easyaivoice.com/run?url=", "")
                if "huggingface" in url: model_options[name_tag.text.replace(".onnx", "").replace(".pth", "").replace(".index", "").replace(".zip", "").replace(" ", "_").replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace(",", "").replace('"', "").replace("'", "").replace("|", "_").replace("-_-", "_").replace("_-_", "_").replace("-", "_").replace("---", "_").replace("___", "_").strip()] = url

        gr_info(translations["found"].format(results=len(model_options)))
        return [{"value": "", "choices": model_options, "interactive": True, "visible": True, "__type__": "update"}, {"value": translations["downloads"], "visible": True, "__type__": "update"}]
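
# Minimal usage sketch (an assumption, not one of the app's real entry points;
# the URLs below are placeholders):
#
#   audio_path, _, status = download_url("https://www.youtube.com/watch?v=...")
#   status = download_model("https://huggingface.co/<user>/<repo>/resolve/main/Model.zip", "MyModel")
#   dropdown_update, button_update = search_models("some-model-name")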