import zipfile
import hashlib
from utils.model import model_downloader, get_model
import requests
import json
import torch
import os
import re
import urllib.request
from inference import Inference
import gradio as gr
from constants import VOICE_METHODS, BARK_VOICES, EDGE_VOICES, zips_folder, unzips_folder
from tts.conversion import tts_infer, ELEVENLABS_VOICES_RAW, ELEVENLABS_VOICES_NAMES
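# Helper functions for the Gradio app: downloading and packaging RVC voice models,
# posting them to the model API, searching published models, and switching TTS voices.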
api_url = "https://rvc-models-api.onrender.com/uploadfile/"
if not os.path.exists(zips_folder):
    os.mkdir(zips_folder)
if not os.path.exists(unzips_folder):
    os.mkdir(unzips_folder)
def get_info(path):
    path = os.path.join(unzips_folder, path)
    try:
        a = torch.load(path, map_location="cpu")
        return a
    except Exception as e:
        print("Error reading model info:")
        print(e)
        return {}
def calculate_md5(file_path):
    hash_md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
def compress(modelname, files):
    file_path = os.path.join(zips_folder, f"{modelname}.zip")
    # Select the compression mode: ZIP_DEFLATED to compress,
    # or zipfile.ZIP_STORED to just store the file
    compression = zipfile.ZIP_DEFLATED
    # Check whether the ZIP file already exists
    if not os.path.exists(file_path):
        # If it does not exist, create the ZIP file
        with zipfile.ZipFile(file_path, mode="w") as zf:
            try:
                for file in files:
                    if file:
                        # Add the file to the ZIP archive
                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
            except FileNotFoundError as fnf:
                print("An error occurred", fnf)
    else:
        # If the ZIP file already exists, append the files to it
        with zipfile.ZipFile(file_path, mode="a") as zf:
            try:
                for file in files:
                    if file:
                        # Add the file to the ZIP archive
                        zf.write(unzips_folder if ".index" in file else os.path.join(unzips_folder, file), compress_type=compression)
            except FileNotFoundError as fnf:
                print("An error occurred", fnf)
    return file_path
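# NOTE: compress() appears unused in the current flow; post_model() downloads the
# original zip via download_online_model() instead (see the commented-out call there).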
# download the model only
def get_username(url):
    match_username = re.search(r'models/(.*?)/', url)
    if match_username:
        result = match_username.group(1)
        return result

def get_username_hf(url):
    match_username = re.search(r'huggingface.co/(.*?)/', url)
    if match_username:
        result = match_username.group(1)
        return result
pattern_zip = r"/([^/]+)\.zip$"

def get_file_name(url):
    match = re.search(pattern_zip, url)
    if match:
        extracted_string = match.group(1)
        return extracted_string
    else:
        raise Exception("没有找到AI歌手模型的zip压缩包。")
def download_online_model(url):
    url = url.strip()
    if url.startswith('https://download.openxlab.org.cn/models/'):
        zip_path = get_username(url) + "-" + get_file_name(url)
    elif url.startswith('https://huggingface.co/'):
        zip_path = get_username_hf(url) + "-" + get_file_name(url)
    else:
        zip_path = get_file_name(url)
    if not os.path.exists(zip_path + ".zip"):
        print("P.S. AI歌手模型还未下载")
        try:
            zip_name = url.split('/')[-1]
            if 'pixeldrain.com' in url:
                url = f'https://pixeldrain.com/api/file/{zip_name}'
            urllib.request.urlretrieve(url, zip_path + ".zip")
            #return f'[√] {dir_name} Model successfully downloaded!'
        except Exception as e:
            raise Exception(str(e))
    else:
        print("P.S. AI歌手模型之前已经下载")
    return zip_path + ".zip"
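# Run RVC voice conversion on a source audio file using the selected model,
# pitch-extraction method, and post-processing settings.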
def infer(model, f0_method, audio_file, index_rate, vc_transform0, protect0, resample_sr1, filter_radius1):
    if not model:
        return "No model url specified, please specify a model url.", None
    if not audio_file:
        return "No audio file specified, please load an audio file.", None
    inference = Inference(
        model_name=model,
        f0_method=f0_method,
        source_audio_path=audio_file,
        feature_ratio=index_rate,
        transposition=vc_transform0,
        protection_amnt=protect0,
        resample=resample_sr1,
        harvest_median_filter=filter_radius1,
        output_file_name=os.path.join("./audio-outputs", os.path.basename(audio_file))
    )
    output = inference.run()
    if 'success' in output and output['success']:
        print("Inference completed successfully...")
        return output, output['file']
    else:
        print("Inference failed...", output)
        return output, None
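# Download a model from the given URL, verify that it contains .pth and .index
# files, then upload the zip and its metadata to the model API.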
def post_model(name, model_url, version, creator):
    modelname = model_downloader(model_url, zips_folder, unzips_folder)
    print(f"下载的模型.zip文件名:{modelname}")
    if not modelname:
        return "抱歉!无法从您提供的链接下载模型,请尝试其他链接或稍后再试。"
    model_files = get_model(unzips_folder, modelname)
    if not model_files:
        return "抱歉!无法找到模型文件,请检查链接对应的模型文件并稍后重试。"
    if not model_files.get('pth'):
        return "抱歉!无法找到.pth模型文件,请检查链接对应的模型文件并稍后重试。"
    else:
        print("已找到.pth模型文件")
    if not model_files.get('index'):
        return "抱歉!无法找到.index模型文件,请检查链接对应的模型文件并稍后重试。"
    else:
        print("已找到.index模型文件")
    md5_hash = calculate_md5(os.path.join(unzips_folder, model_files['pth']))
    zip_file = download_online_model(model_url)  # compress(modelname, list(model_files.values()))
    print(f"已打包模型文件:{model_files.values()}")
    a = get_info(model_files.get('pth'))
    file_to_upload = open(zip_file, "rb")
    info = a.get("info", "None")
    sr = a.get("sr", "None")
    f0 = a.get("f0", "None")
    print(f"正在上传的模型:{file_to_upload}")
    data = {
        "name": name,
        "version": version,
        "creator": creator,
        "hash": md5_hash,
        "info": info,
        "sr": sr,
        "f0": f0
    }
    print("正在上传模型文件...")
    # Send the POST request
    response = requests.post(api_url, files={"file": file_to_upload}, data=data)
    result = response.json()
    # Check the response
    if response.status_code == 200:
        result = response.json()
        return json.dumps(result, indent=4)
    else:
        print("加载模型文件时出错:", response.status_code)
        return result
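# Query the external search service for models whose file name matches `name`
# and stream the results back as a Markdown table (first 20 matches).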
def search_model(name):
    web_service_url = "https://script.google.com/macros/s/AKfycbyRaNxtcuN8CxUrcA_nHW6Sq9G2QJor8Z2-BJUGnQ2F_CB8klF4kQL--U2r2MhLFZ5J/exec"
    response = requests.post(web_service_url, json={
        'type': 'search_by_filename',
        'name': name
    })
    result = []
    response.raise_for_status()  # Raise an exception on HTTP errors
    json_response = response.json()
    cont = 0
    result.append("""| AI歌手名 | 模型下载链接(可直接复制到AI翻唱页面使用) | 训练步数 Epoch | 模型采样率 |
| ---------------- | -------------- |:------:|:-----------:|
""")
    yield "<br />".join(result)
    if json_response.get('ok', None):
        for model in json_response['ocurrences']:
            if cont < 20:
                model_name = str(model.get('name', 'N/A')).strip()
                model_url = model.get('url', 'N/A')
                epoch = model.get('epoch', 'N/A')
                sr = model.get('sr', 'N/A')
                line = f"""|{model_name}|<a>{model_url}</a>|{epoch}|{sr}|
"""
                result.append(line)
                yield "".join(result)
            cont += 1
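# Show or hide the voice selector and the ElevenLabs-specific controls
# depending on which TTS backend is selected in the UI.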
def update_tts_methods_voice(select_value):
    if select_value == "Edge-tts":
        return gr.Dropdown.update(choices=EDGE_VOICES, visible=True, value="es-CO-GonzaloNeural-Male"), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
    elif select_value == "Bark-tts":
        return gr.Dropdown.update(choices=BARK_VOICES, visible=True), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=False)
    elif select_value == 'ElevenLabs':
        return gr.Dropdown.update(choices=ELEVENLABS_VOICES_NAMES, visible=True, value="Bella"), gr.Markdown.update(visible=True), gr.Textbox.update(visible=True), gr.Radio.update(visible=False)
    elif select_value == 'CoquiTTS':
        return gr.Dropdown.update(visible=False), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=True)