import os
import subprocess

import gradio as gr


def audio_model_inference(files, output_folder, model_path, denoise, margin, chunks, n_fft, dim_t, dim_f):
    # Build the command-line invocation as an argument list (avoids shell quoting issues);
    # invoke via the Python interpreter so separate.py does not need to be executable
    cmd = ["python", "separate.py", *files]
    if output_folder:
        cmd += ["-o", output_folder]
    if model_path:
        cmd += ["-m", model_path]
    if denoise:
        cmd.append("-d")
    if margin:
        cmd += ["-M", str(margin)]
    if chunks:
        cmd += ["-c", str(chunks)]
    if n_fft:
        cmd += ["-F", str(n_fft)]
    if dim_t:
        cmd += ["-t", str(dim_t)]
    if dim_f:
        cmd += ["-f", str(dim_f)]

    # Run the separation script
    result = subprocess.run(cmd, capture_output=True, text=True)

    # Check whether the command succeeded
    if result.returncode != 0:
        raise gr.Error(f"Error: {result.stderr}")

    # Locate the output files (named after the first input file)
    base = os.path.splitext(os.path.basename(files[0]))[0]
    vocals_path = os.path.join(output_folder, f"{base}_vocals.wav")
    no_vocals_path = os.path.join(output_folder, f"{base}_no_vocals.wav")

    # Make sure the output files exist
    if not os.path.exists(vocals_path) or not os.path.exists(no_vocals_path):
        raise gr.Error("Error: output files not found.")

    # Return the file paths; the gr.Audio output components can play them directly
    return vocals_path, no_vocals_path


# Gradio interface components
inputs = [
    gr.File(label="Source audio files", type="filepath", file_count="multiple"),
    gr.Textbox(label="Output folder", value="./"),
    gr.Textbox(label="Model path", value="./models/MDX_Net_Models/UVR-MDX-NET-Inst_HQ_3.onnx"),
    gr.Checkbox(label="Enable denoising", value=False),
    gr.Number(label="Margin", value=0.1),
    gr.Number(label="Chunk size", value=1024),
    gr.Number(label="FFT size", value=2048),
    gr.Number(label="Time dimension", value=512),
    gr.Number(label="Frequency dimension", value=64),
]

outputs = [gr.Audio(label="Vocals"), gr.Audio(label="No vocals")]

# Build the interface
iface = gr.Interface(
    fn=audio_model_inference,
    inputs=inputs,
    outputs=outputs,
    title="Audio Separation Model",
    description="Upload audio files, configure the parameters, and process them with the audio separation model.",
)

iface.launch()
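# Example: for a single uploaded file "song.mp3" (hypothetical name) with the default
# values above, the interface assembles a call roughly equivalent to the one below.
# The flags mirror the argument list built in audio_model_inference; the exact
# separate.py CLI is assumed from that code, not verified against the script itself.
#
#   python separate.py song.mp3 -o ./ \
#       -m ./models/MDX_Net_Models/UVR-MDX-NET-Inst_HQ_3.onnx \
#       -M 0.1 -c 1024 -F 2048 -t 512 -f 64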