next-playground commited on
Commit
a37cef9
·
verified ·
1 Parent(s): 44e25b0

Create webui.py

Browse files
Files changed (1) hide show
  1. webui.py +72 -0
webui.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import subprocess
import sys

import gradio as gr
5
def audio_model_inference(files, output_folder, model_path, denoise, margin, chunks, n_fft, dim_t, dim_f):
    """Run the separate.py CLI on the uploaded audio and return the two stems.

    Parameters mirror the separate.py command-line flags (-o, -m, -d, -M,
    -c, -F, -t, -f).  Optional numeric values that are None are omitted
    from the command line.

    Returns:
        A ``(vocals_bytes, no_vocals_bytes)`` tuple on success, or an
        ``"Error: ..."`` string describing the failure.
    """
    # Gradio File components typically hand back objects whose .name is the
    # temp-file path; plain string paths are accepted too.
    # NOTE(review): confirm against the gradio version actually installed.
    paths = [getattr(f, "name", f) for f in files]

    # Build an argv list and run with shell=False: the original f-string +
    # shell=True broke on paths containing spaces and was open to shell
    # injection via uploaded filenames.  sys.executable makes the script
    # run under the current interpreter instead of relying on PATH/shebang.
    cmd = [sys.executable, "separate.py", *paths]
    if output_folder:
        cmd += ["-o", output_folder]
    if model_path:
        cmd += ["-m", model_path]
    if denoise:
        cmd.append("-d")
    # `is not None` so an explicit 0 is still forwarded — the old truthiness
    # tests silently dropped zero values.
    if margin is not None:
        cmd += ["-M", str(margin)]
    if chunks is not None:
        cmd += ["-c", str(chunks)]
    if n_fft is not None:
        cmd += ["-F", str(n_fft)]
    if dim_t is not None:
        cmd += ["-t", str(dim_t)]
    if dim_f is not None:
        cmd += ["-f", str(dim_f)]

    # Execute the separation script and surface its stderr on failure.
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        return f"Error: {result.stderr}"

    # separate.py names its outputs <stem>_vocals.wav / <stem>_no_vocals.wav
    # next to the requested output folder (stem computed once, not twice).
    stem = os.path.splitext(os.path.basename(paths[0]))[0]
    vocals_path = os.path.join(output_folder, f"{stem}_vocals.wav")
    no_vocals_path = os.path.join(output_folder, f"{stem}_no_vocals.wav")

    # Make sure both expected outputs exist before reading them.
    if not os.path.exists(vocals_path) or not os.path.exists(no_vocals_path):
        return "Error: Output files not found."

    # Context managers close the handles the original version leaked.
    with open(vocals_path, 'rb') as fh:
        vocals_audio = fh.read()
    with open(no_vocals_path, 'rb') as fh:
        no_vocals_audio = fh.read()

    return (vocals_audio, no_vocals_audio)
47
+
48
# Gradio front-end: one control per separate.py CLI flag, wired straight
# into audio_model_inference.  Component lists are passed inline rather
# than through module-level `inputs`/`outputs` variables.
iface = gr.Interface(
    fn=audio_model_inference,
    inputs=[
        gr.inputs.File(label="Source Audio Files", type='file', file_count='multiple'),
        gr.inputs.Textbox(label="Output Folder", default="output/"),
        gr.inputs.Textbox(label="Model Path", default="model.onnx"),
        gr.inputs.Checkbox(label="Enable Denoising", default=False),
        gr.inputs.Number(label="Margin", default=0.1),
        gr.inputs.Number(label="Chunk Size", default=1024),
        gr.inputs.Number(label="FFT Size", default=2048),
        gr.inputs.Number(label="Time Dimension", default=512),
        gr.inputs.Number(label="Frequency Dimension", default=64),
    ],
    outputs=[
        gr.outputs.Audio(label="Vocals"),
        gr.outputs.Audio(label="No Vocals"),
    ],
    title="Audio Separation Model",
    description="Upload audio files and configure parameters to process them using the audio separation model."
)

# Start the web UI.
iface.launch()