import os

# Install the pinned Gradio and torchtext versions this demo was written against,
# then dump the environment for debugging.
os.system('pip install gradio==2.3.0a0')
os.system('pip install -U torchtext==0.8.0')
os.system('pip freeze')

import sys
sys.path.append('.')

import gradio as gr
from scipy.io import wavfile

# Make the helper scripts executable and download the pretrained checkpoints.
os.system('chmod a+x ./separate_scripts/*.sh')
os.system('chmod a+x ./scripts/*.sh')
os.system('chmod a+x ./scripts/*/*.sh')
os.system('./separate_scripts/download_checkpoints.sh')
def inference(audio):
    # Read the uploaded file to get its sample rate and samples.
    rate, data = wavfile.read(audio.name)

    # Re-save the audio under a local name, keeping the original file's extension.
    filename = 'foo_left' + os.path.splitext(audio.name)[-1]
    wavfile.write(filename, rate, data)

    # Separate vocals with the pretrained subband-time ResUNet checkpoint.
    os.system(
        f"python bytesep/inference.py "
        f"--config_yaml=./scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml "
        f"--checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth "
        f"--audio_path={filename} --output_path=sep_vocals.mp3"
    )

    # Separate the accompaniment with the corresponding checkpoint.
    os.system(
        f"python bytesep/inference.py "
        f"--config_yaml=./scripts/4_train/musdb18/configs/accompaniment-vocals,resunet_subbandtime.yaml "
        f"--checkpoint_path=./downloaded_checkpoints/resunet143_subbtandtime_accompaniment_16.4dB_350k_steps.pth "
        f"--audio_path={filename} --output_path=sep_accompaniment.mp3"
    )

    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'

title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply upload your audio or click one of the examples to load it. Currently supports .wav files. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"

examples = [['example.wav']]

gr.Interface(
    inference,
    gr.inputs.Audio(type="file", label="Input"),
    [gr.outputs.Audio(type="file", label="Vocals"), gr.outputs.Audio(type="file", label="Accompaniment")],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch(debug=True)