jhj0517 committed
Commit 8c8001e · 1 Parent(s): eab33e7

Add dedicated bgm separation app

app.py CHANGED
@@ -343,6 +343,7 @@ class App:
                     btn_openfolder.click(fn=lambda: self.open_folder(os.path.join(self.args.output_dir, "translations")),
                                          inputs=None,
                                          outputs=None)
+
                 with gr.TabItem("BGM Separation"):
                     files_audio = gr.Files(type="filepath", label="Upload Audio Files to separate background music")
                     dd_uvr_device = gr.Dropdown(label="Device", value=self.whisper_inf.music_separator.device,
@@ -351,21 +352,30 @@ class App:
                                                 choices=self.whisper_inf.music_separator.available_models)
                     nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0)
                     cb_uvr_save_file = gr.Checkbox(label="Save separated files to output",
-                                                   value=uvr_params["save_file"])
+                                                   value=True, visible=False)
                     btn_run = gr.Button("SEPARATE BACKGROUND MUSIC", variant="primary")
-                    with gr.Row():
-                        with gr.Column(scale=8):
-                            ad_instrumental = gr.Audio(label="Instrumental")
-                            ad_vocals = gr.Audio(label="Vocals")
-                        with gr.Column(scale=1):
-                            btn_openfolder = gr.Button('📂', scale=1)
+                    with gr.Column():
+                        with gr.Row():
+                            ad_instrumental = gr.Audio(label="Instrumental", scale=8)
+                            btn_open_instrumental_folder = gr.Button('📂', scale=1)
+                        with gr.Row():
+                            ad_vocals = gr.Audio(label="Vocals", scale=8)
+                            btn_open_vocals_folder = gr.Button('📂', scale=1)

-                    btn_run.click(fn=self.whisper_inf.music_separator.separate,
-                                  inputs=[files_audio, dd_uvr_device, dd_uvr_model_size, nb_uvr_segment_size, cb_uvr_save_file],
+                    btn_run.click(fn=self.whisper_inf.music_separator.separate_files,
+                                  inputs=[files_audio, dd_uvr_model_size, dd_uvr_device, nb_uvr_segment_size,
+                                          cb_uvr_save_file],
                                   outputs=[ad_instrumental, ad_vocals])
-                    btn_openfolder.click(inputs=None,
-                                         outputs=None,
-                                         fn=lambda: self.open_folder(os.path.join(self.args.output_dir, "uvr")))
+                    btn_open_instrumental_folder.click(inputs=None,
+                                                       outputs=None,
+                                                       fn=lambda: self.open_folder(os.path.join(
+                                                           self.args.output_dir, "UVR", "instrumental"
+                                                       )))
+                    btn_open_vocals_folder.click(inputs=None,
+                                                 outputs=None,
+                                                 fn=lambda: self.open_folder(os.path.join(
+                                                     self.args.output_dir, "UVR", "vocals"
+                                                 )))

         # Launch the app with optional gradio settings
         args = self.args
@@ -386,7 +396,8 @@ class App:
         if os.path.exists(folder_path):
             os.system(f"start {folder_path}")
         else:
-            print(f"The folder {folder_path} does not exist.")
+            os.makedirs(folder_path, exist_ok=True)
+            print(f"The directory path {folder_path} has been newly created.")

     @staticmethod
     def on_change_models(model_size: str):
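
Note: the new btn_run.click wiring relies on Gradio passing component values to the handler positionally, so the inputs list must follow the separate_files(files, model_name, device, segment_size, save_file) parameter order introduced below in modules/uvr/music_separator.py. A minimal self-contained sketch of that mapping (not part of the commit; the stand-in handler body and dropdown choices are placeholders):

import gradio as gr

def separate_files(files, model_name, device=None, segment_size=256, save_file=True):
    # Stand-in for MusicSeparator.separate_files: Gradio fills these
    # parameters, in order, from the components listed in `inputs`.
    print(files, model_name, device, segment_size, save_file)
    return None, None  # mapped onto the two gr.Audio outputs

with gr.Blocks() as demo:
    files_audio = gr.Files(type="filepath")
    dd_model = gr.Dropdown(choices=["placeholder-model"], value="placeholder-model")
    dd_device = gr.Dropdown(choices=["cpu", "cuda"], value="cpu")
    nb_segment = gr.Number(value=256, precision=0)
    cb_save = gr.Checkbox(value=True, visible=False)  # hidden, as in the commit
    btn_run = gr.Button("SEPARATE BACKGROUND MUSIC", variant="primary")
    ad_instrumental = gr.Audio(label="Instrumental")
    ad_vocals = gr.Audio(label="Vocals")
    btn_run.click(fn=separate_files,
                  inputs=[files_audio, dd_model, dd_device, nb_segment, cb_save],
                  outputs=[ad_instrumental, ad_vocals])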
modules/uvr/music_separator.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Optional, Union
+from typing import Optional, Union, List, Dict
 import numpy as np
 import torchaudio
 import soundfile as sf
@@ -9,6 +9,8 @@ import gradio as gr
 from datetime import datetime

 from uvr.models import MDX, Demucs, VrNetwork, MDXC
+from modules.utils.paths import DEFAULT_PARAMETERS_CONFIG_PATH
+from modules.utils.files_manager import load_yaml, save_yaml


 class MusicSeparator:
@@ -61,7 +63,7 @@ class MusicSeparator:
                  device: Optional[str] = None,
                  segment_size: int = 256,
                  save_file: bool = False,
-                 progress: gr.Progress = gr.Progress()) -> tuple[np.ndarray, np.ndarray]:
+                 progress: gr.Progress = gr.Progress()) -> tuple[np.ndarray, np.ndarray, List]:
         """
         Separate the background music from the audio.
@@ -74,7 +76,10 @@ class MusicSeparator:
             progress (gr.Progress): Gradio progress indicator.

         Returns:
-            tuple[np.ndarray, np.ndarray]: Instrumental and vocals numpy arrays.
+            A tuple of
+            np.ndarray: Instrumental numpy array.
+            np.ndarray: Vocals numpy array.
+            file_paths: List of file paths where the separated audio is saved. Empty when save_file is False.
         """
         if isinstance(audio, str):
             self.audio_info = torchaudio.info(audio)
@@ -108,13 +113,37 @@ class MusicSeparator:
         result = self.model(audio)
         instrumental, vocals = result["instrumental"].T, result["vocals"].T

+        file_paths = []
         if save_file:
             instrumental_output_path = os.path.join(self.output_dir, "instrumental", f"{output_filename}-instrumental{ext}")
             vocals_output_path = os.path.join(self.output_dir, "vocals", f"{output_filename}-vocals{ext}")
             sf.write(instrumental_output_path, instrumental, sample_rate, format="WAV")
             sf.write(vocals_output_path, vocals, sample_rate, format="WAV")
-
-        return instrumental, vocals
+            file_paths += [instrumental_output_path, vocals_output_path]
+
+        return instrumental, vocals, file_paths
+
+    def separate_files(self,
+                       files: List,
+                       model_name: str,
+                       device: Optional[str] = None,
+                       segment_size: int = 256,
+                       save_file: bool = True,
+                       progress: gr.Progress = gr.Progress()) -> List[str]:
+        """Separate the background music from the audio files. Returns only the last file's instrumental
+        and vocals paths, to be displayed in gr.Audio()."""
+        self.cache_parameters(model_size=model_name, segment_size=segment_size)
+
+        for file_path in files:
+            instrumental, vocals, file_paths = self.separate(
+                audio=file_path,
+                model_name=model_name,
+                device=device,
+                segment_size=segment_size,
+                save_file=save_file,
+                progress=progress
+            )
+        return file_paths

     @staticmethod
     def get_device():
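
A minimal usage sketch of the new method (not part of the commit; the constructor arguments and model name are assumptions, not the real signature). Because the loop reassigns file_paths on every iteration, only the last file's [instrumental_path, vocals_path] pair is returned, which is exactly the two values the gr.Audio components in app.py expect; note that file_paths would be unbound if files were empty.

separator = MusicSeparator(model_dir="models/UVR", output_dir="outputs")  # hypothetical arguments

last_pair = separator.separate_files(
    files=["a.wav", "b.wav"],
    model_name="some-uvr-model",  # hypothetical model name
    device="cuda",
    segment_size=256,
    save_file=True,
)
instrumental_path, vocals_path = last_pair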
 
@@ -130,3 +159,16 @@ class MusicSeparator:
         torch.cuda.empty_cache()
         gc.collect()
         self.audio_info = None
+
+    @staticmethod
+    def cache_parameters(model_size: str,
+                         segment_size: int):
+        cached_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
+        cached_uvr_params = cached_params["bgm_separation"]
+        uvr_params_to_cache = {
+            "model_size": model_size,
+            "segment_size": segment_size
+        }
+        cached_uvr_params = {**cached_uvr_params, **uvr_params_to_cache}
+        cached_params["bgm_separation"] = cached_uvr_params
+        save_yaml(cached_params, DEFAULT_PARAMETERS_CONFIG_PATH)
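
For reference, a sketch of the YAML round trip cache_parameters performs (not part of the commit). It assumes load_yaml/save_yaml are thin wrappers over PyYAML and that the config nests UVR settings under a "bgm_separation" key, as the diff suggests; the path and values are placeholders.

import yaml

config_path = "configs/default_parameters.yaml"  # hypothetical path

with open(config_path, encoding="utf-8") as f:
    cached_params = yaml.safe_load(f)

# Merge the newly used settings into the cached BGM-separation section,
# keeping any other keys (e.g. "save_file") intact.
cached_params["bgm_separation"] = {
    **cached_params["bgm_separation"],
    "model_size": "some-uvr-model",  # placeholder
    "segment_size": 512,
}

with open(config_path, "w", encoding="utf-8") as f:
    yaml.safe_dump(cached_params, f)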
modules/whisper/whisper_base.py CHANGED
@@ -111,7 +111,7 @@ class WhisperBase(ABC):
             params.lang = language_code_dict[params.lang]

         if params.is_bgm_separate:
-            music, audio = self.music_separator.separate(
+            music, audio, _ = self.music_separator.separate(
                 audio=audio,
                 model_name=params.uvr_model_size,
                 device=params.uvr_device,
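
A minimal sketch of the updated contract this call site consumes (not part of the commit): separate() now returns a 3-tuple, and with save_file=False the path list is empty, so WhisperBase simply discards it with `_`.

import numpy as np

def separate(audio: str, save_file: bool = False):
    # Stand-in for MusicSeparator.separate after this commit.
    instrumental = np.zeros(4, dtype=np.float32)  # placeholder arrays
    vocals = np.zeros(4, dtype=np.float32)
    file_paths = ["x-instrumental.wav", "x-vocals.wav"] if save_file else []
    return instrumental, vocals, file_paths

music, audio, _ = separate("input.wav")  # file paths unused on this path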