Pecorized committed on
Commit 00aa15b
1 Parent(s): 79d830c

updated image loading

Files changed (1)
  1. app.py +82 -13
app.py CHANGED
@@ -60,6 +60,78 @@
 
 
 
+# import os
+# import gradio as gr
+# from scipy.io.wavfile import write
+# import subprocess
+# import torch
+
+# # Assuming audio_separator is available in your environment
+# from audio_separator import Separator
+
+# def inference(audio, vocals, bass, drums, other, piano, guitar, lead_vocals, backing_vocals):
+#     os.makedirs("out", exist_ok=True)
+#     audio_path = 'test.wav'
+#     write(audio_path, audio[0], audio[1])
+#     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+#     print(f"Using device: {device}")
+#     if device=='cuda':
+#         use_cuda=True
+#         print(f"Using device: {device}")
+#     else:
+#         use_cuda=False
+#         print(f"Using device: {device}")
+
+#     try:
+#         command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
+#         process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+#         print("Demucs script output:", process.stdout.decode())
+#     except subprocess.CalledProcessError as e:
+#         print("Error in Demucs script:", e.stderr.decode())
+#         return [gr.Audio(visible=False)] * 8
+
+#     try:
+#         separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=device==use_cuda, output_format='wav')
+#         primary_stem_path, secondary_stem_path = separator.separate()
+#     except Exception as e:
+#         print("Error in custom separation:", str(e))
+#         return [gr.Audio(visible=False)] * 8
+
+#     stem_paths = {
+#         "vocals": "./out/htdemucs_6s/test/vocals.wav" if vocals else None,
+#         "bass": "./out/htdemucs_6s/test/bass.wav" if bass else None,
+#         "drums": "./out/htdemucs_6s/test/drums.wav" if drums else None,
+#         "other": "./out/htdemucs_6s/test/other.wav" if other else None,
+#         "piano": "./out/htdemucs_6s/test/piano.wav" if piano else None,
+#         "guitar": "./out/htdemucs_6s/test/guitar.wav" if guitar else None,
+#         "lead_vocals": primary_stem_path if lead_vocals else None,
+#         "backing_vocals": secondary_stem_path if backing_vocals else None
+#     }
+
+#     return [gr.Audio(stem_paths[stem], visible=bool(stem_paths[stem])) for stem in stem_paths]
+
+# # Define checkboxes for each stem
+# checkbox_labels = ["Full Vocals", "Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]
+# checkboxes = [gr.components.Checkbox(label=label) for label in checkbox_labels]
+
+# # Gradio Interface
+# title = "Source Separation Demo"
+# description = "Music Source Separation in the Waveform Domain. Upload your audio to begin."
+# iface = gr.Interface(
+#     inference,
+#     [gr.components.Audio(type="numpy", label="Input")] + checkboxes,
+#     [gr.Audio(label=label, visible=False) for label in checkbox_labels],
+#     title=title,
+#     description=description,
+
+# )
+
+# iface.launch()
+
+
+
+
+
 import os
 import gradio as gr
 from scipy.io.wavfile import write
@@ -70,17 +142,15 @@ import torch
 from audio_separator import Separator
 
 def inference(audio, vocals, bass, drums, other, piano, guitar, lead_vocals, backing_vocals):
+    # Initially, show the loading GIF
+    loading_gif_path = "7RwF.gif"
+    transparent_img_path = "images.png"
+
     os.makedirs("out", exist_ok=True)
     audio_path = 'test.wav'
     write(audio_path, audio[0], audio[1])
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}")
-    if device=='cuda':
-        use_cuda=True
-        print(f"Using device: {device}")
-    else:
-        use_cuda=False
-        print(f"Using device: {device}")
 
     try:
         command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out"
@@ -88,14 +158,14 @@ def inference(audio, vocals, bass, drums, other, piano, guitar, lead_vocals, backing_vocals):
         print("Demucs script output:", process.stdout.decode())
     except subprocess.CalledProcessError as e:
         print("Error in Demucs script:", e.stderr.decode())
-        return [gr.Audio(visible=False)] * 8
+        return [gr.Audio(visible=False)] * 8 + [loading_gif_path]
 
     try:
-        separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=device==use_cuda, output_format='wav')
+        separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=device=='cuda', output_format='wav')
         primary_stem_path, secondary_stem_path = separator.separate()
     except Exception as e:
         print("Error in custom separation:", str(e))
-        return [gr.Audio(visible=False)] * 8
+        return [gr.Audio(visible=False)] * 8 + [loading_gif_path]
 
     stem_paths = {
         "vocals": "./out/htdemucs_6s/test/vocals.wav" if vocals else None,
@@ -108,7 +178,8 @@
         "backing_vocals": secondary_stem_path if backing_vocals else None
     }
 
-    return [gr.Audio(stem_paths[stem], visible=bool(stem_paths[stem])) for stem in stem_paths]
+    # Once processing is done, hide the GIF by returning a transparent image
+    return [gr.Audio(stem_paths[stem], visible=bool(stem_paths[stem])) for stem in stem_paths] + [transparent_img_path]
 
 # Define checkboxes for each stem
 checkbox_labels = ["Full Vocals", "Bass", "Drums", "Other", "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]
@@ -120,11 +191,9 @@ description = "Music Source Separation in the Waveform Domain. Upload your audio to begin."
 iface = gr.Interface(
     inference,
     [gr.components.Audio(type="numpy", label="Input")] + checkboxes,
-    [gr.Audio(label=label, visible=False) for label in checkbox_labels],
+    [gr.Audio(label=label, visible=False) for label in checkbox_labels] + [gr.Image()],
     title=title,
     description=description,
-
 )
 
 iface.launch()
-
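
For context, the change relies on gr.Interface mapping the function's return list onto the output components in order: the eight gr.Audio outputs receive the stem updates, and the trailing gr.Image receives either the loading GIF path ("7RwF.gif") from the error branches or the transparent placeholder ("images.png") once separation succeeds. Below is a minimal sketch of that wiring with the demucs/Separator pipeline replaced by a hypothetical stub; process_stub and its dummy logic are illustrative only, while the component layout mirrors app.py.

import gradio as gr

# Paths taken from the commit; both images are assumed to sit next to app.py.
loading_gif_path = "7RwF.gif"        # returned on the error branches
transparent_img_path = "images.png"  # returned once processing finishes

labels = ["Full Vocals", "Bass", "Drums", "Other",
          "Piano", "Guitar", "Lead Vocals", "Backing Vocals"]

def process_stub(audio, *selected):
    # Hypothetical stand-in for the demucs + Separator pipeline in app.py.
    if audio is None:
        # Error branch: keep every audio output hidden and return the loading GIF path.
        return [gr.Audio(visible=False)] * 8 + [loading_gif_path]
    # Success branch: one value per output component, in the same order as the outputs list below.
    stem_paths = {label: None for label in labels}  # the real code fills these with stem .wav paths
    return [gr.Audio(stem_paths[l], visible=bool(stem_paths[l])) for l in labels] + [transparent_img_path]

iface = gr.Interface(
    process_stub,
    [gr.Audio(type="numpy", label="Input")] + [gr.Checkbox(label=l) for l in labels],
    [gr.Audio(label=l, visible=False) for l in labels] + [gr.Image()],  # eight stems + one status image
)

if __name__ == "__main__":
    iface.launch()

Depending on the installed Gradio version, the visibility toggles may need to be returned as gr.update(...) objects rather than freshly constructed components.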