Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -214,79 +214,102 @@ def toggle_audio_src(choice):
|
|
214 |
|
215 |
|
216 |
def ui_full(launch_kwargs):
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
gr.
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
287 |
|
288 |
-
|
289 |
-
ui_full(launch_kwargs)
|
290 |
|
291 |
|
292 |
def ui_batched(launch_kwargs):
|
|
|
214 |
|
215 |
|
216 |
def ui_full(launch_kwargs):
    """Build and launch the full MusicGen demo UI.

    Args:
        launch_kwargs: keyword arguments forwarded verbatim to
            ``gradio.Blocks.launch`` (e.g. server_name, server_port, share).

    Side effects: constructs the Gradio Blocks app and blocks on
    ``interface.queue().launch(...)``.
    """
    with gr.Blocks() as interface:
        gr.Markdown(
            """
            # MusicGen
            This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
            a simple and controllable model for music generation
            presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
            """
        )
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    text = gr.Text(label="Input Text", interactive=True)
                    with gr.Column():
                        radio = gr.Radio(["file", "mic"], value="file",
                                         label="Condition on a melody (optional) File or Mic")
                        melody = gr.Audio(source="upload", type="numpy", label="File",
                                          interactive=True, elem_id="melody-input")
                with gr.Row():
                    submit = gr.Button("Submit")
                # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
                with gr.Row():
                    model = gr.Radio(["melody", "medium", "small", "large"],
                                     label="Model", value="melody", interactive=True)
                with gr.Row():
                    duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
                with gr.Row():
                    topk = gr.Number(label="Top-k", value=250, interactive=True)
                    topp = gr.Number(label="Top-p", value=0, interactive=True)
                    temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
                    cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
            with gr.Column():
                # Five output slots; predict_full is expected to return one value per slot.
                output = [gr.Audio(label="Generated Music") for _ in range(5)]
        # BUGFIX: `output` is already a list of components — the original passed
        # `outputs=[output]`, i.e. a nested list, which is not a valid outputs spec.
        submit.click(predict_full,
                     inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef],
                     outputs=output)
        # Swap the melody widget between file-upload and microphone capture.
        radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
        gr.Examples(
            fn=predict_full,
            examples=[
                [
                    "An 80s driving pop song with heavy drums and synth pads in the background",
                    "./assets/bach.mp3",
                    "melody"
                ],
                [
                    "A cheerful country song with acoustic guitars",
                    "./assets/bolero_ravel.mp3",
                    "melody"
                ],
                [
                    "90s rock song with electric guitar and heavy drums",
                    None,
                    "medium"
                ],
                [
                    "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
                    "./assets/bach.mp3",
                    "melody"
                ],
                [
                    "lofi slow bpm electro chill with organic samples",
                    None,
                    "medium",
                ],
            ],
            inputs=[text, melody, model],
            # BUGFIX: same nested-list issue as submit.click above.
            outputs=output
        )
        gr.Markdown(
            """
            ### More details
            The model will generate a short music extract based on the description you provided.
            The model can generate up to 30 seconds of audio in one pass. It is now possible
            to extend the generation by feeding back the end of the previous chunk of audio.
            This can take a long time, and the model might lose consistency. The model might also
            decide at arbitrary positions that the song ends.
            **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
            An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
            are generated each time.
            We present 4 model variations:
            1. Melody -- a music generation model capable of generating music condition
                on text and melody inputs. **Note**, you can also use text only.
            2. Small -- a 300M transformer decoder conditioned on text only.
            3. Medium -- a 1.5B transformer decoder conditioned on text only.
            4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.)
            When using `melody`, you can optionally provide a reference audio from
            which a broad melody will be extracted. The model will then try to follow both
            the description and melody provided.
            You can also use your own GPU or a Google Colab by following the instructions on our repo.
            See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
            for more details.
            """
        )

        interface.queue().launch(**launch_kwargs)
|
|
|
313 |
|
314 |
|
315 |
def ui_batched(launch_kwargs):
|