nakas committed on
Commit
6a0da6e
·
1 Parent(s): b417793

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -72
app.py CHANGED
@@ -214,79 +214,102 @@ def toggle_audio_src(choice):
214
 
215
 
216
def ui_full(launch_kwargs):
    """Build and launch the full (single-request) MusicGen demo.

    Args:
        launch_kwargs: keyword arguments for ``Interface.launch()`` —
            e.g. ``server_name``, ``server_port``, ``auth``, ``share``.
    """
    interface = gr.Interface(
        fn=predict_full,
        inputs=[
            gr.Radio(["melody", "medium", "small", "large"], label="Model", default="melody"),
            gr.Text(label="Input Text"),
            gr.Audio(source="upload", type="numpy", label="File", interactive=True, elem_id="melody-input"),
            gr.Slider(minimum=1, maximum=120, default=10, label="Duration", step=1),
            gr.Number(label="Top-k", default=250),
            gr.Number(label="Top-p", default=0),
            gr.Number(label="Temperature", default=1.0),
            gr.Number(label="Classifier Free Guidance", default=3.0),
        ],
        outputs=[
            # Full mix plus the four separated stems.
            gr.Audio(type='filepath', label="Generated Music"),
            gr.Audio(type='filepath', label="Vocal Music"),
            gr.Audio(type='filepath', label="base Music"),
            gr.Audio(type='filepath', label="drum Music"),
            gr.Audio(type='filepath', label="other Music"),
        ],
        title="MusicGen",
        description="This is your private demo for MusicGen, a simple and controllable model for music generation.",
        allow_flagging="never",
        layout="vertical",
    )

    # BUG FIX: launch-time options (server_name, server_port, auth, share, ...)
    # belong to launch(); the original passed **launch_kwargs to the
    # gr.Interface constructor and then called launch() with no arguments,
    # so every command-line option was silently ignored.
    interface.launch(**launch_kwargs)
245
-
246
-
247
if __name__ == "__main__":
    # Command-line options controlling how the Gradio server is exposed.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--listen', type=str,
        # Inside a HF Space we must bind all interfaces; locally stay on loopback.
        default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
        help='IP to listen on for connections to Gradio',
    )
    parser.add_argument('--username', type=str, default='', help='Username for authentication')
    parser.add_argument('--password', type=str, default='', help='Password for authentication')
    parser.add_argument('--server_port', type=int, default=0, help='Port to run the server listener on')
    parser.add_argument('--inbrowser', action='store_true', help='Open in browser')
    parser.add_argument('--share', action='store_true', help='Share the gradio UI')
    args = parser.parse_args()

    # Translate parsed flags into launch() keyword arguments, only including
    # the optional ones when they were actually supplied.
    launch_kwargs = {'server_name': args.listen}
    if args.username and args.password:
        launch_kwargs['auth'] = (args.username, args.password)
    if args.server_port:
        launch_kwargs['server_port'] = args.server_port
    if args.inbrowser:
        launch_kwargs['inbrowser'] = args.inbrowser
    if args.share:
        launch_kwargs['share'] = args.share

    # Show the interface
    ui_full(launch_kwargs)
290
 
291
 
292
  def ui_batched(launch_kwargs):
 
214
 
215
 
216
def ui_full(launch_kwargs):
    """Build and launch the full MusicGen demo as a Gradio Blocks UI.

    Args:
        launch_kwargs: keyword arguments forwarded to ``launch()`` —
            e.g. ``server_name``, ``server_port``, ``auth``, ``share``.
    """
    with gr.Blocks() as interface:
        gr.Markdown(
            """
            # MusicGen
            This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft),
            a simple and controllable model for music generation
            presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
            """
        )
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    text = gr.Text(label="Input Text", interactive=True)
                    with gr.Column():
                        radio = gr.Radio(["file", "mic"], value="file",
                                         label="Condition on a melody (optional) File or Mic")
                        melody = gr.Audio(source="upload", type="numpy", label="File",
                                          interactive=True, elem_id="melody-input")
                with gr.Row():
                    submit = gr.Button("Submit")
                # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
                with gr.Row():
                    model = gr.Radio(["melody", "medium", "small", "large"],
                                     label="Model", value="melody", interactive=True)
                with gr.Row():
                    duration = gr.Slider(minimum=1, maximum=120, value=10, label="Duration", interactive=True)
                with gr.Row():
                    topk = gr.Number(label="Top-k", value=250, interactive=True)
                    topp = gr.Number(label="Top-p", value=0, interactive=True)
                    temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
                    cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
            with gr.Column():
                # BUG FIX: all five outputs were labelled "Generated Music";
                # restore the distinct labels (mix + four separated stems) used
                # by the previous version of this UI.
                output = [
                    gr.Audio(label="Generated Music"),
                    gr.Audio(label="Vocal Music"),
                    gr.Audio(label="base Music"),
                    gr.Audio(label="drum Music"),
                    gr.Audio(label="other Music"),
                ]
        # BUG FIX: `output` is already a list of components; wrapping it in
        # another list (`outputs=[output]`) hands Gradio a nested list instead
        # of the five Audio components.
        submit.click(predict_full,
                     inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef],
                     outputs=output)
        radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
        # NOTE(review): Examples declares only [text, melody, model] as inputs
        # while fn=predict_full takes eight arguments; this is inert unless
        # example caching is enabled — confirm before turning on cache_examples.
        gr.Examples(
            fn=predict_full,
            examples=[
                [
                    "An 80s driving pop song with heavy drums and synth pads in the background",
                    "./assets/bach.mp3",
                    "melody"
                ],
                [
                    "A cheerful country song with acoustic guitars",
                    "./assets/bolero_ravel.mp3",
                    "melody"
                ],
                [
                    "90s rock song with electric guitar and heavy drums",
                    None,
                    "medium"
                ],
                [
                    "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
                    "./assets/bach.mp3",
                    "melody"
                ],
                [
                    "lofi slow bpm electro chill with organic samples",
                    None,
                    "medium",
                ],
            ],
            inputs=[text, melody, model],
            outputs=output,
        )
        gr.Markdown(
            """
            ### More details
            The model will generate a short music extract based on the description you provided.
            The model can generate up to 30 seconds of audio in one pass. It is now possible
            to extend the generation by feeding back the end of the previous chunk of audio.
            This can take a long time, and the model might lose consistency. The model might also
            decide at arbitrary positions that the song ends.
            **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min).
            An overlap of 12 seconds is kept with the previously generated chunk, and 18 "new" seconds
            are generated each time.
            We present 4 model variations:
            1. Melody -- a music generation model capable of generating music condition
            on text and melody inputs. **Note**, you can also use text only.
            2. Small -- a 300M transformer decoder conditioned on text only.
            3. Medium -- a 1.5B transformer decoder conditioned on text only.
            4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.)
            When using `melody`, you can optionally provide a reference audio from
            which a broad melody will be extracted. The model will then try to follow both
            the description and melody provided.
            You can also use your own GPU or a Google Colab by following the instructions on our repo.
            See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
            for more details.
            """
        )

    interface.queue().launch(**launch_kwargs)
 
313
 
314
 
315
  def ui_batched(launch_kwargs):