Sayoyo committed on
Commit
04f880c
·
1 Parent(s): b030337

[feat] replace placeholder by value

Browse files
Files changed (1) hide show
  1. ui/components.py +6 -4
ui/components.py CHANGED
@@ -2,8 +2,8 @@ import gradio as gr
2
  import librosa
3
 
4
 
5
- TAG_PLACEHOLDER = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
6
- LYRIC_PLACEHOLDER = """[verse]
7
  Neon lights they flicker bright
8
  City hums in dead of night
9
  Rhythms pulse through concrete veins
@@ -41,6 +41,8 @@ In this moment we take flight
41
  """
42
 
43
 
 
 
44
  def create_output_ui(task_name="Text2Music"):
45
  # For many consumer-grade GPU devices, only one batch can be run
46
  output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
@@ -69,8 +71,8 @@ def create_text2music_ui(
69
  audio_duration = gr.Slider(-1, 240.0, step=0.00001, value=-1, label="Audio Duration", interactive=True, info="-1 means random duration (30 ~ 240).", scale=9)
70
  sample_bnt = gr.Button("Sample", variant="primary", scale=1)
71
 
72
- prompt = gr.Textbox(lines=2, label="Tags", max_lines=4, placeholder=TAG_PLACEHOLDER, info="Support tags, descriptions, and scene. Use commas to separate different tags.\ntags and lyrics examples are from ai music generation community")
73
- lyrics = gr.Textbox(lines=9, label="Lyrics", max_lines=13, placeholder=LYRIC_PLACEHOLDER, info="Support lyric structure tags like [verse], [chorus], and [bridge] to separate different parts of the lyrics.\nUse [instrumental] or [inst] to generate instrumental music. Not support genre structure tag in lyrics")
74
 
75
  with gr.Accordion("Basic Settings", open=False):
76
  infer_step = gr.Slider(minimum=1, maximum=1000, step=1, value=27, label="Infer Steps", interactive=True)
 
2
  import librosa
3
 
4
 
5
+ TAG_DEFAULT = "funk, pop, soul, rock, melodic, guitar, drums, bass, keyboard, percussion, 105 BPM, energetic, upbeat, groovy, vibrant, dynamic"
6
+ LYRIC_DEFAULT = """[verse]
7
  Neon lights they flicker bright
8
  City hums in dead of night
9
  Rhythms pulse through concrete veins
 
41
  """
42
 
43
 
44
+
45
+
46
  def create_output_ui(task_name="Text2Music"):
47
  # For many consumer-grade GPU devices, only one batch can be run
48
  output_audio1 = gr.Audio(type="filepath", label=f"{task_name} Generated Audio 1")
 
71
  audio_duration = gr.Slider(-1, 240.0, step=0.00001, value=-1, label="Audio Duration", interactive=True, info="-1 means random duration (30 ~ 240).", scale=9)
72
  sample_bnt = gr.Button("Sample", variant="primary", scale=1)
73
 
74
+ prompt = gr.Textbox(lines=2, label="Tags", max_lines=4, value=TAG_DEFAULT, info="Support tags, descriptions, and scene. Use commas to separate different tags.\ntags and lyrics examples are from ai music generation community")
75
+ lyrics = gr.Textbox(lines=9, label="Lyrics", max_lines=13, value=LYRIC_DEFAULT, info="Support lyric structure tags like [verse], [chorus], and [bridge] to separate different parts of the lyrics.\nUse [instrumental] or [inst] to generate instrumental music. Not support genre structure tag in lyrics")
76
 
77
  with gr.Accordion("Basic Settings", open=False):
78
  infer_step = gr.Slider(minimum=1, maximum=1000, step=1, value=27, label="Infer Steps", interactive=True)