Spaces:
Running on T4
Add UI control for Model based on Melody selection
Browse files
app.py
CHANGED
@@ -102,13 +102,13 @@ def load_melody_filepath(melody_filepath, title):
|
|
102 |
def load_melody(melody, prompt_index):
|
103 |
# get melody length in number of segments and modify the UI
|
104 |
if melody is None:
|
105 |
-
return
|
106 |
sr, melody_data = melody[0], melody[1]
|
107 |
segment_samples = sr * 30
|
108 |
total_melodys = max(min((len(melody_data) // segment_samples) - 1, 25), 0)
|
109 |
print(f"Melody length: {len(melody_data)}, Melody segments: {total_melodys}\n")
|
110 |
MAX_PROMPT_INDEX = total_melodys
|
111 |
-
return gr.Slider.update(maximum=MAX_PROMPT_INDEX, value=0,
|
112 |
|
113 |
|
114 |
def predict(model, text, melody, melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap=1, prompt_index = 0, include_title = True, include_settings = True):
|
@@ -318,7 +318,7 @@ def ui(**kwargs):
|
|
318 |
output = gr.Video(label="Generated Music")
|
319 |
seed_used = gr.Number(label='Seed used', value=-1, interactive=False)
|
320 |
|
321 |
-
melody_filepath.change(load_melody_filepath, inputs=[melody_filepath, title], outputs=[melody, title], api_name="melody_filepath_change").success(load_melody, inputs=[melody, prompt_index], outputs=[prompt_index])
|
322 |
melody.change(load_melody, inputs=[melody, prompt_index], outputs=[prompt_index], api_name="melody_change")
|
323 |
reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False, api_name="reuse_seed")
|
324 |
submit.click(predict, inputs=[model, text, melody, melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap, prompt_index, include_title, include_settings], outputs=[output, seed_used], api_name="submit")
|
|
|
102 |
def load_melody(melody, prompt_index):
    """Sync the prompt-index slider and model radio with the loaded melody.

    Args:
        melody: ``(sample_rate, data)`` tuple from the Gradio audio
            component, or ``None`` when no melody is loaded.
        prompt_index: current slider value. Unused here; it is part of the
            signature because the Gradio event wiring passes it as an input.

    Returns:
        A pair of Gradio update objects: one for the prompt-index
        ``Slider`` (maximum = number of usable 30-second segments) and one
        for the model ``Radio`` (forced to "melody"; locked while a melody
        is loaded, unlocked otherwise).
    """
    if melody is None:
        # No melody loaded: reset the slider and let the user pick a model.
        return gr.Slider.update(maximum=0, value=0), gr.Radio.update(value="melody", interactive=True)
    sr, melody_data = melody
    # Each prompt segment is 30 seconds of audio at the melody's sample rate.
    segment_samples = sr * 30
    # Number of selectable segments, clamped to [0, 25]; the trailing
    # partial segment is dropped (hence the "- 1").
    total_segments = max(min((len(melody_data) // segment_samples) - 1, 25), 0)
    print(f"Melody length: {len(melody_data)}, Melody segments: {total_segments}\n")
    max_prompt_index = total_segments
    # Lock the model choice to "melody" while a melody prompt is active.
    return gr.Slider.update(maximum=max_prompt_index, value=0), gr.Radio.update(value="melody", interactive=False)
|
112 |
|
113 |
|
114 |
def predict(model, text, melody, melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap=1, prompt_index = 0, include_title = True, include_settings = True):
|
|
|
318 |
output = gr.Video(label="Generated Music")
|
319 |
seed_used = gr.Number(label='Seed used', value=-1, interactive=False)
|
320 |
|
321 |
+
melody_filepath.change(load_melody_filepath, inputs=[melody_filepath, title], outputs=[melody, title], api_name="melody_filepath_change").success(load_melody, inputs=[melody, prompt_index], outputs=[prompt_index, model])
|
322 |
melody.change(load_melody, inputs=[melody, prompt_index], outputs=[prompt_index], api_name="melody_change")
|
323 |
reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False, api_name="reuse_seed")
|
324 |
submit.click(predict, inputs=[model, text, melody, melody_filepath, duration, dimension, topk, topp, temperature, cfg_coef, background, title, settings_font, settings_font_color, seed, overlap, prompt_index, include_title, include_settings], outputs=[output, seed_used], api_name="submit")
|