Spaces: Running on Zero

chong.zhang committed · 9af664a
Parent(s): 5891900
update

app.py CHANGED
@@ -18,8 +18,13 @@ def generate_filename():
 
 def get_args(
         task, text="", audio=None, model_name="InspireMusic-Base",
-        chorus="intro",
+        chorus="intro",
         output_sample_rate=48000, max_generate_audio_seconds=30.0, time_start = 0.0, time_end=30.0, trim=False):
+
+    if output_sample_rate == 24000:
+        fast = True
+    else:
+        fast = False
     # This function constructs the arguments required for InspireMusic
     args = {
         "task" : task,
@@ -28,7 +33,7 @@ def get_args(
         "model_name" : model_name,
         "chorus" : chorus,
         "fast" : fast,
-        "fade_out" :
+        "fade_out" : True,
         "trim" : trim,
         "output_sample_rate" : output_sample_rate,
         "min_generate_audio_seconds": 10.0,
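Taken together, these two hunks move the fast-inference switch into get_args, where it is derived from the requested output sample rate, and hard-code fade_out to True. A minimal, illustrative sketch of that behavior follows, limited to the fields visible in the hunks above; the name build_args_sketch is ours, not the app's.

# Illustrative sketch only: mirrors the argument-building behavior after this
# commit, restricted to the fields that appear in the hunks above.
def build_args_sketch(task, model_name="InspireMusic-Base", chorus="intro",
                      output_sample_rate=48000, trim=False):
    # A 24 kHz request now implies fast inference; 48 kHz keeps the standard path.
    fast = (output_sample_rate == 24000)
    return {
        "task": task,
        "model_name": model_name,
        "chorus": chorus,
        "fast": fast,
        "fade_out": True,   # set directly in the builder after this commit
        "trim": trim,
        "output_sample_rate": output_sample_rate,
        "min_generate_audio_seconds": 10.0,
    }

# Example: the fast flag follows the sample rate.
assert build_args_sketch("text-to-music", output_sample_rate=24000)["fast"] is True
assert build_args_sketch("text-to-music", output_sample_rate=48000)["fast"] is False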
@@ -99,25 +104,24 @@ def cut_audio(audio_file, cut_seconds=5):
     torchaudio.save(output_path, cutted_audio, sr)
     return output_path
 
-def run_text2music(text, model_name, chorus,
+def run_text2music(text, model_name, chorus,
                    output_sample_rate, max_generate_audio_seconds):
     args = get_args(
         task='continuation', text=text, audio=None,
-        model_name=model_name, chorus=chorus,
-
+        model_name=model_name, chorus=chorus,
+        output_sample_rate=output_sample_rate,
         max_generate_audio_seconds=max_generate_audio_seconds)
     return music_generation(args)
 
-def run_continuation(text, audio, model_name, chorus,
+def run_continuation(text, audio, model_name, chorus,
                      output_sample_rate, max_generate_audio_seconds):
     args = get_args(
         task='text-to-music', text=text, audio=cut_audio(audio, cut_seconds=5),
-        model_name=model_name, chorus=chorus,
-
+        model_name=model_name, chorus=chorus,
+        output_sample_rate=output_sample_rate,
         max_generate_audio_seconds=max_generate_audio_seconds)
     return music_generation(args)
 
-
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
     # InspireMusic
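For context, run_continuation still trims the uploaded prompt to its first 5 seconds via cut_audio before building the arguments. A cut_audio-style helper can be sketched with torchaudio as below; this is an illustrative stand-in with our own names (trim_prompt, and output_path as a parameter), not the Space's exact implementation.

# Illustrative sketch of a 5-second prompt trim in the style of cut_audio;
# the real helper's naming and output-path handling differ.
import torchaudio

def trim_prompt(audio_file, output_path, cut_seconds=5):
    waveform, sr = torchaudio.load(audio_file)      # waveform: (channels, samples)
    num_samples = int(cut_seconds * sr)             # samples to keep from the start
    trimmed = waveform[:, :num_samples]
    torchaudio.save(output_path, trimmed, sr)       # same call pattern as in the hunk above
    return output_path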
@@ -140,11 +144,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     # with gr.Column():
     #     fast = gr.Checkbox(label="Fast Inference", value=False)
    #     fade_out = gr.Checkbox(label="Apply Fade Out Effect", value=True)
-
-        fast = True
-    else:
-        fast = False
-    fade_out = True
+
     with gr.Row(equal_height=True):
         # Textbox for custom input
         text_input = gr.Textbox(label="Input Text (For Text-to-Music Task)", value="Experience soothing and sensual instrumental jazz with a touch of Bossa Nova, perfect for a relaxing restaurant or spa ambiance.")
@@ -157,7 +157,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         button = gr.Button("Text to Music")
         button.click(run_text2music,
                      inputs=[text_input, model_name,
-                             chorus,
+                             chorus,
                              output_sample_rate,
                              max_generate_audio_seconds],
                      outputs=music_output)
@@ -165,7 +165,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         generate_button = gr.Button("Music Continuation")
         generate_button.click(run_continuation,
                               inputs=[text_input, audio_input, model_name,
-                                      chorus,
+                                      chorus,
                                       output_sample_rate,
                                       max_generate_audio_seconds],
                               outputs=music_output)
@@ -176,7 +176,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             button = gr.Button(value=prompt)
             button.click(run_text2music,
                          inputs=[text_input, model_name,
-                                 chorus,
+                                 chorus,
                                  output_sample_rate,
                                  max_generate_audio_seconds],
                          outputs=music_output)
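Across the three UI hunks, the chorus selector stays in each inputs list while the fast and fade-out controls no longer appear there. Because Gradio passes the listed components to the click callback positionally, the inputs lists and the wrapper signatures have to change together. Below is a minimal, self-contained illustration of that wiring pattern; the component types and choices are placeholders, not the Space's exact UI.

# Minimal Gradio wiring sketch (illustrative only): the components in inputs=[...]
# are passed to the callback positionally, so list order must match the signature.
import gradio as gr

def run_text2music_demo(text, model_name, chorus, output_sample_rate,
                        max_generate_audio_seconds):
    # Placeholder callback; the real app builds args and calls its generation pipeline.
    return None  # a gr.Audio output accepts None (no audio yet) or an audio file path

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    text_input = gr.Textbox(label="Input Text (For Text-to-Music Task)")
    model_name = gr.Dropdown(["InspireMusic-Base"], value="InspireMusic-Base",
                             label="Model")            # placeholder choices
    chorus = gr.Dropdown(["intro", "verse", "chorus", "outro"], value="intro",
                         label="Chorus")               # placeholder choices
    output_sample_rate = gr.Radio([24000, 48000], value=48000,
                                  label="Output Sample Rate (Hz)")
    max_generate_audio_seconds = gr.Slider(10, 300, value=30,
                                           label="Max Generated Audio (seconds)")
    music_output = gr.Audio(label="Generated Music")
    button = gr.Button("Text to Music")
    button.click(run_text2music_demo,
                 inputs=[text_input, model_name, chorus,
                         output_sample_rate, max_generate_audio_seconds],
                 outputs=music_output)

# demo.launch()  # uncomment to try the sketch locally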