Spaces: Running on Zero
full model
app.py CHANGED
@@ -31,7 +31,7 @@ cfm, cfm_full, tokenizer, muq, vae = prepare_model(device)
 cfm = torch.compile(cfm)
 cfm_full = torch.compile(cfm_full)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=40)
 def infer_music(lrc, ref_audio_path, text_prompt, current_prompt_type, seed=42, randomize_seed=False, steps=32, cfg_strength=4.0, file_type='wav', odeint_method='euler', Music_Duration='95s', device='cuda'):
     if Music_Duration == '95s':
         max_frames = 2048
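Note on the first hunk: this is a ZeroGPU Space ("Running on Zero" above), so a GPU is attached only while a function decorated with spaces.GPU is executing, and the duration argument (in seconds) tells the scheduler how long a single call is expected to hold it. The decorated infer_music above is the real entry point; the snippet below is only a minimal, self-contained sketch of the pattern, with an illustrative function body:

import spaces
import torch

@spaces.GPU(duration=40)  # request a ZeroGPU slot; one call is expected to finish within ~40 s
def warmup() -> str:
    # Illustrative body: the GPU is only attached while this decorated call runs.
    x = torch.ones(8, device="cuda")
    return f"sum={x.sum().item()}"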
@@ -45,7 +45,7 @@ def infer_music(lrc, ref_audio_path, text_prompt, current_prompt_type, seed=42,
     sway_sampling_coef = -1 if steps < 32 else None
     vocal_flag = False
     try:
-        lrc_prompt, start_time = get_lrc_token(lrc, tokenizer, device)
+        lrc_prompt, start_time = get_lrc_token(max_frames, lrc, tokenizer, device)
         if current_prompt_type == 'audio':
             style_prompt, vocal_flag = get_audio_style_prompt(muq, ref_audio_path)
         else:
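Note on the second hunk: max_frames (set from Music_Duration at the top of infer_music, 2048 frames for the '95s' option) is now passed as the first argument to get_lrc_token, presumably so the lyric-token sequence is sized to the frame budget of the chosen duration rather than a fixed length. get_lrc_token is a helper in this repo and its internals are not shown in the diff; the snippet below is only a hypothetical, self-contained sketch of that idea, with pad_to_frames standing in for whatever the real helper does:

import torch

def pad_to_frames(token_ids, max_frames, pad_id=0):
    # Hypothetical helper: pad or truncate a lyric-token sequence to the frame budget
    # so downstream conditioning tensors line up with the selected music duration.
    ids = list(token_ids)[:max_frames] + [pad_id] * max(0, max_frames - len(token_ids))
    return torch.tensor(ids, dtype=torch.long)

lrc_tokens = pad_to_frames([12, 7, 93, 5], max_frames=2048)  # '95s' -> 2048 frames per the first hunk
print(lrc_tokens.shape)  # torch.Size([2048])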
@@ -415,4 +415,4 @@ with gr.Blocks(css=css) as demo:
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(share=True)
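Note on the last hunk: share=True asks Gradio to open a temporary public *.gradio.live tunnel in addition to the local server. A hosted Space already serves the app at its own URL (Gradio generally just warns that share is unnecessary there), so the flag mainly matters when app.py is run locally. A minimal usage sketch, with a placeholder UI standing in for the Space's real interface:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # the real app builds its full interface here

if __name__ == "__main__":
    # share=True requests a temporary public gradio.live link alongside the local URL
    demo.launch(share=True)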