Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,4 @@
-import spaces
+# import spaces
 import logging
 from datetime import datetime
 from pathlib import Path
@@ -58,7 +58,7 @@ def get_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]:
 net, feature_utils, seq_cfg = get_model()


-@spaces.GPU(duration=120)
+# @spaces.GPU(duration=120)
 @torch.inference_mode()
 def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str, seed: int, num_steps: int,
                    cfg_strength: float, duration: float):
@@ -95,7 +95,7 @@ def video_to_audio(video: gr.Video, prompt: str, negative_prompt: str, seed: int
     return video_save_path


-@spaces.GPU(duration=120)
+# @spaces.GPU(duration=120)
 @torch.inference_mode()
 def text_to_audio(prompt: str, negative_prompt: str, seed: int, num_steps: int, cfg_strength: float,
                   duration: float):