import os

import gradio as gr
import torch
import torchaudio
from einops import rearrange
from huggingface_hub import login
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond

# Authenticate using HF token from secrets
login(token=os.getenv("HUGGINGFACE_TOKEN"))
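
# Load the pretrained model and its config; run on the GPU when available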
device = "cuda" if torch.cuda.is_available() else "cpu"
model, config = get_pretrained_model("stabilityai/stable-audio-open-small")
model = model.to(device)

sample_rate = config["sample_rate"]
sample_size = config["sample_size"]
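
# Generate a short clip (up to ~11 s for this model) from a text prompt and return the WAV path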
def generate_audio(prompt):
    conditioning = [{"prompt": prompt, "seconds_total": 11}]
    with torch.no_grad():
        output = generate_diffusion_cond(
            model,
            steps=8,
            conditioning=conditioning,
            sample_size=sample_size,
            device=device,
        )
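    # Fold the batch dimension into the time axis and convert to 16-bit PCM before saving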
    output = rearrange(output, "b d n -> d (b n)")
    output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
    path = "output.wav"
    torchaudio.save(path, output, sample_rate)
    return path
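
# Minimal Gradio UI: text prompt in, generated audio file out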
gr.Interface(
    fn=generate_audio,
    inputs=gr.Textbox(label="Enter your sound prompt"),
    outputs=gr.Audio(type="filepath"),
    title="Stable Audio Generator",
).launch()