import torch
import torchaudio
from einops import rearrange
import gradio as gr
import spaces
import os
import uuid
from pydub import AudioSegment
# Importing the model-related functions
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
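# get_pretrained_model downloads the weights and config from the Hugging Face
# Hub; generate_diffusion_cond runs conditioned diffusion sampling with them.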
# `model` and `model_config` are module-level globals, populated once at
# startup by load_model(), outside of the GPU-decorated function.
def load_model():
    global model, model_config
    print("Loading model...")
    model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
    print("Model loaded successfully.")
    return model, model_config
# Function to set up, generate, and process the audio
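# The "GPU-decorated function" comment above implies this function requests a
# GPU via the imported `spaces` package (ZeroGPU); the decorator is
# reconstructed here as a plausible assumption, not confirmed by this file.
@spaces.GPU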
def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
    global model, model_config

    print(f"Prompt received: {prompt}")
    print(f"Settings: Duration={seconds_total}s, Steps={steps}, CFG Scale={cfg_scale}")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Fetch the Hugging Face token from the environment variable.
    # huggingface_hub picks HF_TOKEN up automatically; never log the raw
    # secret, only whether it is present.
    hf_token = os.getenv('HF_TOKEN')
    print(f"Hugging Face token is {'set' if hf_token else 'not set'}")

    # Use the pre-loaded model and configuration
    sample_rate = model_config["sample_rate"]
    sample_size = model_config["sample_size"]
    print(f"Sample rate: {sample_rate}, Sample size: {sample_size}")
    model = model.to(device)
    print("Model moved to device.")

    # Set up text and timing conditioning
    conditioning = [{
        "prompt": prompt,
        "seconds_start": 0,
        "seconds_total": seconds_total
    }]
    print(f"Conditioning: {conditioning}")

    # Generate stereo audio
    print("Generating audio...")
    output = generate_diffusion_cond(
        model,
        steps=steps,
        cfg_scale=cfg_scale,
        conditioning=conditioning,
        sample_size=sample_size,
        sigma_min=0.3,
        sigma_max=500,
        sampler_type="dpmpp-3m-sde",
        device=device
    )
    print("Audio generated.")

    # Rearrange audio batch to a single sequence
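    # einops pattern (b, d, n) -> (d, b*n): channels stay first, batch items
    # are concatenated along time; with a batch of 1 this is just (2, samples).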
    output = rearrange(output, "b d n -> d (b n)")
    print("Audio rearranged.")

    # Peak normalize, clip, convert to int16
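    # Dividing by the peak puts samples in [-1, 1]; scaling by 32767 maps them
    # onto the int16 range that torchaudio writes out as 16-bit PCM WAV.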
    output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
    print("Audio normalized and converted.")

    # Generate a unique filename for the output
    unique_filename = f"output_{uuid.uuid4().hex}.wav"
    print(f"Saving audio to file: {unique_filename}")

    # Save to file
    torchaudio.save(unique_filename, output, sample_rate)
    print(f"Audio saved: {unique_filename}")

    return unique_filename

    '''
    # Convert WAV to MP3 using pydub (note: pydub's MP3 export does require
    # ffmpeg under the hood, so this path remains disabled)
    audio = AudioSegment.from_wav(unique_filename)
    full_path_mp3 = unique_filename.replace('wav', 'mp3')
    audio.export(full_path_mp3, format="mp3")
    print(f"Audio converted and saved to MP3: {full_path_mp3}")

    # Return the path to the generated audio file
    return full_path_mp3
    '''
# Single-call Gradio Interface (defined for reference but never launched;
# the Blocks app below is what actually runs)
interface = gr.Interface(
    fn=generate_audio,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
        gr.Slider(0, 47, value=30, label="Duration in Seconds"),
        gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps"),
        gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
    ],
    outputs=gr.Audio(type="filepath", label="Generated Audio"),
    title="Stable Audio Generator",
    description="Generate variable-length stereo audio at 44.1kHz from text prompts using Stable Audio Open 1.0.",
)
with gr.Blocks() as demo:
    with gr.Tab("Audio"):
        audio_prompt = gr.Textbox(label="Prompt", placeholder="Enter your text prompt here")
        audio_duration = gr.Slider(0, 47, value=30, label="Duration in Seconds")
        audio_steps = gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps")
        audio_cfg = gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
        audio_process_button = gr.Button("Process Audio")
        audio_output = gr.Audio(type="filepath", label="Generated Audio")

        audio_process_button.click(generate_audio, [audio_prompt, audio_duration, audio_steps, audio_cfg], [audio_output])
# Pre-load the model to avoid multiprocessing issues
model, model_config = load_model()
demo.launch(share=True)
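
# Hypothetical usage sketch: the generator can also be called directly,
# bypassing the UI, once load_model() has populated the globals:
#   wav_path = generate_audio("warm analog synth arpeggio", seconds_total=10, steps=50, cfg_scale=7)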