import torch
import torchaudio
from einops import rearrange
import gradio as gr
import spaces
import os
import uuid
from pydub import AudioSegment
import numpy as np
import random
from diffusers import StableDiffusion3Pipeline
'''AUDIO'''
# Importing the model-related functions
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
# Load the model once and cache it, so the GPU-decorated function below does
# not reload the weights on every request
_audio_model = None

def load_model():
    global _audio_model
    if _audio_model is None:
        print("Loading model...")
        _audio_model = get_pretrained_model("stabilityai/stable-audio-open-1.0")
        print("Model loaded successfully.")
    return _audio_model
# Function to set up, generate, and process the audio
@spaces.GPU(duration=120) # Allocate GPU only when this function is called
def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
print(f"Prompt received: {prompt}")
print(f"Settings: Duration={seconds_total}s, Steps={steps}, CFG Scale={cfg_scale}")
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
    # Fetch the Hugging Face token from the environment variable
    # (report only whether it is set; never print the secret itself)
    hf_token = os.getenv('HF_TOKEN')
    print(f"Hugging Face token set: {hf_token is not None}")
# Use pre-loaded model and configuration
model, model_config = load_model()
sample_rate = model_config["sample_rate"]
sample_size = model_config["sample_size"]
print(f"Sample rate: {sample_rate}, Sample size: {sample_size}")
model = model.to(device)
print("Model moved to device.")
# Set up text and timing conditioning
conditioning = [{
"prompt": prompt,
"seconds_start": 0,
"seconds_total": seconds_total
}]
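    # seconds_start/seconds_total are the model's timing conditioning: they tell
    # it which portion of its fixed-length window the prompt should fill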
print(f"Conditioning: {conditioning}")
# Generate stereo audio
print("Generating audio...")
output = generate_diffusion_cond(
model,
steps=steps,
cfg_scale=cfg_scale,
conditioning=conditioning,
sample_size=sample_size,
sigma_min=0.3,
sigma_max=500,
sampler_type="dpmpp-3m-sde",
device=device
)
print("Audio generated.")
# Rearrange audio batch to a single sequence
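    # e.g. a (batch, 2, n) stereo batch becomes one (2, batch*n) stereo tensor,
    # concatenating the batch items end-to-end along the time axis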
output = rearrange(output, "b d n -> d (b n)")
print("Audio rearranged.")
# Peak normalize, clip, convert to int16
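    # div() by the absolute peak scales the waveform into [-1, 1]; mul(32767)
    # then maps it onto the int16 range that torchaudio.save writes as 16-bit PCM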
output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
print("Audio normalized and converted.")
# Generate a unique filename for the output
unique_filename = f"output_{uuid.uuid4().hex}.wav"
print(f"Saving audio to file: {unique_filename}")
# Save to file
torchaudio.save(unique_filename, output, sample_rate)
print(f"Audio saved: {unique_filename}")
    # Convert WAV to MP3 using pydub (MP3 export relies on ffmpeg being installed)
audio = AudioSegment.from_wav(unique_filename)
    full_path_mp3 = unique_filename.replace('.wav', '.mp3')
audio.export(full_path_mp3, format="mp3")
print(f"Audio converted and saved to MP3: {full_path_mp3}")
# Return the path to the generated audio file
return full_path_mp3
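# Example call (a sketch; on ZeroGPU Spaces this allocates a GPU for up to the
# 120 s duration declared in the decorator above):
#   mp3_path = generate_audio("gentle rain on a tin roof", seconds_total=20)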
'''DIFFUSION'''
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16
repo = "stabilityai/stable-diffusion-3-medium-diffusers"
pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=dtype).to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1344
@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed)
image = pipe(
prompt = prompt,
negative_prompt = negative_prompt,
guidance_scale = guidance_scale,
num_inference_steps = num_inference_steps,
width = width,
height = height,
generator = generator
).images[0]
return image, seed
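# Example call (a sketch; the seed is returned so a randomized value can be
# shown back in the UI):
#   image, used_seed = infer("an astronaut riding a horse", "", 0, True,
#                            1024, 1024, 5.0, 28)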
'''
# Setting up the Gradio Interface
interface = gr.Interface(
fn=generate_audio,
inputs=[
gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
gr.Slider(0, 47, value=30, label="Duration in Seconds"),
gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps"),
gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
],
outputs=gr.Audio(type="filepath", label="Generated Audio"),
title="Stable Audio Generator",
description="Generate variable-length stereo audio at 44.1kHz from text prompts using Stable Audio Open 1.0.",
)'''
with gr.Blocks() as demo:
with gr.Tab("SD3"):
        with gr.Column():
            gr.Markdown("""
# Demo [Stable Diffusion 3 Medium](https://huggingface.co/stabilityai/stable-diffusion-3-medium)
            Learn more about the [Stable Diffusion 3 series](https://stability.ai/news/stable-diffusion-3). Try on [Stability AI API](https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1sd3/post), [Stable Assistant](https://stability.ai/stable-assistant), or on Discord via [Stable Artisan](https://stability.ai/stable-artisan). Run locally with [ComfyUI](https://github.com/comfyanonymous/ComfyUI) or [diffusers](https://github.com/huggingface/diffusers).
""")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0)
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1024,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=5.0,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
with gr.Tab("Audio"):
audio_prompt = gr.Textbox(label="Prompt", placeholder="Enter your text prompt here")
audio_duration = gr.Slider(0, 47, value=30, label="Duration in Seconds")
audio_steps = gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps")
audio_cfg = gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
audio_process_button = gr.Button("Process Audio")
audio_output = gr.Audio(type="filepath", label="Generated Audio")
audio_process_button.click(generate_audio, [audio_prompt, audio_duration, audio_steps, audio_cfg], [audio_output])
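    # gr.on binds one handler to several triggers: clicking Run or pressing
    # Enter in either prompt field all invoke infer with the same inputs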
gr.on(
triggers=[run_button.click, prompt.submit, negative_prompt.submit],
fn = infer,
inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
outputs = [result, seed]
)
# Pre-load the model to avoid multiprocessing issues
model, model_config = load_model()
demo.launch()