Update app.py
app.py CHANGED
@@ -1,38 +1,49 @@
+import os
 import torch
 import gradio as gr
 from diffusers import StableDiffusion3Pipeline
+from huggingface_hub import login

-pipeline = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers",
-                                                    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-                                                    text_encoder_3=None,
-                                                    tokenizer_3=None)
-#pipeline.enable_model_cpu_offload()
-pipeline.to(device)
-
-image = pipeline(
-    prompt=prompt,
-    negative_prompt="blurred, ugly, watermark, low, resolution, blurry",
-    num_inference_steps=40,
-    height=1024,
-    width=1024,
-    guidance_scale=9.0
-).images[0]
-
-#image.show()
-return image
+# Load Hugging Face token from environment variable
+hf_token = os.getenv("HF_TOKEN")
+login(token=hf_token)
+
+# Set device (CUDA if available)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Load the model once at the beginning
+pipeline = StableDiffusion3Pipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3-medium-diffusers",
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    use_auth_token=hf_token,  # Use the token for authentication
+    load_in_8bit=True  # Load the model in 8-bit precision
+)
+pipeline.to(device)
+
+def image_generator(prompt):
+    # Generate the image based on the prompt
+    with torch.no_grad():  # Disable gradient calculations for faster inference
+        image = pipeline(
+            prompt=prompt,
+            negative_prompt="blurred, ugly, watermark, low, resolution, blurry",
+            num_inference_steps=20,  # Reduced steps for faster generation
+            height=512,  # Smaller height for quicker generation
+            width=512,  # Smaller width for quicker generation
+            guidance_scale=7.5,  # Moderate guidance scale
+            num_images_per_prompt=1  # Generate only one image per prompt
+        ).images[0]
+
+    return image
+
+# Gradio interface setup
+interface = gr.Interface(
+    fn=image_generator,
+    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt..."),
+    outputs=gr.Image(type="pil"),
+    title="Image Generator App",
+    description="This is a simple image generator app using Stability AI's Stable Diffusion 3 model."
+)
+
+# Launch the Gradio interface
+interface.launch()
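
A note on the token handling: os.getenv("HF_TOKEN") returns None when the secret is not configured, and login(token=None) falls back to an interactive prompt, which fails inside a non-interactive Space. A minimal defensive variant (my suggestion, not part of the commit):

import os
from huggingface_hub import login

# HF_TOKEN is expected to be configured as a secret in the Space settings.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    print("HF_TOKEN is not set; gated models such as SD3 may fail to download.")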
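
On the quantization flag: as far as I can tell, load_in_8bit is not a recognized argument of StableDiffusion3Pipeline.from_pretrained; diffusers warns about unexpected keyword arguments and ignores them, so the model most likely still loads in full fp16 here. Recent diffusers releases instead route quantization through a quantization_config on individual components, and use_auth_token has been deprecated in favor of token (it is also redundant here, since login() already stores the token for downloads). A minimal sketch of component-level 8-bit loading, assuming diffusers >= 0.31 with bitsandbytes and accelerate installed:

import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel, StableDiffusion3Pipeline

model_id = "stabilityai/stable-diffusion-3-medium-diffusers"

# Quantize only the transformer; the VAE and text encoders stay in fp16.
quant_config = BitsAndBytesConfig(load_in_8bit=True)
transformer = SD3Transformer2DModel.from_pretrained(
    model_id,
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)

pipeline = StableDiffusion3Pipeline.from_pretrained(
    model_id,
    transformer=transformer,
    torch_dtype=torch.float16,
)
# bitsandbytes places the 8-bit weights on the GPU itself; offloading the
# remaining components avoids calling .to("cuda") on a quantized model,
# which can raise an error.
pipeline.enable_model_cpu_offload()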
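
Finally, interface.launch() runs at import time, which makes app.py awkward to import for testing. Since Spaces executes app.py as the main script, guarding the launch (again my suggestion, not in the commit) should change nothing in deployment:

# Launch only when app.py is executed directly, not when it is imported.
if __name__ == "__main__":
    interface.launch()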