sbicy committed on
Commit
b3b7325
·
verified ·
1 Parent(s): 3e85111

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -35
app.py CHANGED
@@ -1,38 +1,58 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
- import spaces
4
 
5
- # Load the pipeline (lazy-load the model to save resources)
6
- @spaces.GPU
7
  def load_model():
8
- return pipeline(
9
- "text-to-image",
10
- model="stabilityai/stable-diffusion-2-1",
11
- torch_dtype="float16" # Ensure compatibility with ZeroGPU
12
- )
13
-
14
- # Initialize the pipeline
15
- model = load_model()
16
-
17
- # Function to generate images
18
- @spaces.GPU
19
- def generate_image(prompt, guidance_scale=7.5):
20
- print(f"Generating image for prompt: {prompt}")
21
- images = model(prompt, guidance_scale=guidance_scale)
22
- return images[0]
23
-
24
- # Gradio interface
25
- interface = gr.Interface(
26
- fn=generate_image,
27
- inputs=[
28
- gr.Textbox(label="Prompt", placeholder="Describe your image..."),
29
- gr.Slider(1, 20, value=7.5, label="Guidance Scale")
30
- ],
31
- outputs=gr.Image(label="Generated Image"),
32
- title="Text-to-Image Generator",
33
- description="Generate images from text prompts using Stable Diffusion."
34
- )
35
-
36
- # Launch the app
37
- if __name__ == "__main__":
38
- interface.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
3
+ import torch
4
 
 
 
5
def load_model():
    """Load the Stable Diffusion 2.1 pipeline, configured for the available device.

    Returns:
        A ready-to-use StableDiffusionPipeline with a DPM-Solver++ scheduler,
        placed on CUDA when available, otherwise on CPU.

    Raises:
        Exception: re-raised after logging if loading or device placement fails.
    """
    use_cuda = torch.cuda.is_available()
    # fp16 weights/dtype are only safe on GPU; half-precision Stable Diffusion
    # inference on CPU is unsupported, so fall back to float32 there.
    dtype = torch.float16 if use_cuda else torch.float32

    load_kwargs = {
        "torch_dtype": dtype,
        "safety_checker": None,  # Disable safety checker for faster inference
    }
    if use_cuda:
        # 'variant' (not the deprecated 'revision') selects the half-precision weights.
        load_kwargs["variant"] = "fp16"

    try:
        pipeline = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1",
            **load_kwargs,
        )
    except Exception as e:
        print(f"Error loading the model: {e}")
        raise

    # Swap in DPM-Solver++ multistep scheduler: good quality in far fewer steps.
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)

    try:
        pipeline = pipeline.to("cuda" if use_cuda else "cpu")
    except Exception as e:
        print(f"Error moving the model to device: {e}")
        raise

    return pipeline
29
+
30
# Initialize the model once at import time.
try:
    model = load_model()
except Exception as e:
    # Fail fast: swallowing this error would leave `model` undefined, and every
    # later generate() call would crash with a confusing NameError instead.
    print(f"Error initializing the model: {e}")
    raise
35
+
36
# Gradio callback: turn a text prompt into one generated image.
def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
    """Generate an image for *prompt* with the module-level pipeline.

    Args:
        prompt: Text description of the desired image.
        guidance_scale: Classifier-free guidance strength; higher values follow
            the prompt more closely at the cost of diversity.
        num_inference_steps: Number of denoising steps (more = slower, sharper).

    Returns:
        The first PIL image produced by the pipeline.

    Raises:
        gr.Error: if generation fails; Gradio displays the message in the UI.
    """
    try:
        # Generate image from the prompt
        images = model(prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images
        return images[0]
    except Exception as e:
        # Returning an error *string* to a gr.Image output is invalid — Gradio
        # would treat it as an image filepath. gr.Error surfaces the message.
        raise gr.Error(f"Error generating image: {e}")
44
+
45
# Build the Gradio UI: prompt and sampling controls in, generated image out.
with gr.Blocks() as demo:
    with gr.Row():
        prompt_box = gr.Textbox(label="Enter your prompt")
        cfg_slider = gr.Slider(1.0, 10.0, value=7.5, label="Guidance Scale")
        steps_slider = gr.Slider(10, 100, value=50, label="Number of Inference Steps")
    with gr.Row():
        generate_btn = gr.Button("Generate")
    with gr.Row():
        image_out = gr.Image()

    # Wire the button to the generation callback.
    generate_btn.click(
        generate,
        inputs=[prompt_box, cfg_slider, steps_slider],
        outputs=image_out,
    )

demo.launch()