sbicy committed
Commit b9a6225 · verified · 1 Parent(s): 669fd03

Update app.py

Files changed (1):
  1. app.py +38 -29
app.py CHANGED
@@ -1,38 +1,47 @@
 import gradio as gr
-from transformers import pipeline
-import spaces
+from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
+import torch
 
-# Load the pipeline (lazy-load the model to save resources)
-@spaces.GPU
 def load_model():
-    return pipeline(
-        "text-to-image",
-        model="stabilityai/stable-diffusion-2-1",
-        torch_dtype="float16"  # Ensure compatibility with ZeroGPU
+    # Specify the Stable Diffusion pipeline with an appropriate model type
+    pipeline = StableDiffusionPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-2-1",
+        torch_dtype=torch.float16,
+        revision="fp16",
+        safety_checker=None  # Disable safety checker if necessary
     )
+
+    # Set the scheduler (optional but recommended)
+    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
+
+    # Move pipeline to GPU or ZeroGPU
+    pipeline = pipeline.to("cuda")  # or ZeroGPU-specific setup
+
+    return pipeline
 
-# Initialize the pipeline
-model = load_model()
+# Initialize the model
+try:
+    model = load_model()
+except Exception as e:
+    print(f"Error loading the model: {e}")
 
-# Function to generate images
-@spaces.GPU
-def generate_image(prompt, guidance_scale=7.5):
-    print(f"Generating image for prompt: {prompt}")
-    images = model(prompt, guidance_scale=guidance_scale)
+# Define Gradio interface
+def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
+    # Generate the image
+    images = model(prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images
     return images[0]
 
-# Gradio interface
-interface = gr.Interface(
-    fn=generate_image,
-    inputs=[
-        gr.Textbox(label="Prompt", placeholder="Describe your image..."),
-        gr.Slider(1, 20, value=7.5, label="Guidance Scale")
-    ],
-    outputs=gr.Image(label="Generated Image"),
-    title="Text-to-Image Generator",
-    description="Generate images from text prompts using Stable Diffusion."
-)
+# Gradio Interface
+with gr.Blocks() as demo:
+    with gr.Row():
+        prompt = gr.Textbox(label="Enter your prompt")
+        guidance_scale = gr.Slider(1.0, 10.0, value=7.5, label="Guidance Scale")
+        steps = gr.Slider(10, 100, value=50, label="Number of Inference Steps")
+    with gr.Row():
+        submit = gr.Button("Generate")
+    with gr.Row():
+        output = gr.Image()
 
-# Launch the app
-if __name__ == "__main__":
-    interface.launch(server_name="0.0.0.0", server_port=7860)
+    submit.click(generate, inputs=[prompt, guidance_scale, steps], outputs=output)
+
+demo.launch()
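
Note: the "# Move pipeline to GPU or ZeroGPU" comment leaves the ZeroGPU path unspecified, and calling pipeline.to("cuda") at import time generally fails on a ZeroGPU Space, since CUDA only becomes available around calls decorated with @spaces.GPU (the decorator the previous version of this file used). A minimal sketch of that ZeroGPU-specific setup, assuming the Space runs on ZeroGPU hardware and reusing the model and parameters from this commit; the generate function would be wired to the same gr.Blocks UI as above:

import spaces  # must be imported before any CUDA work on ZeroGPU
import torch
from diffusers import StableDiffusionPipeline

# Load at startup; with the spaces package imported, the .to("cuda")
# call is deferred until a GPU is actually attached to a decorated call.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch.float16,
)
pipe.to("cuda")

@spaces.GPU  # a GPU is allocated only for the duration of this call
def generate(prompt, guidance_scale=7.5, num_inference_steps=50):
    return pipe(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]

On a regular GPU Space the committed version runs as-is; the decorator matters only on ZeroGPU hardware.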