Sal-ONE committed
Commit 758511a · verified · 1 Parent(s): f04bc6b

Update app.py

Files changed (1)
  1. app.py +11 -4
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 from diffusers import StableDiffusionPipeline
 import torch
+import random

 # Function to load the selected Stable Diffusion model
 def load_model(model_id):
@@ -24,9 +25,14 @@ def switch_model(selected_model):
     pipeline = load_model(selected_model)
     return f"Model switched to: {selected_model}"

-def generate_image(prompt, num_inference_steps=50, guidance_scale=7.5):
+def generate_image(prompt, num_inference_steps=20, guidance_scale=7.5, seed=None):
     """Generate an image from a text prompt using Stable Diffusion."""
-    image = pipeline(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
+    if seed is not None:
+        generator = torch.manual_seed(seed)
+    else:
+        generator = None
+
+    image = pipeline(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator).images[0]
     return image

 # Define the Gradio app layout
@@ -43,8 +49,9 @@ with gr.Blocks() as app:
             )
             model_switch_status = gr.Textbox(label="Model Status", value=f"Current model: {current_model_id}", interactive=False)
             prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here", lines=2)
-            num_inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, value=50, step=5)
+            num_inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=30, value=20, step=1)
             guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=7.5, step=0.5)
+            seed = gr.Number(label="Seed (Optional)", value=None)
             generate_btn = gr.Button("Generate Image")

         with gr.Column():
@@ -58,7 +65,7 @@ with gr.Blocks() as app:

     generate_btn.click(
         generate_image,
-        inputs=[prompt, num_inference_steps, guidance_scale],
+        inputs=[prompt, num_inference_steps, guidance_scale, seed],
         outputs=output_image
     )

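The new seed input makes generations reproducible by handing a torch.Generator to the pipeline. Below is a minimal sketch of the same idea outside the Gradio app, not part of this commit: the checkpoint id, prompt, device handling, and output filename are illustrative assumptions. It uses an explicit per-call generator bound to the pipeline's device, which gives the same effect as torch.manual_seed(seed) in the diff without mutating global RNG state.

```python
# Sketch only: reproduce the seeded-generation behaviour outside the app.
# Assumes a checkpoint comparable to what the app's load_model() would load.
import torch
from diffusers import StableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"  # example checkpoint, not from the commit
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

# Explicit generator pinned to the device; same seed -> same image.
generator = torch.Generator(device=device).manual_seed(42)

image = pipe(
    "a watercolor painting of a lighthouse at dusk",  # example prompt
    num_inference_steps=20,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("output.png")
```

Running this twice with the same seed should yield the same image; passing generator=None falls back to nondeterministic sampling, which matches the app's behaviour when the seed box is left empty.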