dgoot committed
Commit d651ab5 · verified · 1 Parent(s): 73ae1c0

Update app.py

Files changed (1): app.py +9 -14
app.py CHANGED

@@ -6,15 +6,11 @@ import spaces
 from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
 model_repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
+device = "cuda"
+variant = "fp16"
 
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16, use_safetensors=True, variant=variant)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
@@ -86,7 +82,6 @@ with gr.Blocks(css=css) as demo:
                 label="Negative prompt",
                 max_lines=1,
                 placeholder="Enter a negative prompt",
-                visible=False,
             )
 
             seed = gr.Slider(
@@ -105,7 +100,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024, # Replace with defaults that work for your model
+                    value=1024,
                 )
 
                 height = gr.Slider(
@@ -113,24 +108,24 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024, # Replace with defaults that work for your model
+                    value=1024,
                 )
 
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
-                   maximum=10.0,
+                   maximum=100.0,
                    step=0.1,
-                   value=0.0, # Replace with defaults that work for your model
+                   value=0.0,
                )
 
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
-                   maximum=50,
+                   maximum=100,
                    step=1,
-                   value=2, # Replace with defaults that work for your model
+                   value=50,
                )
 
        gr.Examples(examples=examples, inputs=[prompt])
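
For context, a minimal standalone sketch (not part of this commit) of the loading and inference path the diff moves to. It mirrors the updated from_pretrained call and the new UI defaults (1024x1024, up to 50 steps); the prompt, seed, and guidance value below are illustrative placeholders, and a CUDA GPU is assumed since the new code hard-codes device = "cuda".

# Sketch only: mirrors the commit's new loading path, with placeholder inputs.
import torch
from diffusers import DiffusionPipeline

model_repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"

# fp16 safetensors weights, as in the updated from_pretrained call.
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
pipe = pipe.to("cuda")  # assumes a GPU is present, matching the new hard-coded device

# Illustrative generation using the new slider defaults (1024x1024, 50 steps).
# Prompt, seed, and guidance_scale here are placeholders, not values from the Space.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    prompt="an astronaut riding a horse on the moon",
    negative_prompt="",
    width=1024,
    height=1024,
    num_inference_steps=50,
    guidance_scale=7.0,
    generator=generator,
).images[0]
image.save("output.png")

The sketch keeps everything in float16 on the GPU, consistent with the commit dropping the CPU/fp32 fallback branch in favor of the fp16 safetensors variant.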