Huage001 committed on
Commit b4bb1b4 · verified · 1 Parent(s): bc0656e

Update app.py

Files changed (1): app.py +43 -14
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import numpy as np
 import random
 #import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
+from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -13,22 +13,47 @@ if torch.cuda.is_available():
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 #@spaces.GPU #[uncomment to use ZeroGPU]
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
+def infer_t2i(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
+
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
+
+    generator = torch.Generator().manual_seed(seed)
+
+    pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+
+    image = pipe(
+        prompt = prompt,
+        negative_prompt = negative_prompt,
+        guidance_scale = guidance_scale,
+        num_inference_steps = num_inference_steps,
+        width = width,
+        height = height,
+        generator = generator
+    ).images[0]
+
+    return image, seed
+
+#@spaces.GPU #[uncomment to use ZeroGPU]
+def infer_i2i(prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
+
+    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
 
     image = pipe(
         prompt = prompt,
+        image = image.resize((width, height)),
+        strength = strength,
         negative_prompt = negative_prompt,
         guidance_scale = guidance_scale,
         num_inference_steps = num_inference_steps,
@@ -136,7 +161,7 @@ with gr.Blocks(css=css) as demo:
         )
 
     run_button.click(
-        fn=infer,
+        fn=infer_t2i,
         inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
         outputs = [result, seed]
     )
@@ -159,6 +184,8 @@ with gr.Blocks(css=css) as demo:
             )
 
             run_button = gr.Button("Run", scale=0)
+
+        image_upload_input = gr.Image(label="Upload an Image", type="pil")
 
         result = gr.Image(label="Result", show_label=False)
 
@@ -216,6 +243,14 @@ with gr.Blocks(css=css) as demo:
                     step=1,
                     value=25, #Replace with defaults that work for your model
                 )
+
+                editing_strength = gr.Slider(
+                    label="Strength of editing",
+                    minimum=0,
+                    maximum=1,
+                    step=0.01,
+                    value=0.5, #Replace with defaults that work for your model
+                )
 
         gr.Examples(
             examples = examples,
@@ -223,15 +258,9 @@ with gr.Blocks(css=css) as demo:
         )
 
     run_button.click(
-        fn=infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        fn=infer_i2i,
+        inputs = [prompt, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs = [result, seed]
     )
-    # gr.on(
-    #     triggers=[run_button.click, prompt.submit],
-    #     fn = infer,
-    #     inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-    #     outputs = [result, seed]
-    # )
 
 demo.queue().launch()
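Note on the pattern in this commit: both handlers construct their pipeline with from_pretrained() inside the click callback, so the checkpoint is re-read on every generation. A minimal sketch of the usual alternative, loading once at startup and sharing weights between the two pipelines via the Diffusers components dict. The checkpoint id below is a placeholder, and model_repo_id, torch_dtype, and device stand in for the globals already defined near the top of app.py:

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

model_repo_id = "stabilityai/stable-diffusion-2-base"  # placeholder checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Load the checkpoint once at import time.
t2i_pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)

# Reuse the already-loaded UNet, VAE, text encoder, etc.: no second download
# and no second copy of the weights in memory.
i2i_pipe = StableDiffusionImg2ImgPipeline(**t2i_pipe.components)

The handlers would then call the module-level t2i_pipe / i2i_pipe instead of rebuilding a pipeline per request. Per-call construction keeps the Space memory-light when idle, but it adds several seconds of checkpoint loading to every click, so the trade-off depends on how the Space is used.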