craftgamesnetwork committed
Commit 1569a11 · verified · 1 Parent(s): a7335d3

Update app.py

Files changed (1)
  1. app.py +22 -55
app.py CHANGED
@@ -12,21 +12,20 @@ import spaces
 import torch
 from PIL import Image
 from io import BytesIO
-from diffusers.utils import load_image
-from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting
+from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
-ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
-ENABLE_USE_INPAINTING = os.getenv("ENABLE_USE_INPAINTING", "1") == "1"
+ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_VAE", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -58,10 +57,7 @@ def generate(
     lora = '',
     lora_scale: float = 0.7,
     use_img2img: bool = False,
-    use_inpainting: bool = False,
     url = '',
-    img_url = '',
-    mask_url = '',
 ):
     if torch.cuda.is_available():
 
@@ -78,17 +74,11 @@ def generate(
     if use_vae:
         vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
         pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-
-    if use_inpainting:
-        pipe = AutoPipelineForInpainting.from_pretrained(model, torch_dtype=torch.float16)
-
+
     response = requests.get(url)
     init_image = Image.open(BytesIO(response.content)).convert("RGB")
     init_image = init_image.resize((width, height))
 
-    image_init = load_image(img_url)
-    mask_image = load_image(mask_url)
-
     if use_lora:
         pipe.load_lora_weights(lora)
         pipe.fuse_lora(lora_scale)
@@ -111,23 +101,20 @@ def generate(
     if not use_negative_prompt_2:
         negative_prompt_2 = None # type: ignore
 
-    if use_inpainting:
-        image = pipe(
+    if not use_img2img:
+        return pipe(
             prompt=prompt,
-            image=image_init,
-            mask_image=mask_image,
-            strength=strength_img2img,
             negative_prompt=negative_prompt,
             prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
             width=width,
             height=height,
-            negative_prompt_2=negative_prompt_2,
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
+            output_type="pil",
         ).images[0]
-        return image
-    elif use_img2img:
+    else:
         images = pipe(
             prompt=prompt,
             image=init_image,
@@ -143,19 +130,11 @@ def generate(
             output_type="pil",
         ).images[0]
         return images
-    else:
-        return pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            prompt_2=prompt_2,
-            negative_prompt_2=negative_prompt_2,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale_base,
-            num_inference_steps=num_inference_steps_base,
-            generator=generator,
-            output_type="pil",
-        ).images[0]
+
+examples = [
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "An astronaut riding a green horse",
+]
 
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
@@ -175,8 +154,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             value=0.7,
         )
         url = gr.Text(label='URL (Img2Img)', placeholder='e.g https://example.com/image.png')
-        img_url = gr.Text(label='URL (Image Inpainting)', placeholder='e.g https://example.com/image.png')
-        mask_url = gr.Text(label='URL (Mask Image Inpainting)', placeholder='e.g https://example.com/image.png')
         with gr.Row():
             prompt = gr.Text(
                 placeholder="Input prompt",
@@ -189,7 +166,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         result = gr.Image(label="Result", show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
-            use_inpainting = gr.Checkbox(label='Use Inpainting', value=False, visible=ENABLE_USE_INPAINTING)
             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
             use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
             use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
@@ -267,6 +243,14 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
                 value=0.7,
             )
 
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=result,
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
+
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
@@ -309,20 +293,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
-    use_inpainting.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_inpainting,
-        outputs=img_url,
-        queue=False,
-        api_name=False,
-    )
-    use_inpainting.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_inpainting,
-        outputs=mask_url,
-        queue=False,
-        api_name=False,
-    )
 
     gr.on(
         triggers=[
@@ -360,10 +330,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             lora,
             lora_scale,
             use_img2img,
-            use_inpainting,
             url,
-            img_url,
-            mask_url,
         ],
         outputs=result,
         api_name="run",