craftgamesnetwork committed
Commit 4a5d0df · verified · Parent: c59eee7

Update app.py

Files changed (1)
  1. app.py +68 -27
app.py CHANGED
@@ -10,11 +10,10 @@ import gradio as gr
 import numpy as np
 import spaces
 import torch
-import cv2
 from PIL import Image
 from io import BytesIO
 from diffusers.utils import load_image
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting
+from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
@@ -28,7 +27,6 @@ ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
 ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
 ENABLE_USE_INPAINTING = os.getenv("ENABLE_USE_INPAINTING", "1") == "1"
-ENABLE_USE_CONTROLNET = os.getenv("ENABLE_USE_CONTROLNET", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -61,23 +59,39 @@ def generate(
     lora_scale: float = 0.7,
     use_img2img: bool = False,
     use_inpainting: bool = False,
-    use_controlnet: bool = False,
     url = '',
     img_url = '',
     mask_url = '',
 ):
     if torch.cuda.is_available():
-        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
-        pipe = StableDiffusionControlNetPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16)
-
-        image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
-        np_image = np.array(image)
-
-        # get canny image
-        np_image = cv2.Canny(np_image, 100, 200)
-        np_image = np_image[:, :, None]
-        np_image = np.concatenate([np_image, np_image, np_image], axis=2)
-        canny_image = Image.fromarray(np_image)
+
+        if not use_img2img:
+            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+        if use_img2img:
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+        if use_inpainting:
+            pipe = AutoPipelineForInpainting.from_pretrained(model, torch_dtype=torch.float16)
+
+        response = requests.get(url)
+        init_image = Image.open(BytesIO(response.content)).convert("RGB")
+        init_image = init_image.resize((width, height))
+
+        image_init = load_image(img_url)
+        mask_image = load_image(mask_url)
+
+        if use_lora:
+            pipe.load_lora_weights(lora)
+            pipe.fuse_lora(lora_scale)
 
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
@@ -97,15 +111,51 @@ def generate(
     if not use_negative_prompt_2:
         negative_prompt_2 = None # type: ignore
 
-    if use_controlnet:
+    if use_inpainting:
         image = pipe(
-            "futuristic-looking woman",
-            num_inference_steps=20,
+            prompt=prompt,
+            image=image_init,
+            mask_image=mask_image,
+            strength=strength_img2img,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            width=width,
+            height=height,
+            negative_prompt_2=negative_prompt_2,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
             generator=generator,
-            image=image,
-            control_image=canny_image,
         ).images[0]
         return image
+    elif use_img2img:
+        images = pipe(
+            prompt=prompt,
+            image=init_image,
+            strength=strength_img2img,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+            output_type="pil",
+        ).images[0]
+        return images
+    else:
+        return pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+            output_type="pil",
+        ).images[0]
 
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
@@ -139,7 +189,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         result = gr.Image(label="Result", show_label=False)
         with gr.Accordion("Advanced options", open=False):
             with gr.Row():
-                use_controlnet = gr.Checkbox(label='Use Controlnet', value=False, visible=ENABLE_USE_CONTROLNET)
                 use_inpainting = gr.Checkbox(label='Use Inpainting', value=False, visible=ENABLE_USE_INPAINTING)
                 use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
                 use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
@@ -274,13 +323,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
-    use_controlnet.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_controlnet,
-        outputs=img_url,
-        queue=False,
-        api_name=False,
-    )
 
     gr.on(
         triggers=[
@@ -322,7 +364,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             url,
             img_url,
             mask_url,
-            use_controlnet,
         ],
         outputs=result,
         api_name="run",