radames committed
Commit decd923 · 1 Parent(s): 1d1e539

depth hyper SD

server/config.py CHANGED
@@ -31,7 +31,7 @@ MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None) == "True"
 TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None) == "True"
-USE_TAESD = os.environ.get("USE_TAESD", "True") == "True"
+USE_TAESD = os.environ.get("USE_TAESD", "False") == "True"
 default_host = os.getenv("HOST", "0.0.0.0")
 default_port = int(os.getenv("PORT", "7860"))
 
@@ -67,12 +67,6 @@ parser.add_argument(
     action="store_true",
     help="Use Tiny Autoencoder",
 )
-parser.add_argument(
-    "--no-taesd",
-    dest="taesd",
-    action="store_false",
-    help="Use Tiny Autoencoder",
-)
 parser.add_argument(
     "--pipeline",
     type=str,
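Net effect of the config change: TAESD is now opt-in instead of on by default, and the redundant --no-taesd flag (whose help text duplicated --taesd anyway) is gone. A minimal sketch of the new env-var semantics (illustrative only, mirroring the expression in server/config.py):

import os

# With this commit, an unset USE_TAESD resolves to False
# (it previously defaulted to "True").
os.environ.pop("USE_TAESD", None)
assert (os.environ.get("USE_TAESD", "False") == "True") is False

# Exporting USE_TAESD=True (or passing --taesd on the CLI) re-enables
# the Tiny Autoencoder.
os.environ["USE_TAESD"] = "True"
assert (os.environ.get("USE_TAESD", "False") == "True") is True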
server/pipelines/controlnetDepthHyperSD.py ADDED
@@ -0,0 +1,277 @@
+from diffusers import (
+    StableDiffusionControlNetImg2ImgPipeline,
+    ControlNetModel,
+    AutoencoderTiny,
+    TCDScheduler,
+)
+from compel import Compel
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import pipeline
+
+try:
+    import intel_extension_for_pytorch as ipex  # type: ignore
+except ImportError:
+    pass
+
+from config import Args
+from pydantic import BaseModel, Field
+from PIL import Image
+import math
+
+controlnet_model = "lllyasviel/control_v11f1p_sd15_depth"
+model_id = "runwayml/stable-diffusion-v1-5"
+taesd_model = "madebyollin/taesd"
+
+default_prompt = "Portrait of The Terminator, glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5 cinematic, masterpiece"
+default_negative_prompt = "blurry, low quality, render, 3D, oversaturated"
+page_content = """
+<h1 class="text-3xl font-bold">Hyper-SD Unified + Depth</h1>
+<h3 class="text-xl font-bold">Image-to-Image ControlNet</h3>
+"""
+
+
+class Pipeline:
+    class Info(BaseModel):
+        name: str = "controlnet+HyperSD15Depth"
+        title: str = "Hyper-SD + Depth ControlNet"
+        description: str = "Generates an image from an input image and a text prompt"
+        input_mode: str = "image"
+        page_content: str = page_content
+
+    class InputParams(BaseModel):
+        prompt: str = Field(
+            default_prompt,
+            title="Prompt",
+            field="textarea",
+            id="prompt",
+        )
+        negative_prompt: str = Field(
+            default_negative_prompt,
+            title="Negative Prompt",
+            field="textarea",
+            id="negative_prompt",
+            hide=True,
+        )
+        seed: int = Field(
+            2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
+        )
+        steps: int = Field(
+            2, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
+        )
+        width: int = Field(
+            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+        )
+        height: int = Field(
+            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+        )
+        guidance_scale: float = Field(
+            0.0,
+            min=0,
+            max=10,
+            step=0.001,
+            title="Guidance Scale",
+            field="range",
+            hide=True,
+            id="guidance_scale",
+        )
+        strength: float = Field(
+            0.5,
+            min=0.25,
+            max=1.0,
+            step=0.001,
+            title="Strength",
+            field="range",
+            hide=True,
+            id="strength",
+        )
+        eta: float = Field(
+            1.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Eta",
+            field="range",
+            hide=True,
+            id="eta",
+        )
+        controlnet_scale: float = Field(
+            0.5,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet Scale",
+            field="range",
+            hide=True,
+            id="controlnet_scale",
+        )
+        controlnet_start: float = Field(
+            0.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet Start",
+            field="range",
+            hide=True,
+            id="controlnet_start",
+        )
+        controlnet_end: float = Field(
+            1.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet End",
+            field="range",
+            hide=True,
+            id="controlnet_end",
+        )
+        debug_depth: bool = Field(
+            False,
+            title="Debug Depth",
+            field="checkbox",
+            hide=True,
+            id="debug_depth",
+        )
+
+    def __init__(self, args: Args, device: torch.device, torch_dtype: torch.dtype):
+        controlnet_depth = ControlNetModel.from_pretrained(
+            controlnet_model, torch_dtype=torch_dtype
+        )
+
+        self.depth_estimator = pipeline(
+            task="depth-estimation",
+            # model="Intel/dpt-swinv2-large-384",
+            # model="Intel/dpt-swinv2-base-384",
+            model="LiheYoung/depth-anything-small-hf",
+            device=device,
+        )
+
+        if args.safety_checker:
+            self.pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+                model_id, controlnet=controlnet_depth, torch_dtype=torch_dtype
+            )
+        else:
+            self.pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+                model_id,
+                safety_checker=None,
+                controlnet=controlnet_depth,
+                torch_dtype=torch_dtype,
+            )
+
+        if args.taesd:
+            self.pipe.vae = AutoencoderTiny.from_pretrained(
+                taesd_model, torch_dtype=torch_dtype
+            )
+
+        # Hyper-SD distils SD 1.5 down to very few steps; pair the 1-step LoRA
+        # with the TCD scheduler it was trained against.
+        self.pipe.load_lora_weights(
+            hf_hub_download("ByteDance/Hyper-SD", "Hyper-SD15-1step-lora.safetensors")
+        )
+
+        self.pipe.scheduler = TCDScheduler.from_config(self.pipe.scheduler.config)
+
+        self.pipe.fuse_lora()
+
+        if args.sfast:
+            from sfast.compilers.stable_diffusion_pipeline_compiler import (
+                compile,
+                CompilationConfig,
+            )
+
+            config = CompilationConfig.Default()
+            # config.enable_xformers = True
+            config.enable_triton = True
+            config.enable_cuda_graph = True
+            self.pipe = compile(self.pipe, config=config)
+
+        self.pipe.set_progress_bar_config(disable=True)
+        self.pipe.to(device=device)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
+
+        if args.compel:
+            # SD 1.5 has a single tokenizer/text-encoder pair (unlike SDXL),
+            # so Compel is configured without pooled embeddings here.
+            self.pipe.compel_proc = Compel(
+                tokenizer=self.pipe.tokenizer,
+                text_encoder=self.pipe.text_encoder,
+            )
+
+        if args.torch_compile:
+            self.pipe.unet = torch.compile(
+                self.pipe.unet, mode="reduce-overhead", fullgraph=True
+            )
+            self.pipe.vae = torch.compile(
+                self.pipe.vae, mode="reduce-overhead", fullgraph=True
+            )
+            self.pipe(
+                prompt="warmup",
+                image=[Image.new("RGB", (768, 768))],
+                control_image=[Image.new("RGB", (768, 768))],
+            )
+
+    def predict(self, params: "Pipeline.InputParams") -> Image.Image:
+        generator = torch.manual_seed(params.seed)
+
+        prompt = params.prompt
+        negative_prompt = params.negative_prompt
+        prompt_embeds = None
+        negative_prompt_embeds = None
+        if hasattr(self.pipe, "compel_proc"):
+            _prompt_embeds = self.pipe.compel_proc(
+                [params.prompt, params.negative_prompt]
+            )
+            prompt = None
+            negative_prompt = None
+            prompt_embeds = _prompt_embeds[0:1]
+            negative_prompt_embeds = _prompt_embeds[1:2]
+
+        control_image = self.depth_estimator(params.image)["depth"]
+        steps = params.steps
+        strength = params.strength
+        # Img2img runs roughly int(steps * strength) denoising steps; make sure
+        # at least one step survives the rounding.
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))
+
+        results = self.pipe(
+            image=params.image,
+            control_image=control_image,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            generator=generator,
+            strength=strength,
+            eta=params.eta,
+            num_inference_steps=steps,
+            guidance_scale=params.guidance_scale,
+            width=params.width,
+            height=params.height,
+            output_type="pil",
+            controlnet_conditioning_scale=params.controlnet_scale,
+            control_guidance_start=params.controlnet_start,
+            control_guidance_end=params.controlnet_end,
+        )
+
+        nsfw_content_detected = (
+            results.nsfw_content_detected[0]
+            if "nsfw_content_detected" in results
+            else False
+        )
+        if nsfw_content_detected:
+            return None
+        result_image = results.images[0]
+        if params.debug_depth:
+            # Paste the depth map into the bottom-right corner of the result.
+            w0, h0 = (200, 200)
+            control_image = control_image.resize((w0, h0))
+            w1, h1 = result_image.size
+            result_image.paste(control_image, (w1 - w0, h1 - h0))
+
+        return result_image
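The steps/strength guard in predict() deserves a second look, since Hyper-SD runs at very low step counts. A standalone sketch of the same arithmetic (min_viable_steps is a hypothetical helper name, not part of the commit):

import math

def min_viable_steps(steps: int, strength: float) -> int:
    # Img2img executes roughly int(steps * strength) denoising steps, so a low
    # strength combined with few requested steps can round down to zero.
    if int(steps * strength) < 1:
        steps = math.ceil(1 / max(0.10, strength))
    return steps

assert min_viable_steps(2, 0.5) == 2   # int(2 * 0.5) = 1: unchanged
assert min_viable_steps(2, 0.3) == 4   # int(2 * 0.3) = 0: bumped to ceil(1 / 0.3)
assert min_viable_steps(1, 0.25) == 4  # int(1 * 0.25) = 0: bumped to ceil(1 / 0.25)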
server/pipelines/controlnetDepthHyperSDXL.py ADDED
@@ -0,0 +1,276 @@
+from diffusers import (
+    StableDiffusionXLControlNetImg2ImgPipeline,
+    ControlNetModel,
+    AutoencoderKL,
+    TCDScheduler,
+)
+from compel import Compel, ReturnedEmbeddingsType
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import pipeline
+
+try:
+    import intel_extension_for_pytorch as ipex  # type: ignore
+except ImportError:
+    pass
+
+from config import Args
+from pydantic import BaseModel, Field
+from PIL import Image
+import math
+
+controlnet_model = "diffusers/controlnet-depth-sdxl-1.0"
+model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+taesd_model = "madebyollin/taesdxl"
+
+default_prompt = "Portrait of The Terminator, glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5 cinematic, masterpiece"
+default_negative_prompt = "blurry, low quality, render, 3D, oversaturated"
+page_content = """
+<h1 class="text-3xl font-bold">Hyper-SDXL Unified + Depth</h1>
+<h3 class="text-xl font-bold">Image-to-Image ControlNet</h3>
+"""
+
+
+class Pipeline:
+    class Info(BaseModel):
+        name: str = "controlnet+HyperSDXLDepth"
+        title: str = "Hyper-SDXL + Depth ControlNet"
+        description: str = "Generates an image from an input image and a text prompt"
+        input_mode: str = "image"
+        page_content: str = page_content
+
+    class InputParams(BaseModel):
+        prompt: str = Field(
+            default_prompt,
+            title="Prompt",
+            field="textarea",
+            id="prompt",
+        )
+        negative_prompt: str = Field(
+            default_negative_prompt,
+            title="Negative Prompt",
+            field="textarea",
+            id="negative_prompt",
+            hide=True,
+        )
+        seed: int = Field(
+            2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
+        )
+        steps: int = Field(
+            2, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
+        )
+        width: int = Field(
+            1024, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+        )
+        height: int = Field(
+            1024, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+        )
+        guidance_scale: float = Field(
+            0.0,
+            min=0,
+            max=10,
+            step=0.001,
+            title="Guidance Scale",
+            field="range",
+            hide=True,
+            id="guidance_scale",
+        )
+        strength: float = Field(
+            0.5,
+            min=0.25,
+            max=1.0,
+            step=0.001,
+            title="Strength",
+            field="range",
+            hide=True,
+            id="strength",
+        )
+        eta: float = Field(
+            1.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Eta",
+            field="range",
+            hide=True,
+            id="eta",
+        )
+        controlnet_scale: float = Field(
+            0.5,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet Scale",
+            field="range",
+            hide=True,
+            id="controlnet_scale",
+        )
+        controlnet_start: float = Field(
+            0.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet Start",
+            field="range",
+            hide=True,
+            id="controlnet_start",
+        )
+        controlnet_end: float = Field(
+            1.0,
+            min=0,
+            max=1.0,
+            step=0.001,
+            title="Controlnet End",
+            field="range",
+            hide=True,
+            id="controlnet_end",
+        )
+        debug_depth: bool = Field(
+            False,
+            title="Debug Depth",
+            field="checkbox",
+            hide=True,
+            id="debug_depth",
+        )
+
+    def __init__(self, args: Args, device: torch.device, torch_dtype: torch.dtype):
+        controlnet_depth = ControlNetModel.from_pretrained(
+            controlnet_model, torch_dtype=torch_dtype
+        )
+
+        self.depth_estimator = pipeline(
+            task="depth-estimation",
+            model="Intel/dpt-swinv2-base-384",
+            device=device,
+        )
+
+        # The fp16-fix VAE avoids NaNs when running the SDXL VAE in float16.
+        vae = AutoencoderKL.from_pretrained(
+            "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype
+        )
+
+        if args.safety_checker:
+            self.pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+                model_id, controlnet=controlnet_depth, vae=vae, torch_dtype=torch_dtype
+            )
+        else:
+            self.pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+                model_id,
+                safety_checker=None,
+                controlnet=controlnet_depth,
+                vae=vae,
+                torch_dtype=torch_dtype,
+            )
+
+        # Hyper-SDXL 1-step LoRA, paired with the TCD scheduler it was trained for.
+        self.pipe.load_lora_weights(
+            hf_hub_download("ByteDance/Hyper-SD", "Hyper-SDXL-1step-lora.safetensors")
+        )
+
+        self.pipe.scheduler = TCDScheduler.from_config(self.pipe.scheduler.config)
+
+        self.pipe.fuse_lora()
+
+        if args.sfast:
+            from sfast.compilers.stable_diffusion_pipeline_compiler import (
+                compile,
+                CompilationConfig,
+            )
+
+            config = CompilationConfig.Default()
+            # config.enable_xformers = True
+            config.enable_triton = True
+            config.enable_cuda_graph = True
+            self.pipe = compile(self.pipe, config=config)
+
+        self.pipe.set_progress_bar_config(disable=True)
+        self.pipe.to(device=device)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
+
+        if args.compel:
+            # SDXL has two text encoders; the second one also supplies the
+            # pooled embeddings the pipeline expects.
+            self.pipe.compel_proc = Compel(
+                tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],
+                text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],
+                returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+                requires_pooled=[False, True],
+            )
+
+        if args.torch_compile:
+            self.pipe.unet = torch.compile(
+                self.pipe.unet, mode="reduce-overhead", fullgraph=True
+            )
+            self.pipe.vae = torch.compile(
+                self.pipe.vae, mode="reduce-overhead", fullgraph=True
+            )
+            self.pipe(
+                prompt="warmup",
+                image=[Image.new("RGB", (768, 768))],
+                control_image=[Image.new("RGB", (768, 768))],
+            )
+
+    def predict(self, params: "Pipeline.InputParams") -> Image.Image:
+        generator = torch.manual_seed(params.seed)
+
+        prompt = params.prompt
+        negative_prompt = params.negative_prompt
+        prompt_embeds = None
+        pooled_prompt_embeds = None
+        negative_prompt_embeds = None
+        negative_pooled_prompt_embeds = None
+        if hasattr(self.pipe, "compel_proc"):
+            _prompt_embeds, _pooled_prompt_embeds = self.pipe.compel_proc(
+                [params.prompt, params.negative_prompt]
+            )
+            prompt = None
+            negative_prompt = None
+            # Slice the positive/negative halves from the full batch; slicing
+            # pooled_prompt_embeds after reassigning it would yield an empty
+            # tensor for the negative half.
+            prompt_embeds = _prompt_embeds[0:1]
+            pooled_prompt_embeds = _pooled_prompt_embeds[0:1]
+            negative_prompt_embeds = _prompt_embeds[1:2]
+            negative_pooled_prompt_embeds = _pooled_prompt_embeds[1:2]
+
+        control_image = self.depth_estimator(params.image)["depth"]
+        steps = params.steps
+        strength = params.strength
+        # Same guard as the SD 1.5 pipeline: keep at least one effective step.
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))
+
+        results = self.pipe(
+            image=params.image,
+            control_image=control_image,
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_embeds=prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            generator=generator,
+            strength=strength,
+            eta=params.eta,
+            num_inference_steps=steps,
+            guidance_scale=params.guidance_scale,
+            width=params.width,
+            height=params.height,
+            output_type="pil",
+            controlnet_conditioning_scale=params.controlnet_scale,
+            control_guidance_start=params.controlnet_start,
+            control_guidance_end=params.controlnet_end,
+        )
+
+        nsfw_content_detected = (
+            results.nsfw_content_detected[0]
+            if "nsfw_content_detected" in results
+            else False
+        )
+        if nsfw_content_detected:
+            return None
+        result_image = results.images[0]
+        if params.debug_depth:
+            # Paste the depth map into the bottom-right corner of the result.
+            w0, h0 = (200, 200)
+            control_image = control_image.resize((w0, h0))
+            w1, h1 = result_image.size
+            result_image.paste(control_image, (w1 - w0, h1 - h0))
+
+        return result_image
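Taken together, the server drives either new pipeline the same way: build it once with the parsed Args, then call predict() per frame. A minimal standalone sketch (the SimpleNamespace stub, file names, and module path are hypothetical; the real Args comes from server/config.py, and .dict() assumes pydantic v1-style models):

import torch
from PIL import Image
from types import SimpleNamespace

from pipelines.controlnetDepthHyperSD import Pipeline

# Stub of the flags this pipeline reads; the real server builds Args from CLI/env.
args = SimpleNamespace(
    safety_checker=False, taesd=False, sfast=False, compel=False, torch_compile=False
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

pipe = Pipeline(args, device, dtype)

# InputParams holds only the UI fields; predict() additionally reads
# params.image, which the server attaches to the request parameters.
params = SimpleNamespace(
    **Pipeline.InputParams().dict(), image=Image.open("input.png").convert("RGB")
)
result = pipe.predict(params)
if result is not None:
    result.save("output.png")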