Nef Caballero committed
Commit dfac101 · 1 Parent(s): 1f02a81

starting over

Files changed (1)
  1. app.py +230 -91
app.py CHANGED
@@ -18,14 +18,19 @@ hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp1
 
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     """Returns the value at the given index of a sequence or mapping.
+
     If the object is a sequence (like a list or string), returns the value at the given index.
     If the object is a mapping (like a dictionary), returns the value at the index-th key.
+
     Some nodes return a dictionary; in these cases, we look for the "results" key.
+
     Args:
         obj (Union[Sequence, Mapping]): The object to retrieve the value from.
         index (int): The index of the value to retrieve.
+
     Returns:
         Any: The value at the given index.
+
     Raises:
         IndexError: If the index is out of bounds for the object and the object is not a mapping.
     """
@@ -75,12 +80,6 @@ def add_extra_model_paths() -> None:
     """
     Parse the optional extra_model_paths.yaml file and add the parsed paths to the sys.path.
    """
-    # Ensure custom_nodes directory exists
-    custom_nodes_path = os.path.join(os.getcwd(), "custom_nodes")
-    if not os.path.exists(custom_nodes_path):
-        os.makedirs(custom_nodes_path)
-        print(f"Created custom_nodes directory at: {custom_nodes_path}")
-
     try:
         from main import load_extra_path_config
     except ImportError:
@@ -100,8 +99,10 @@ def add_extra_model_paths() -> None:
 add_comfyui_directory_to_sys_path()
 add_extra_model_paths()
 
+
 def import_custom_nodes() -> None:
     """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
+
     This function sets up a new asyncio event loop, initializes the PromptServer,
     creates a PromptQueue, and initializes the custom nodes.
     """
@@ -121,108 +122,246 @@ def import_custom_nodes() -> None:
     # Initializing custom nodes
     init_extra_nodes()
 
-# Initialize nodes before using them
-import_custom_nodes()
 
-# Now import and use NODE_CLASS_MAPPINGS
 from nodes import NODE_CLASS_MAPPINGS
 
-# Create instances of the nodes we'll use
-try:
-    # Load required models
-    dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
-    vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
-    unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
-    clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
-    stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
-
-    # Image processing nodes
-    loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
-    imagescale = NODE_CLASS_MAPPINGS["ImageScale"]()
-    vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
-    vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
-    saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
-
-    # Conditioning and sampling nodes
-    cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
-    ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
-    emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
-
-except KeyError as e:
-    print(f"Error: Could not find node {e} in NODE_CLASS_MAPPINGS")
-    print("Available nodes:", list(NODE_CLASS_MAPPINGS.keys()))
-    raise
-
-# Load all the models that need a safetensors file
-model_loaders = [
-    dualcliploader.load_clip(
-        clip_name1="t5/t5xxl_fp16.safetensors",
-        clip_name2="clip_l.safetensors",
-        type="flux",
-    ),
-    vaeloader.load_vae("vae/FLUX1/ae.safetensors"),
-    unetloader.load_unet("diffusion_models/flux1-depth-dev.safetensors"),
-    clipvisionloader.load_clip("clip_vision/sigclip_vision_patch14_384.safetensors"),
-    stylemodelloader.load_style_model("style_models/flux1-redux-dev.safetensors")
-]
-
-# Check which models are valid
+intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
+dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
+
+# To be added to `model_loaders` as it loads a model
+dualcliploader_357 = dualcliploader.load_clip(
+    clip_name1="t5/t5xxl_fp16.safetensors",
+    clip_name2="clip_l.safetensors",
+    type="flux",
+)
+cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
+cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
+imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
+getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
+vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
+
+# To be added to `model_loaders` as it loads a model
+vaeloader_359 = vaeloader.load_vae(vae_name="FLUX1/ae.safetensors")
+
+vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
+unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
+
+# To be added to `model_loaders` as it loads a model
+unetloader_358 = unetloader.load_unet(
+    unet_name="flux1-depth-dev.safetensors", weight_dtype="default"
+)
+ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
+randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
+fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
+depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
+downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS[
+    "DownloadAndLoadDepthAnythingV2Model"
+]()
+
+# To be added to `model_loaders` as it loads a model
+downloadandloaddepthanythingv2model_437 = (
+    downloadandloaddepthanythingv2model.loadmodel(
+        model="depth_anything_v2_vitl_fp32.safetensors"
+    )
+)
+instructpixtopixconditioning = NODE_CLASS_MAPPINGS[
+    "InstructPixToPixConditioning"
+]()
+text_multiline = NODE_CLASS_MAPPINGS["Text Multiline"]()
+text_multiline_454 = text_multiline.text_multiline(text="FLUX_Redux")
+clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+
+# To be added to `model_loaders` as it loads a model
+clipvisionloader_438 = clipvisionloader.load_clip(
+    clip_name="sigclip_vision_patch14_384.safetensors"
+)
+clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
+stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
+
+# To be added to `model_loaders` as it loads a model
+stylemodelloader_441 = stylemodelloader.load_style_model(
+    style_model_name="flux1-redux-dev.safetensors"
+)
+emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+cr_conditioning_input_switch = NODE_CLASS_MAPPINGS[
+    "CR Conditioning Input Switch"
+]()
+cr_model_input_switch = NODE_CLASS_MAPPINGS["CR Model Input Switch"]()
+stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
+basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
+basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
+samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
+vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
+saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
+imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
+
+# Add all the models that load a safetensors file
+model_loaders = [dualcliploader_357, vaeloader_359, unetloader_358, clipvisionloader_438, stylemodelloader_441, downloadandloaddepthanythingv2model_437]
+
+# Check which models are valid and how to best load them
 valid_models = [
-    model for model in model_loaders
-    if model is not None and len(model) > 0
+    getattr(loader[0], 'patcher', loader[0])
+    for loader in model_loaders
+    if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
 ]
 
+# Finally, load the models
+model_management.load_models_gpu(valid_models)
+
 @spaces.GPU(duration=60)
 def generate_image(prompt, structure_image, style_image, depth_strength, style_strength):
+    import_custom_nodes()
     with torch.inference_mode():
-        # Set up image dimensions
-        width = 1024
-        height = 1024
-
-        # Load and process the input images
-        loaded_structure = loadimage.load_image(structure_image)
-        loaded_style = loadimage.load_image(style_image)
-
-        # Scale images if needed
-        scaled_structure = imagescale.upscale(loaded_structure, width, height, "lanczos", "center")
-        scaled_style = imagescale.upscale(loaded_style, width, height, "lanczos", "center")
-
-        # Create empty latent
-        latent = emptylatentimage.generate(width, height, 1)
-
-        # Encode the prompt
-        conditioning = cliptextencode.encode(
-            clip=get_value_at_index(dualcliploader.load_clip(
-                clip_name1="t5/t5xxl_fp16.safetensors",
-                clip_name2="clip_l.safetensors",
-                type="flux",
-            ), 0),
-            text=prompt
-        )
-
-        # Sample the image
-        sampled = ksampler.sample(
-            model=get_value_at_index(unetloader.load_unet("diffusion_models/flux1-depth-dev.safetensors"), 0),
-            positive=conditioning,
-            negative=None,
-            latent=latent,
-            seed=random.randint(1, 2**32),
-            steps=20,
-            cfg=7.5,
-            sampler_name="euler",
-            scheduler="normal",
-            denoise=1.0,
-        )
-
-        # Decode the latent to image
-        decoded = vaedecode.decode(
-            samples=sampled,
-            vae=get_value_at_index(vaeloader.load_vae("vae/FLUX1/ae.safetensors"), 0)
-        )
-
-        # Save the final image
-        saved = saveimage.save_images(decoded)
-        return saved
+        intconstant_83 = intconstant.get_value(value=1024)
+
+        intconstant_84 = intconstant.get_value(value=1024)
+
+        cr_clip_input_switch_319 = cr_clip_input_switch.switch(
+            Input=1,
+            clip1=get_value_at_index(dualcliploader_357, 0),
+            clip2=get_value_at_index(dualcliploader_357, 0),
+        )
+
+        cliptextencode_174 = cliptextencode.encode(
+            text=prompt,
+            clip=get_value_at_index(cr_clip_input_switch_319, 0),
+        )
+
+        cliptextencode_175 = cliptextencode.encode(
+            text="purple", clip=get_value_at_index(cr_clip_input_switch_319, 0)
+        )
+
+        loadimage_429 = loadimage.load_image(image=structure_image)
+
+        imageresize_72 = imageresize.execute(
+            width=get_value_at_index(intconstant_83, 0),
+            height=get_value_at_index(intconstant_84, 0),
+            interpolation="bicubic",
+            method="keep proportion",
+            condition="always",
+            multiple_of=16,
+            image=get_value_at_index(loadimage_429, 0),
+        )
+
+        getimagesizeandcount_360 = getimagesizeandcount.getsize(
+            image=get_value_at_index(imageresize_72, 0)
+        )
+
+        vaeencode_197 = vaeencode.encode(
+            pixels=get_value_at_index(getimagesizeandcount_360, 0),
+            vae=get_value_at_index(vaeloader_359, 0),
+        )
+
+        ksamplerselect_363 = ksamplerselect.get_sampler(sampler_name="euler")
+
+        randomnoise_365 = randomnoise.get_noise(noise_seed=random.randint(1, 2**64))
+
+        fluxguidance_430 = fluxguidance.append(
+            guidance=15, conditioning=get_value_at_index(cliptextencode_174, 0)
+        )
+
+        depthanything_v2_436 = depthanything_v2.process(
+            da_model=get_value_at_index(downloadandloaddepthanythingv2model_437, 0),
+            images=get_value_at_index(getimagesizeandcount_360, 0),
+        )
+
+        instructpixtopixconditioning_431 = instructpixtopixconditioning.encode(
+            positive=get_value_at_index(fluxguidance_430, 0),
+            negative=get_value_at_index(cliptextencode_175, 0),
+            vae=get_value_at_index(vaeloader_359, 0),
+            pixels=get_value_at_index(depthanything_v2_436, 0),
+        )
+
+        loadimage_440 = loadimage.load_image(image=style_image)
+
+        clipvisionencode_439 = clipvisionencode.encode(
+            crop="center",
+            clip_vision=get_value_at_index(clipvisionloader_438, 0),
+            image=get_value_at_index(loadimage_440, 0),
+        )
+
+        emptylatentimage_10 = emptylatentimage.generate(
+            width=get_value_at_index(imageresize_72, 1),
+            height=get_value_at_index(imageresize_72, 2),
+            batch_size=1,
+        )
+
+        cr_conditioning_input_switch_271 = cr_conditioning_input_switch.switch(
+            Input=1,
+            conditioning1=get_value_at_index(instructpixtopixconditioning_431, 0),
+            conditioning2=get_value_at_index(instructpixtopixconditioning_431, 0),
+        )
+
+        cr_conditioning_input_switch_272 = cr_conditioning_input_switch.switch(
+            Input=1,
+            conditioning1=get_value_at_index(instructpixtopixconditioning_431, 1),
+            conditioning2=get_value_at_index(instructpixtopixconditioning_431, 1),
+        )
+
+        cr_model_input_switch_320 = cr_model_input_switch.switch(
+            Input=1,
+            model1=get_value_at_index(unetloader_358, 0),
+            model2=get_value_at_index(unetloader_358, 0),
+        )
+
+        stylemodelapplyadvanced_442 = stylemodelapplyadvanced.apply_stylemodel(
+            strength=style_strength,
+            conditioning=get_value_at_index(instructpixtopixconditioning_431, 0),
+            style_model=get_value_at_index(stylemodelloader_441, 0),
+            clip_vision_output=get_value_at_index(clipvisionencode_439, 0),
+        )
+
+        basicguider_366 = basicguider.get_guider(
+            model=get_value_at_index(cr_model_input_switch_320, 0),
+            conditioning=get_value_at_index(stylemodelapplyadvanced_442, 0),
+        )
+
+        basicscheduler_364 = basicscheduler.get_sigmas(
+            scheduler="simple",
+            steps=28,
+            denoise=1,
+            model=get_value_at_index(cr_model_input_switch_320, 0),
+        )
+
+        samplercustomadvanced_362 = samplercustomadvanced.sample(
+            noise=get_value_at_index(randomnoise_365, 0),
+            guider=get_value_at_index(basicguider_366, 0),
+            sampler=get_value_at_index(ksamplerselect_363, 0),
+            sigmas=get_value_at_index(basicscheduler_364, 0),
+            latent_image=get_value_at_index(emptylatentimage_10, 0),
+        )
+
+        vaedecode_321 = vaedecode.decode(
+            samples=get_value_at_index(samplercustomadvanced_362, 0),
+            vae=get_value_at_index(vaeloader_359, 0),
+        )
+
+        saveimage_327 = saveimage.save_images(
+            filename_prefix=get_value_at_index(text_multiline_454, 0),
+            images=get_value_at_index(vaedecode_321, 0),
+        )
+
+        fluxguidance_382 = fluxguidance.append(
+            guidance=depth_strength,
+            conditioning=get_value_at_index(cr_conditioning_input_switch_272, 0),
+        )
+
+        imagecrop_447 = imagecrop.execute(
+            width=2000,
+            height=2000,
+            position="top-center",
+            x_offset=0,
+            y_offset=0,
+            image=get_value_at_index(loadimage_440, 0),
+        )
+
+        saved_path = f"output/{saveimage_327['ui']['images'][0]['filename']}"
+        return saved_path
 
 if __name__ == "__main__":
     # Comment out the main() call
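A note on the helper this diff leans on everywhere: each get_value_at_index(node_result, 0) call unpacks the first output of a ComfyUI node result. The function body lies outside these hunks, so the following is only a minimal sketch of the behavior its docstring describes; the "results" fallback is an assumption taken from that docstring, not from the commit.

    from typing import Any, Mapping, Sequence, Union

    def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
        """Sketch: index sequences directly; index mappings by their index-th key."""
        try:
            # Sequences (and mappings that happen to contain `index` as a key).
            return obj[index]
        except (KeyError, TypeError):
            # Mappings: prefer a "results" entry when present (per the docstring),
            # otherwise fall back to the index-th key.
            if "results" in obj:
                return obj["results"][index]
            return obj[list(obj.keys())[index]]

For a loader result such as dualcliploader_357, get_value_at_index(dualcliploader_357, 0) is then simply the first element of the node's output tuple (here, the CLIP model).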
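The module-level valid_models comprehension is the commit's pre-loading trick: each loader node returns a tuple whose first element is either a bare model/patcher, an object wrapping its ModelPatcher on a .patcher attribute, or a plain dict that model_management.load_models_gpu cannot accept. A toy illustration of the unwrap-and-filter logic, using stand-in classes rather than real ComfyUI objects:

    class FakePatcher:  # stand-in for comfy's ModelPatcher
        pass

    class WrappedModel:  # stand-in for a loader output that wraps its patcher
        patcher = FakePatcher()

    model_loaders = [
        (WrappedModel(),),  # unwrapped to its .patcher
        (FakePatcher(),),   # already patcher-like, passed through
        ({"config": 1},),   # dict result, filtered out
    ]

    valid_models = [
        getattr(loader[0], "patcher", loader[0])
        for loader in model_loaders
        if not isinstance(loader[0], dict)
        and not isinstance(getattr(loader[0], "patcher", None), dict)
    ]
    assert len(valid_models) == 2  # only the dict result was skipped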
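Two structural choices above are worth flagging for anyone adapting this Space. First, import_custom_nodes() moved from module scope into generate_image, so node discovery now happens inside the @spaces.GPU-decorated call: on ZeroGPU hardware, module import stays CPU-only and a GPU is attached only for the decorated function's duration (capped here at 60 s). A minimal sketch of that shape, assuming only the public spaces and torch packages:

    import spaces
    import torch

    @spaces.GPU(duration=60)  # ZeroGPU attaches a GPU only while this call runs
    def gpu_probe() -> bool:
        # Hypothetical probe, not part of the commit: CUDA becomes visible
        # inside the decorated call even though module import ran CPU-only.
        return torch.cuda.is_available()

Second, generate_image now returns saved_path built from saveimage_327["ui"]["images"][0]["filename"]; the output/ prefix assumes ComfyUI's default output directory. Note also that vaeencode_197, fluxguidance_382, and imagecrop_447 are computed but never consumed in this version of the graph.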