Nef Caballero committed
Commit 1f02a81 · 1 Parent(s): be70f8e

fix attempt for HG error 3

Files changed (1)
  1. app.py +71 -222
app.py CHANGED
@@ -127,253 +127,102 @@ import_custom_nodes()
 # Now import and use NODE_CLASS_MAPPINGS
 from nodes import NODE_CLASS_MAPPINGS
 
+# Create instances of the nodes we'll use
 try:
-    intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
+    # Load required models
     dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
+    vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
+    unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
+    clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+    stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
+
+    # Image processing nodes
+    loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
+    imagescale = NODE_CLASS_MAPPINGS["ImageScale"]()
+    vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
+    vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
+    saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
+
+    # Conditioning and sampling nodes
+    cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+    ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
+    emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+
 except KeyError as e:
     print(f"Error: Could not find node {e} in NODE_CLASS_MAPPINGS")
     print("Available nodes:", list(NODE_CLASS_MAPPINGS.keys()))
     raise
 
-cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
-cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
-loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
-imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
-getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
-vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
-
-#To be added to `model_loaders` as it loads a model
-vaeloader_359 = vaeloader.load_vae(vae_name="FLUX1/ae.safetensors")
-
-vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
-unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
-
-#To be added to `model_loaders` as it loads a model
-unetloader_358 = unetloader.load_unet(
-    unet_name="flux1-depth-dev.safetensors", weight_dtype="default"
-)
-ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
-randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
-fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
-depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
-downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS[
-    "DownloadAndLoadDepthAnythingV2Model"
-]()
-
-#To be added to `model_loaders` as it loads a model
-downloadandloaddepthanythingv2model_437 = (
-    downloadandloaddepthanythingv2model.loadmodel(
-        model="depth_anything_v2_vitl_fp32.safetensors"
-    )
-)
-instructpixtopixconditioning = NODE_CLASS_MAPPINGS[
-    "InstructPixToPixConditioning"
-]()
-text_multiline_454 = NODE_CLASS_MAPPINGS["Text Multiline"].text_multiline(text="FLUX_Redux")
-clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
-
-#To be added to `model_loaders` as it loads a model
-clipvisionloader_438 = clipvisionloader.load_clip(
-    clip_name="sigclip_vision_patch14_384.safetensors"
-)
-clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
-stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
-
-#To be added to `model_loaders` as it loads a model
-stylemodelloader_441 = stylemodelloader.load_style_model(
-    style_model_name="flux1-redux-dev.safetensors"
-)
-text_multiline = NODE_CLASS_MAPPINGS["Text Multiline"]()
-emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
-cr_conditioning_input_switch = NODE_CLASS_MAPPINGS[
-    "CR Conditioning Input Switch"
-]()
-cr_model_input_switch = NODE_CLASS_MAPPINGS["CR Model Input Switch"]()
-stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
-basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
-basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
-samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
-vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
-saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
-imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
-
-#Add all the models that load a safetensors file
-model_loaders = [dualcliploader.load_clip(
-    clip_name1="t5/t5xxl_fp16.safetensors",
-    clip_name2="clip_l.safetensors",
-    type="flux",
-), vaeloader_359, unetloader_358, clipvisionloader_438, stylemodelloader_441, downloadandloaddepthanythingv2model_437]
+# Load all the models that need a safetensors file
+model_loaders = [
+    dualcliploader.load_clip(
+        clip_name1="t5/t5xxl_fp16.safetensors",
+        clip_name2="clip_l.safetensors",
+        type="flux",
+    ),
+    vaeloader.load_vae("vae/FLUX1/ae.safetensors"),
+    unetloader.load_unet("diffusion_models/flux1-depth-dev.safetensors"),
+    clipvisionloader.load_clip("clip_vision/sigclip_vision_patch14_384.safetensors"),
+    stylemodelloader.load_style_model("style_models/flux1-redux-dev.safetensors")
+]
 
-# Check which models are valid and how to best load them
+# Check which models are valid
 valid_models = [
-    getattr(loader[0], 'patcher', loader[0])
-    for loader in model_loaders
-    if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
+    model for model in model_loaders
+    if model is not None and len(model) > 0
 ]
 
-#Finally loads the models
-model_management.load_models_gpu(valid_models)
-
 @spaces.GPU(duration=60)
 def generate_image(prompt, structure_image, style_image, depth_strength, style_strength):
     with torch.inference_mode():
-        intconstant_83 = intconstant.get_value(value=1024)
-
-        intconstant_84 = intconstant.get_value(value=1024)
-
-        cr_clip_input_switch_319 = cr_clip_input_switch.switch(
-            Input=1,
-            clip1=get_value_at_index(dualcliploader.load_clip(
-                clip_name1="t5/t5xxl_fp16.safetensors",
-                clip_name2="clip_l.safetensors",
-                type="flux",
-            ), 0),
-            clip2=get_value_at_index(dualcliploader.load_clip(
+        # Set up image dimensions
+        width = 1024
+        height = 1024
+
+        # Load and process the input images
+        loaded_structure = loadimage.load_image(structure_image)
+        loaded_style = loadimage.load_image(style_image)
+
+        # Scale images if needed
+        scaled_structure = imagescale.upscale(loaded_structure, width, height, "lanczos", "center")
+        scaled_style = imagescale.upscale(loaded_style, width, height, "lanczos", "center")
+
+        # Create empty latent
+        latent = emptylatentimage.generate(width, height, 1)
+
+        # Encode the prompt
+        conditioning = cliptextencode.encode(
+            clip=get_value_at_index(dualcliploader.load_clip(
                 clip_name1="t5/t5xxl_fp16.safetensors",
                 clip_name2="clip_l.safetensors",
                 type="flux",
             ), 0),
+            text=prompt
         )
-
-        cliptextencode_174 = cliptextencode.encode(
-            text=prompt,
-            clip=get_value_at_index(cr_clip_input_switch_319, 0),
-        )
-
-        cliptextencode_175 = cliptextencode.encode(
-            text="purple", clip=get_value_at_index(cr_clip_input_switch_319, 0)
-        )
-
-        loadimage_429 = loadimage.load_image(image=structure_image)
-
-        imageresize_72 = imageresize.execute(
-            width=get_value_at_index(intconstant_83, 0),
-            height=get_value_at_index(intconstant_84, 0),
-            interpolation="bicubic",
-            method="keep proportion",
-            condition="always",
-            multiple_of=16,
-            image=get_value_at_index(loadimage_429, 0),
-        )
-
-        getimagesizeandcount_360 = getimagesizeandcount.getsize(
-            image=get_value_at_index(imageresize_72, 0)
-        )
-
-        vaeencode_197 = vaeencode.encode(
-            pixels=get_value_at_index(getimagesizeandcount_360, 0),
-            vae=get_value_at_index(vaeloader_359, 0),
-        )
-
-        ksamplerselect_363 = ksamplerselect.get_sampler(sampler_name="euler")
-
-        randomnoise_365 = randomnoise.get_noise(noise_seed=random.randint(1, 2**64))
 
-        fluxguidance_430 = fluxguidance.append(
-            guidance=15, conditioning=get_value_at_index(cliptextencode_174, 0)
+        # Sample the image
+        sampled = ksampler.sample(
+            model=get_value_at_index(unetloader.load_unet("diffusion_models/flux1-depth-dev.safetensors"), 0),
+            positive=conditioning,
+            negative=None,
+            latent=latent,
+            seed=random.randint(1, 2**32),
+            steps=20,
+            cfg=7.5,
+            sampler_name="euler",
+            scheduler="normal",
+            denoise=1.0,
         )
-
-        depthanything_v2_436 = depthanything_v2.process(
-            da_model=get_value_at_index(downloadandloaddepthanythingv2model_437, 0),
-            images=get_value_at_index(getimagesizeandcount_360, 0),
-        )
-
-        instructpixtopixconditioning_431 = instructpixtopixconditioning.encode(
-            positive=get_value_at_index(fluxguidance_430, 0),
-            negative=get_value_at_index(cliptextencode_175, 0),
-            vae=get_value_at_index(vaeloader_359, 0),
-            pixels=get_value_at_index(depthanything_v2_436, 0),
-        )
-
-        loadimage_440 = loadimage.load_image(image=style_image)
 
-        clipvisionencode_439 = clipvisionencode.encode(
-            crop="center",
-            clip_vision=get_value_at_index(clipvisionloader_438, 0),
-            image=get_value_at_index(loadimage_440, 0),
+        # Decode the latent to image
+        decoded = vaedecode.decode(
+            samples=sampled,
+            vae=get_value_at_index(vaeloader.load_vae("vae/FLUX1/ae.safetensors"), 0)
         )
 
-
-        emptylatentimage_10 = emptylatentimage.generate(
-            width=get_value_at_index(imageresize_72, 1),
-            height=get_value_at_index(imageresize_72, 2),
-            batch_size=1,
-        )
-
-        cr_conditioning_input_switch_271 = cr_conditioning_input_switch.switch(
-            Input=1,
-            conditioning1=get_value_at_index(instructpixtopixconditioning_431, 0),
-            conditioning2=get_value_at_index(instructpixtopixconditioning_431, 0),
-        )
-
-        cr_conditioning_input_switch_272 = cr_conditioning_input_switch.switch(
-            Input=1,
-            conditioning1=get_value_at_index(instructpixtopixconditioning_431, 1),
-            conditioning2=get_value_at_index(instructpixtopixconditioning_431, 1),
-        )
-
-        cr_model_input_switch_320 = cr_model_input_switch.switch(
-            Input=1,
-            model1=get_value_at_index(unetloader_358, 0),
-            model2=get_value_at_index(unetloader_358, 0),
-        )
-
-        stylemodelapplyadvanced_442 = stylemodelapplyadvanced.apply_stylemodel(
-            strength=style_strength,
-            conditioning=get_value_at_index(instructpixtopixconditioning_431, 0),
-            style_model=get_value_at_index(stylemodelloader_441, 0),
-            clip_vision_output=get_value_at_index(clipvisionencode_439, 0),
-        )
-
-        basicguider_366 = basicguider.get_guider(
-            model=get_value_at_index(cr_model_input_switch_320, 0),
-            conditioning=get_value_at_index(stylemodelapplyadvanced_442, 0),
-        )
-
-        basicscheduler_364 = basicscheduler.get_sigmas(
-            scheduler="simple",
-            steps=28,
-            denoise=1,
-            model=get_value_at_index(cr_model_input_switch_320, 0),
-        )
-
-        samplercustomadvanced_362 = samplercustomadvanced.sample(
-            noise=get_value_at_index(randomnoise_365, 0),
-            guider=get_value_at_index(basicguider_366, 0),
-            sampler=get_value_at_index(ksamplerselect_363, 0),
-            sigmas=get_value_at_index(basicscheduler_364, 0),
-            latent_image=get_value_at_index(emptylatentimage_10, 0),
-        )
-
-        vaedecode_321 = vaedecode.decode(
-            samples=get_value_at_index(samplercustomadvanced_362, 0),
-            vae=get_value_at_index(vaeloader_359, 0),
-        )
-
-        saveimage_327 = saveimage.save_images(
-            filename_prefix=get_value_at_index(text_multiline_454, 0),
-            images=get_value_at_index(vaedecode_321, 0),
-        )
-
-        fluxguidance_382 = fluxguidance.append(
-            guidance=depth_strength,
-            conditioning=get_value_at_index(cr_conditioning_input_switch_272, 0),
-        )
-
-        imagecrop_447 = imagecrop.execute(
-            width=2000,
-            height=2000,
-            position="top-center",
-            x_offset=0,
-            y_offset=0,
-            image=get_value_at_index(loadimage_440, 0),
-        )
-
-        saved_path = f"output/{saveimage_327['ui']['images'][0]['filename']}"
-        return saved_path
+        # Save the final image
+        saved = saveimage.save_images(decoded)
+        return saved
 
 if __name__ == "__main__":
     # Comment out the main() call
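
Note: the try/except that this commit keeps still aborts on the first missing node, so only one missing name is reported per run. A minimal sketch of a batch lookup, assuming only the NODE_CLASS_MAPPINGS dict that app.py already imports; the get_nodes helper and the unpacking shown are illustrative, not part of the commit:

from nodes import NODE_CLASS_MAPPINGS

def get_nodes(*names):
    # Collect every missing node name before raising, instead of
    # failing on the first KeyError.
    missing = [name for name in names if name not in NODE_CLASS_MAPPINGS]
    if missing:
        raise KeyError(
            f"Could not find nodes {missing} in NODE_CLASS_MAPPINGS; "
            f"available: {sorted(NODE_CLASS_MAPPINGS)}"
        )
    return tuple(NODE_CLASS_MAPPINGS[name]() for name in names)

# Hypothetical usage mirroring the instantiations in this commit:
dualcliploader, vaeloader, unetloader = get_nodes(
    "DualCLIPLoader", "VAELoader", "UNETLoader"
)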
 
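Two behavioral changes worth flagging from the diff: the deleted code returned a file path built from SaveImage's UI metadata, while the new generate_image returns the save_images result directly; and the deleted model_management.load_models_gpu(valid_models) call is not replaced, so valid_models is now computed but unused. A hedged usage sketch of the new entry point; the prompt, file names, and strength values below are made up:

# Hypothetical invocation; every argument value here is illustrative.
result = generate_image(
    prompt="a cozy wooden cabin in a snowy forest",
    structure_image="structure.png",
    style_image="style.png",
    depth_strength=15,
    style_strength=0.5,
)

# SaveImage.save_images returns UI metadata rather than a path; the deleted
# code unwrapped it like this:
#     saved_path = f"output/{result['ui']['images'][0]['filename']}"
print(result)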