Nef Caballero committed on
Commit
be70f8e
·
1 Parent(s): b061f17

fix attempt for HG error 2

Browse files
Files changed (1) hide show
  1. app.py +26 -14
app.py CHANGED
@@ -100,7 +100,6 @@ def add_extra_model_paths() -> None:
100
  add_comfyui_directory_to_sys_path()
101
  add_extra_model_paths()
102
 
103
-
104
  def import_custom_nodes() -> None:
105
  """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
106
  This function sets up a new asyncio event loop, initializes the PromptServer,
@@ -122,18 +121,20 @@ def import_custom_nodes() -> None:
122
  # Initializing custom nodes
123
  init_extra_nodes()
124
 
 
 
125
 
 
126
  from nodes import NODE_CLASS_MAPPINGS
127
 
128
- intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
129
- dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
 
 
 
 
 
130
 
131
- #To be added to `model_loaders` as it loads a model
132
- dualcliploader_357 = dualcliploader.load_clip(
133
- clip_name1="t5/t5xxl_fp16.safetensors",
134
- clip_name2="clip_l.safetensors",
135
- type="flux",
136
- )
137
  cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
138
  cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
139
  loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
@@ -168,7 +169,7 @@ downloadandloaddepthanythingv2model_437 = (
168
  instructpixtopixconditioning = NODE_CLASS_MAPPINGS[
169
  "InstructPixToPixConditioning"
170
  ]()
171
- text_multiline_454 = text_multiline.text_multiline(text="FLUX_Redux")
172
  clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
173
 
174
  #To be added to `model_loaders` as it loads a model
@@ -197,7 +198,11 @@ saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
197
  imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
198
 
199
  #Add all the models that load a safetensors file
200
- model_loaders = [dualcliploader_357, vaeloader_359, unetloader_358, clipvisionloader_438, stylemodelloader_441, downloadandloaddepthanythingv2model_437]
 
 
 
 
201
 
202
  # Check which models are valid and how to best load them
203
  valid_models = [
@@ -211,7 +216,6 @@ model_management.load_models_gpu(valid_models)
211
 
212
  @spaces.GPU(duration=60)
213
  def generate_image(prompt, structure_image, style_image, depth_strength, style_strength):
214
- import_custom_nodes()
215
  with torch.inference_mode():
216
 
217
  intconstant_83 = intconstant.get_value(value=1024)
@@ -220,8 +224,16 @@ def generate_image(prompt, structure_image, style_image, depth_strength, style_s
220
 
221
  cr_clip_input_switch_319 = cr_clip_input_switch.switch(
222
  Input=1,
223
- clip1=get_value_at_index(dualcliploader_357, 0),
224
- clip2=get_value_at_index(dualcliploader_357, 0),
 
 
 
 
 
 
 
 
225
  )
226
 
227
  cliptextencode_174 = cliptextencode.encode(
 
100
  add_comfyui_directory_to_sys_path()
101
  add_extra_model_paths()
102
 
 
103
  def import_custom_nodes() -> None:
104
  """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
105
  This function sets up a new asyncio event loop, initializes the PromptServer,
 
121
  # Initializing custom nodes
122
  init_extra_nodes()
123
 
124
+ # Initialize nodes before using them
125
+ import_custom_nodes()
126
 
127
+ # Now import and use NODE_CLASS_MAPPINGS
128
  from nodes import NODE_CLASS_MAPPINGS
129
 
130
+ try:
131
+ intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
132
+ dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
133
+ except KeyError as e:
134
+ print(f"Error: Could not find node {e} in NODE_CLASS_MAPPINGS")
135
+ print("Available nodes:", list(NODE_CLASS_MAPPINGS.keys()))
136
+ raise
137
 
 
 
 
 
 
 
138
  cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
139
  cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
140
  loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
 
169
  instructpixtopixconditioning = NODE_CLASS_MAPPINGS[
170
  "InstructPixToPixConditioning"
171
  ]()
172
+ text_multiline_454 = NODE_CLASS_MAPPINGS["Text Multiline"].text_multiline(text="FLUX_Redux")
173
  clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
174
 
175
  #To be added to `model_loaders` as it loads a model
 
198
  imagecrop = NODE_CLASS_MAPPINGS["ImageCrop+"]()
199
 
200
  #Add all the models that load a safetensors file
201
+ model_loaders = [dualcliploader.load_clip(
202
+ clip_name1="t5/t5xxl_fp16.safetensors",
203
+ clip_name2="clip_l.safetensors",
204
+ type="flux",
205
+ ), vaeloader_359, unetloader_358, clipvisionloader_438, stylemodelloader_441, downloadandloaddepthanythingv2model_437]
206
 
207
  # Check which models are valid and how to best load them
208
  valid_models = [
 
216
 
217
  @spaces.GPU(duration=60)
218
  def generate_image(prompt, structure_image, style_image, depth_strength, style_strength):
 
219
  with torch.inference_mode():
220
 
221
  intconstant_83 = intconstant.get_value(value=1024)
 
224
 
225
  cr_clip_input_switch_319 = cr_clip_input_switch.switch(
226
  Input=1,
227
+ clip1=get_value_at_index(dualcliploader.load_clip(
228
+ clip_name1="t5/t5xxl_fp16.safetensors",
229
+ clip_name2="clip_l.safetensors",
230
+ type="flux",
231
+ ), 0),
232
+ clip2=get_value_at_index(dualcliploader.load_clip(
233
+ clip_name1="t5/t5xxl_fp16.safetensors",
234
+ clip_name2="clip_l.safetensors",
235
+ type="flux",
236
+ ), 0),
237
  )
238
 
239
  cliptextencode_174 = cliptextencode.encode(