Spaces: Running on Zero

Commit: add debug msg

Browse files — changed:
- app.py (+1, -0)
- omini_control/flux_conceptrol_pipeline.py (+6, -3)
app.py
CHANGED
@@ -209,6 +209,7 @@ def generate(
|
|
209 |
image=image,
|
210 |
subject=subject,
|
211 |
num_inference_steps=num_inference_steps,
|
|
|
212 |
condition_scale=condition_scale,
|
213 |
control_guidance_start=control_guidance_start,
|
214 |
height=512,
|
|
|
209 |
image=image,
|
210 |
subject=subject,
|
211 |
num_inference_steps=num_inference_steps,
|
212 |
+
guidance_scale=guidance_scale,
|
213 |
condition_scale=condition_scale,
|
214 |
control_guidance_start=control_guidance_start,
|
215 |
height=512,
|
omini_control/flux_conceptrol_pipeline.py
CHANGED
@@ -97,9 +97,7 @@ class FluxConceptrolPipeline(FluxPipeline):
|
|
97 |
return_overflowing_tokens=False,
|
98 |
return_tensors="pt",
|
99 |
)
|
100 |
-
print("
|
101 |
-
print("Sbject Inputs:", subject_inputs)
|
102 |
-
print(self.find_subsequence(text_inputs, subject_inputs))
|
103 |
return self.find_subsequence(text_inputs, subject_inputs)
|
104 |
|
105 |
text_input_ids = text_inputs
|
@@ -196,6 +194,8 @@ class FluxConceptrolPipeline(FluxPipeline):
|
|
196 |
else:
|
197 |
batch_size = prompt_embeds.shape[0]
|
198 |
|
|
|
|
|
199 |
device = self._execution_device
|
200 |
|
201 |
lora_scale = (
|
@@ -292,6 +292,9 @@ class FluxConceptrolPipeline(FluxPipeline):
|
|
292 |
guidance = guidance.expand(latents.shape[0])
|
293 |
else:
|
294 |
guidance = None
|
|
|
|
|
|
|
295 |
noise_pred = tranformer_forward(
|
296 |
self.transformer,
|
297 |
model_config=model_config,
|
|
|
97 |
return_overflowing_tokens=False,
|
98 |
return_tensors="pt",
|
99 |
)
|
100 |
+
print(f"Locate {subject}", self.find_subsequence(text_inputs, subject_inputs))
|
|
|
|
|
101 |
return self.find_subsequence(text_inputs, subject_inputs)
|
102 |
|
103 |
text_input_ids = text_inputs
|
|
|
194 |
else:
|
195 |
batch_size = prompt_embeds.shape[0]
|
196 |
|
197 |
+
print(batch_size)
|
198 |
+
|
199 |
device = self._execution_device
|
200 |
|
201 |
lora_scale = (
|
|
|
292 |
guidance = guidance.expand(latents.shape[0])
|
293 |
else:
|
294 |
guidance = None
|
295 |
+
print("condition_latents.shape:", condition_latents.shape)
|
296 |
+
print("latent.shape:", latents.shape)
|
297 |
+
print("prompt_embeds.shape", prompt_embeds.shape)
|
298 |
noise_pred = tranformer_forward(
|
299 |
self.transformer,
|
300 |
model_config=model_config,
|