JUNJIE99 committed · verified
Commit 89dccbf · Parent(s): e8000f1

Upload folder using huggingface_hub

modeling_llavanext_for_embedding.py CHANGED
@@ -305,7 +305,6 @@ class LLaVANextForEmbedding(LlavaNextForConditionalGeneration):
             text_input = self.prepare_text_input(images, text, q_or_c, task_instruction)
             text_input = [text_input]
 
-            print(text_input)
 
             if images is not None:
                 images = Image.open(images).resize((512,512)).convert("RGB")
@@ -315,10 +314,10 @@ class LLaVANextForEmbedding(LlavaNextForConditionalGeneration):
             inputs = self.processor(text=text_input, return_tensors="pt", padding=True)
 
         else:
+            if text is None:
+                text = [None] * len(images)
             text_input = [self.prepare_text_input(_image, _text, q_or_c, task_instruction) for _image, _text in zip(images, text)]
 
-            print(text_input)
-
             if images is not None:
                 images = [Image.open(_image).resize((512,512)).convert("RGB") for _image in images]
                 inputs = self.processor(images=images, text=text_input, return_tensors="pt", padding=True)
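For context: the commit removes two leftover debug print(text_input) calls and makes the batch (list-input) branch tolerate text=None. When a list of images arrives with no text, text is expanded to a matching list of Nones so that zip(images, text) yields one (image, None) pair per image instead of raising. A minimal standalone sketch of just that added logic follows; the image paths are hypothetical and the surrounding method body is not reproduced here.

    # Sketch of the added None-handling, shown outside the class for illustration only.
    images = ["example_1.jpg", "example_2.jpg"]  # hypothetical image paths
    text = None                                  # caller supplies no text

    # Without the added check, zip(images, None) raises
    # "TypeError: zip argument #2 must support iteration".
    if text is None:
        text = [None] * len(images)  # one placeholder per image (the committed fix)

    pairs = list(zip(images, text))
    print(pairs)  # [('example_1.jpg', None), ('example_2.jpg', None)]

Each (image, None) pair is then passed to prepare_text_input, which, per the diff, already accepts a missing text argument in the single-input branch.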