li-qing committed on
Commit
d2b427c
1 Parent(s): 6aded7a

fix: fire model

Browse files
Files changed (1) hide show
  1. src/model/model_llava.py +3 -3
src/model/model_llava.py CHANGED
@@ -102,9 +102,9 @@ def inference_by_prompt_and_images_fire(prompt, images):
102
  for image in images:
103
  image_data.append(Image.open(BytesIO(base64.b64decode(image))))
104
  images = image_data
105
- image_tensor = process_images(images, image_processor_llava, model_llava.config)
106
  image_tensor = image_tensor.to(dtype=torch.float16, device=device)
107
- input_ids = tokenizer_image_token(prompt, tokenizer_llava, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
108
  image_sizes = [image.size for image in images]
109
  logger.info("Shape: {};{}; Devices: {};{}",input_ids.shape, image_tensor.shape, input_ids.device, image_tensor.device)
110
  with torch.inference_mode():
@@ -117,7 +117,7 @@ def inference_by_prompt_and_images_fire(prompt, images):
117
  max_new_tokens=256,
118
  use_cache=True
119
  )
120
- text_outputs = tokenizer_llava.batch_decode(cont, skip_special_tokens=True)
121
  logger.info("response={}", text_outputs)
122
  return text_outputs
123
 
 
102
  for image in images:
103
  image_data.append(Image.open(BytesIO(base64.b64decode(image))))
104
  images = image_data
105
+ image_tensor = process_images(images, image_processor_llava_fire, model_llava_fire.config)
106
  image_tensor = image_tensor.to(dtype=torch.float16, device=device)
107
+ input_ids = tokenizer_image_token(prompt, tokenizer_llava_fire, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
108
  image_sizes = [image.size for image in images]
109
  logger.info("Shape: {};{}; Devices: {};{}",input_ids.shape, image_tensor.shape, input_ids.device, image_tensor.device)
110
  with torch.inference_mode():
 
117
  max_new_tokens=256,
118
  use_cache=True
119
  )
120
+ text_outputs = tokenizer_llava_fire.batch_decode(cont, skip_special_tokens=True)
121
  logger.info("response={}", text_outputs)
122
  return text_outputs
123