xelpmocAI committed on
Commit
1c78289
·
verified ·
1 Parent(s): db47c8b
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -56,12 +56,14 @@ def demo(image_path, prompt):
56
  ],
57
  }
58
  ]
59
-
60
  # Preparation for inference
61
  text = processor.apply_chat_template(
62
  messages, tokenize=False, add_generation_prompt=True
63
  )
 
64
  image_inputs, video_inputs = process_vision_info(messages)
 
65
  inputs = processor(
66
  text=[text],
67
  images=image_inputs,
@@ -69,16 +71,20 @@ def demo(image_path, prompt):
69
  padding=True,
70
  return_tensors="pt",
71
  )
 
72
  inputs = inputs.to("cuda")
73
-
74
  # Inference: Generation of the output
75
  generated_ids = model.generate(**inputs, max_new_tokens=1500)
 
76
  generated_ids_trimmed = [
77
  out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
78
  ]
 
79
  output_text = processor.batch_decode(
80
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
81
  )
 
82
 
83
  # Handle output text to convert it into JSON
84
  try:
 
56
  ],
57
  }
58
  ]
59
+ print("1")
60
  # Preparation for inference
61
  text = processor.apply_chat_template(
62
  messages, tokenize=False, add_generation_prompt=True
63
  )
64
+ print("2")
65
  image_inputs, video_inputs = process_vision_info(messages)
66
+ print("3")
67
  inputs = processor(
68
  text=[text],
69
  images=image_inputs,
 
71
  padding=True,
72
  return_tensors="pt",
73
  )
74
+ print("4")
75
  inputs = inputs.to("cuda")
76
+ print("5")
77
  # Inference: Generation of the output
78
  generated_ids = model.generate(**inputs, max_new_tokens=1500)
79
+ print("6")
80
  generated_ids_trimmed = [
81
  out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
82
  ]
83
+ print("7")
84
  output_text = processor.batch_decode(
85
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
86
  )
87
+ print("8")
88
 
89
  # Handle output text to convert it into JSON
90
  try: