Update app.py
Browse files
app.py
CHANGED
@@ -44,15 +44,15 @@ def describe_image(image, user_question="Solve this AP Problem step by step and
     )
 
     # Load and prepare the model
-    vl_gpt = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to(torch.
+    vl_gpt = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).to(torch.float16).eval()
 
     # Generate embeddings from the image input
-    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs).to(dtype=torch.
+    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs).to(dtype=torch.float16)
 
     # Generate the model's response
     outputs = vl_gpt.language_model.generate(
         inputs_embeds=inputs_embeds,
-        attention_mask = prepare_inputs.attention_mask.to(vl_gpt.device).to(dtype=torch.
+        attention_mask = prepare_inputs.attention_mask.to(vl_gpt.device).to(dtype=torch.float16),
         pad_token_id=tokenizer.eos_token_id,
         bos_token_id=tokenizer.bos_token_id,
         eos_token_id=tokenizer.eos_token_id,