## Usage
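
The snippet below downloads the checkpoint, builds a chat-style prompt around a single image, and runs greedy generation on a sample chest X-ray.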

```python
import requests
from PIL import Image
import torch
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration

model_id = "NicoZenith/onevision-7b-all-vqa-conv"

# Load the model in half precision and move it to the first GPU.
model = LlavaOnevisionForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).to(0)

processor = AutoProcessor.from_pretrained(model_id)

# Build a single-turn conversation with one text prompt and one image slot.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What can you say about this X-ray?"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# Fetch the example chest X-ray and prepare the model inputs.
image_file = "https://prod-images-static.radiopaedia.org/images/29923576/fed73420497c8622734f21ce20fc91_gallery.jpeg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(0, torch.float16)

# Greedy decoding; keep only the text after the assistant's turn marker.
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
response_text = processor.decode(output[0][2:], skip_special_tokens=True)
response_text = response_text.split("assistant\n")[-1]
print(response_text)
```
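
If the fp16 weights do not fit on your GPU, one common alternative is loading the model with 4-bit quantization through `bitsandbytes`. This is a minimal sketch, assuming `bitsandbytes` is installed; only the loading step changes, and the rest of the example stays the same:

```python
import torch
from transformers import AutoProcessor, BitsAndBytesConfig, LlavaOnevisionForConditionalGeneration

model_id = "NicoZenith/onevision-7b-all-vqa-conv"

# Assumption: bitsandbytes is available; quantize weights to 4-bit NF4
# while keeping fp16 as the compute dtype.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = LlavaOnevisionForConditionalGeneration.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",  # let accelerate place the quantized weights
)
processor = AutoProcessor.from_pretrained(model_id)
```

With `device_map="auto"` the model handles its own device placement, so move the processor outputs with `.to(model.device, torch.float16)` instead of `.to(0, torch.float16)`.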