eltorio committed
Commit ebf3ae4
Parent(s): f78193d
zerogpu initialization
app.py
ADDED
@@ -0,0 +1,33 @@
import gradio as gr
from transformers import AutoProcessor, Idefics3ForConditionalGeneration, image_utils
import torch

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model_id = "eltorio/IDEFICS3_ROCO"
# model = AutoModelForImageTextToText.from_pretrained(model_id).to(device)
base_model_path = "HuggingFaceM4/Idefics3-8B-Llama3"  # or change to local path
processor = AutoProcessor.from_pretrained(base_model_path)
model = Idefics3ForConditionalGeneration.from_pretrained(
    base_model_path, torch_dtype=torch.bfloat16
).to(device)

model.load_adapter(model_id)

def infere(image):
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": "What do we see in this image?"},
            ]
        },
    ]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=prompt, images=[image], return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}
    generated_ids = model.generate(**inputs, max_new_tokens=8192)
    generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_texts

demo = gr.Interface(fn=infere, inputs="image", outputs="text")
demo.launch()
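Note: the commit message mentions "zerogpu initialization", but the added file itself does not yet contain a ZeroGPU hook. On Hugging Face ZeroGPU Spaces, the usual pattern is to import the spaces package and decorate the GPU-bound function with @spaces.GPU so a GPU is attached only while that function runs. The sketch below is hypothetical and not part of this commit; it reuses processor, model, and device from app.py and assumes the Space runs on ZeroGPU hardware.

import spaces  # ZeroGPU helper, available on Hugging Face ZeroGPU Spaces (assumption: this Space targets ZeroGPU)

@spaces.GPU  # hypothetical change: attach a GPU only for the duration of each call
def infere(image):
    # Same inference body as above: build the chat prompt, run generation, decode.
    messages = [{"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "What do we see in this image?"},
    ]}]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=prompt, images=[image], return_tensors="pt")
    inputs = {k: v.to(device) for k, v in inputs.items()}
    generated_ids = model.generate(**inputs, max_new_tokens=8192)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)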