add demo
app.py CHANGED
@@ -2,17 +2,15 @@ import gradio as gr
 import torch
 from PIL import Image
 import requests
-from transformers import AutoProcessor
-from
-from modeling_florence2 import Florence2ForCausalLM
-from processing_florence2 import Florence2Processor
+from transformers import AutoProcessor
+from modeling_florence2 import Florence2ForConditionalGeneration
 
 # Initialize model and processor
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
 
-model =
-processor =
+model = Florence2ForConditionalGeneration.from_pretrained("PleIAs/Florence-PDF", torch_dtype=torch_dtype, trust_remote_code=True).to(device)
+processor = AutoProcessor.from_pretrained("PleIAs/Florence-PDF", trust_remote_code=True)
 
 # Define task prompts
 TASK_PROMPTS = {
@@ -31,8 +29,7 @@ def process_image(image, task):
     inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)
 
     generated_ids = model.generate(
-
-        pixel_values=inputs["pixel_values"],
+        **inputs,
         max_new_tokens=1024,
         num_beams=3,
         do_sample=False
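
The hunks above end inside the generate(...) call and truncate TASK_PROMPTS, so the rest of process_image is not part of the diff. As a rough sketch of how the new **inputs call would typically be consumed, assuming the usual Florence-2 task tokens and the batch_decode / post_process_generation helpers from the bundled processing code (the task-token values and the decoding tail below are assumptions, not lines from app.py):

# Hypothetical sketch, not part of the diff: reuses the model, processor,
# device, and torch_dtype initialized at the top of app.py above.
TASK_PROMPTS = {
    "Caption": "<CAPTION>",      # assumed Florence-2 task tokens
    "OCR": "<OCR>",
    "Object Detection": "<OD>",
}

def process_image(image, task):
    prompt = TASK_PROMPTS[task]
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device, torch_dtype)

    generated_ids = model.generate(
        **inputs,
        max_new_tokens=1024,
        num_beams=3,
        do_sample=False
    )

    # Decode, then let the processor parse the task-specific output format.
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    return processor.post_process_generation(
        generated_text, task=prompt, image_size=(image.width, image.height)
    )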