# Custom handler for a Hugging Face Inference Endpoint: fetches a screenshot from a
# URL and returns a mean-pooled embedding from Pix2Struct's vision encoder.
from typing import Any, Dict, List

import requests
import torch
from PIL import Image
from transformers import AutoProcessor, Pix2StructVisionModel
MODEL = "google/pix2struct-screen2words-large"
class EndpointHandler():
    def __init__(self, path=""):
        #self.processor = AutoProcessor.from_pretrained("jasper-lu/pix2struct_embedding")
        #self.model = MarkupLMModel.from_pretrained("jasper-lu/pix2struct_embedding")
        self.processor = AutoProcessor.from_pretrained(MODEL)
        # Disable the VQA-style prompt rendering so the processor only patches the image.
        self.processor.image_processor.is_vqa = False
        self.model = Pix2StructVisionModel.from_pretrained(MODEL).cuda()

    def __call__(self, data: Any) -> Dict[str, List[float]]:
        # The request payload carries the image URL under "inputs".
        url = data.pop("inputs", data)
        image = Image.open(requests.get(url, stream=True).raw)
        # BatchFeature has no .cuda(); move the tensors to the GPU with .to().
        inputs = self.processor(images=image, return_tensors="pt").to("cuda")
        with torch.no_grad():
            outputs = self.model(**inputs)
        last_hidden_state = outputs["last_hidden_state"]
        # Mean-pool the patch embeddings into a single flat vector.
        embedding = torch.mean(last_hidden_state, dim=1).flatten().tolist()
        return {"embedding": embedding}
"""
handler = EndpointHandler()
output = handler({"inputs": "https://figma-staging-api.s3.us-west-2.amazonaws.com/images/a8c6a0cc-c022-4f3a-9fc5-ac8582c964dd"})
print(output)
"""