import torch
from transformers import AutoProcessor, CLIPModel


class CLIPImageEncoder:
    """Encodes PIL images into CLIP image embedding vectors."""

    def __init__(self, device="cpu"):
        self.device = device
        # Load the model and its processor from the same checkpoint so the
        # preprocessing matches the weights, and move the model to the target device.
        self.model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(self.device)
        self.processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")

    def encode_image(self, image_pil):
        # Preprocess the image and move the resulting tensors to the target device.
        inputs = self.processor(images=image_pil, return_tensors="pt").to(self.device)
        # Inference only, so no gradients are needed.
        with torch.no_grad():
            image_features = self.model.get_image_features(**inputs)
        # Return the single embedding as a NumPy array on the CPU.
        return image_features.cpu().numpy()[0]
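

# A minimal usage sketch. Assumptions: Pillow is installed and a local image
# file exists; the path "example.jpg" is hypothetical and only illustrates
# how the encoder might be called.
if __name__ == "__main__":
    from PIL import Image

    encoder = CLIPImageEncoder(device="cpu")
    embedding = encoder.encode_image(Image.open("example.jpg"))
    # CLIP ViT-L/14 image embeddings are 768-dimensional vectors.
    print(embedding.shape)  # expected: (768,)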