# eye_for_blind/vit_gpt2.py
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
from PIL import Image
import warnings

warnings.filterwarnings('ignore')

# Load the pretrained ViT-GPT2 captioning model with its image processor and tokenizer.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Generation settings: short captions decoded with beam search.
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
def predict_step(img_array):
    """Generate a caption for an image supplied as a NumPy array."""
    i_image = Image.fromarray(img_array)
    if i_image.mode != "RGB":
        i_image = i_image.convert(mode="RGB")

    # Preprocess the image into normalized pixel values for the ViT encoder.
    pixel_values = feature_extractor(images=i_image, return_tensors="pt", do_normalize=True).pixel_values

    # Generate caption token ids with beam search and decode them to text.
    output_ids = model.generate(pixel_values, **gen_kwargs)
    pred = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    pred = [p.strip() for p in pred]
    return pred
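

# Example usage: a minimal sketch, not part of the original script.
# "example.jpg" is a hypothetical local image path, and numpy is assumed
# to be installed so the PIL image can be converted to an array.
if __name__ == "__main__":
    import numpy as np

    img = np.array(Image.open("example.jpg"))
    print(predict_step(img))  # prints a list containing one caption string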