from transformers import AutoModel
import numpy as np
from PIL import Image
import torch
import os

# Paths of the comic page images to process.
images = [
    "1.png",
    "1.jpg",
]

# Read an image from disk, convert it to grayscale and back to 3-channel RGB,
# and return it as a numpy array.
def read_image_as_np_array(image_path):
    with open(image_path, "rb") as file:
        image = Image.open(file).convert("L").convert("RGB")
        image = np.array(image)
    return image

images = [read_image_as_np_array(image) for image in images]

# Load the MAGI model from the Hugging Face Hub and move it to the GPU.
model = AutoModel.from_pretrained(
    "ragavsachdeva/magi", trust_remote_code=True).cuda()
# Alternatively, load from a local checkout of the repository:
# model = AutoModel.from_pretrained(
#     "./magi", trust_remote_code=True).cuda()

with torch.no_grad():
    # Detect panels, text blocks, and characters (plus their associations),
    # then run OCR on the detected text boxes.
    results = model.predict_detections_and_associations(images)
    text_bboxes_for_all_images = [x["texts"] for x in results]
    ocr_results = model.predict_ocr(images, text_bboxes_for_all_images)

# Save an annotated visualisation and a generated transcript for each page.
for i in range(len(images)):
    model.visualise_single_image_prediction(
        images[i], results[i], filename=f"image_{i}.png")
    model.generate_transcript_for_single_image(
        results[i], ocr_results[i], filename=f"transcript_{i}.txt")