from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import torch

# Load the processor and model only once, at import time
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

def extract_weight(image):
    try:
        # Convert the input image into the pixel tensor the encoder expects
        # (the processor handles resizing and normalization)
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        # Run inference without tracking gradients
        with torch.no_grad():
            generated_ids = model.generate(pixel_values)
        # Decode the generated token IDs back into text
        text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return text.strip()
    except Exception as e:
        return f"Error: {str(e)}"