# NOTE(review): the lines "Spaces:" / "Running" / "Running" here were
# Hugging Face Spaces page-header artifacts from a copy-paste, not code;
# replaced with this comment so the module parses.
import re

import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Load the TrOCR processor and model once at import time so that repeated
# extract_weight() calls reuse them instead of re-downloading / re-reading
# the checkpoint on every call.
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
# Matches 2-5 integer digits with an optional 1-2 digit fractional part,
# e.g. "65" or "5325.0". Compiled once at module scope instead of on
# every call.
_WEIGHT_RE = re.compile(r"\d{2,5}(\.\d{1,2})?")


def _parse_weight(text):
    """Return the first weight-like number in *text* formatted as '<n> kg',
    or the fallback message when no plausible number is present."""
    match = _WEIGHT_RE.search(text)
    if match:
        return match.group() + " kg"
    return "No valid weight found"


def extract_weight(image):
    """Run TrOCR on *image* and return the recognized weight as a string.

    Parameters:
        image: a PIL.Image (or anything the TrOCR processor accepts).

    Returns:
        '<number> kg' on success, 'No valid weight found' when the OCR text
        contains no plausible number, or 'Error: <message>' if inference
        fails for any reason.
    """
    try:
        # Only the OCR pipeline lives inside the try; parsing below cannot
        # raise on a str input.
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        # Inference only — no_grad avoids building the autograd graph.
        with torch.no_grad():
            generated_ids = model.generate(pixel_values)
        text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    except Exception as e:
        # Broad catch is deliberate: this is a UI-facing entry point that
        # must always return a string rather than propagate an exception.
        return f"Error: {str(e)}"
    print("OCR Output:", text)
    return _parse_weight(text)