from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import torch
import re

# Load TrOCR model and processor once
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

def extract_weight(image):
    try:
        # OCR inference
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        generated_ids = model.generate(pixel_values)
        text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        print("OCR Output:", text)

        # Pattern to detect a weight with optional decimal and unit (g or kg)
        match = re.search(r'(\d{1,5}(?:\.\d{1,3})?)\s*(kg|g)', text.lower())
        if match:
            value = match.group(1)
            unit = match.group(2)
            return f"{value} {unit}"
        else:
            return "No valid weight found"
    except Exception as e:
        return f"Error: {str(e)}"
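
# Usage sketch (illustrative only): "scale_photo.jpg" is a placeholder path,
# not part of the original script. The image is converted to RGB because the
# TrOCR processor expects three-channel input.
if __name__ == "__main__":
    sample = Image.open("scale_photo.jpg").convert("RGB")
    print(extract_weight(sample))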