from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image, ImageFilter
import torch
import re

# Load TrOCR model and processor
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

def clean_ocr_text(text):
    # Fix common OCR misreads
    text = text.replace(",", ".").replace("s", "5").replace("o", "0").replace("O", "0")
    return re.sub(r"[^\d.kg]", "", text.lower())  # keep digits, dot, k, g
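
# Illustrative check of clean_ocr_text (the input strings below are hypothetical
# OCR outputs, not values from the app):
#   clean_ocr_text("52,25 kg") -> "52.25kg"
#   clean_ocr_text("5o.5 g")   -> "50.5g"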

def restore_decimal(text):
    # Re-insert a decimal point dropped by OCR, assuming a two-digit integer part
    if re.fullmatch(r"\d{4,5}", text):
        return f"{text[:2]}.{text[2:]}"
    return text
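
# Illustrative check of restore_decimal (hypothetical digit runs):
#   restore_decimal("5225")  -> "52.25"
#   restore_decimal("52255") -> "52.255"
#   restore_decimal("123")   -> "123"   (no 4-5 digit match, returned unchanged)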

def extract_unit_from_text(raw_text):
    raw_text = raw_text.lower()
    if "kg" in raw_text:
        return "kg"
    elif "g" in raw_text:
        return "g"
    return "g"  # fallback if unit not found

def extract_weight(image):
    try:
        # Enhance image: upscale 2x and sharpen before OCR
        image = image.resize((image.width * 2, image.height * 2), Image.BICUBIC)
        image = image.filter(ImageFilter.SHARPEN)

        # OCR inference
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        generated_ids = model.generate(pixel_values)
        raw_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

        cleaned = clean_ocr_text(raw_text)

        # Try direct match (e.g., 52.25 kg or 250.5g)
        match = re.search(r"(\d{1,3}\.\d{1,3})\s*(kg|g)?", cleaned)
        if match:
            return f"{match.group(1)} {match.group(2) or ''}".strip(), raw_text

        # Fallback if no decimal point was found: turn a digit run like 52255 into 52.255
        fallback_match = re.search(r"\d{4,5}", cleaned)
        if fallback_match:
            decimal_fixed = restore_decimal(fallback_match.group())
            return decimal_fixed, raw_text

        return "Error: No valid weight found", raw_text
    except Exception as e:
        return f"Error: {str(e)}", ""