import re

import cv2
import numpy as np
import pytesseract
from PIL import Image


def extract_weight_from_image(pil_img):
    try:
        # Step 1: Convert PIL image to a grayscale OpenCV array
        img = pil_img.convert("L")  # grayscale
        img = np.array(img)

        # Step 2: Resize image for better OCR accuracy
        img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)

        # Step 3: Apply Gaussian blur to remove noise
        blur = cv2.GaussianBlur(img, (5, 5), 0)

        # Step 4: Apply adaptive thresholding
        thresh = cv2.adaptiveThreshold(
            blur, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY_INV,
            11, 2
        )

        # Step 5: OCR config - digits and decimal point only
        config = r'--oem 3 --psm 6 -c tessedit_char_whitelist=0123456789.'

        # Step 6: Run OCR
        text = pytesseract.image_to_string(thresh, config=config)
        print("🔍 OCR RAW OUTPUT:", repr(text))  # view this in Hugging Face logs

        # Step 7: Extract the first numeric token (avoids concatenating digits
        # from separate lines of OCR output into one bogus number)
        match = re.search(r'\d+(?:\.\d+)?', text)
        weight = match.group(0) if match else ""
        confidence = 95 if weight else 0

        return weight, confidence

    except Exception as e:
        print("❌ OCR Exception:", str(e))
        return "", 0
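

# Minimal usage sketch. Assumption: a local test image ("scale_display.jpg") exists;
# the filename is hypothetical and only illustrates how the function is called.
if __name__ == "__main__":
    sample = Image.open("scale_display.jpg")
    value, conf = extract_weight_from_image(sample)
    print(f"Extracted weight: {value!r} (confidence: {conf})")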