import re

import cv2
import easyocr
import numpy as np

# Initialise the EasyOCR reader once at module load (English only, CPU mode).
reader = easyocr.Reader(['en'], gpu=False)


def enhance_image(img):
    """Pre-process an RGB image so the digits on the display OCR better."""
    # Downscale very large images so denoising and OCR stay fast.
    max_dim = 1000
    height, width = img.shape[:2]
    if max(height, width) > max_dim:
        scale = max_dim / max(height, width)
        img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

    # Grayscale, denoise, sharpen, then boost local contrast with CLAHE.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gray = cv2.fastNlMeansDenoising(gray, h=15)
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    sharp = cv2.filter2D(gray, -1, kernel)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    return clahe.apply(sharp)


def extract_weight_from_image(pil_img):
    """Return (weight, confidence %, debug log) extracted from a PIL image."""
    try:
        # Ensure a 3-channel RGB array so cvtColor(RGB2GRAY) is valid.
        img = np.array(pil_img.convert("RGB"))
        enhanced = enhance_image(img)

        results = reader.readtext(enhanced)
        print("DEBUG OCR RESULTS:", results)
        if not results:
            return "No text detected", 0.0, "OCR returned empty list"

        all_texts = []
        weight_candidates = []
        for _, text, conf in results:
            original = text
            cleaned = text.lower().strip()
            # Strip unit suffixes *before* character substitutions; otherwise
            # the "g" -> "9" swap turns "kg" into "k9" and adds a bogus digit.
            cleaned = cleaned.replace("kgs", "").replace("kg", "")
            cleaned = cleaned.replace(",", ".")
            # Common OCR confusions on seven-segment-style digits
            # (text is already lowercased above).
            cleaned = cleaned.replace("o", "0")
            cleaned = cleaned.replace("s", "5")
            cleaned = cleaned.replace("g", "9")
            cleaned = re.sub(r"[^\d\.]", "", cleaned)

            all_texts.append(f"{original} → {cleaned} (conf: {round(conf, 2)})")

            # Accept 2-4 integer digits with an optional 1-3 digit fraction.
            if re.fullmatch(r"\d{2,4}(\.\d{1,3})?", cleaned):
                weight_candidates.append((cleaned, conf))

        if not weight_candidates:
            return "Not detected", 0.0, "\n".join(all_texts)

        # Keep the candidate EasyOCR was most confident about.
        best_weight, best_conf = max(weight_candidates, key=lambda x: x[1])

        # Drop leading zeros ("075.5" -> "75.5") without losing a bare "0".
        if "." in best_weight:
            parts = best_weight.split(".")
            parts[0] = parts[0].lstrip("0") or "0"
            best_weight = ".".join(parts)
        else:
            best_weight = best_weight.lstrip("0") or "0"

        return best_weight, round(best_conf * 100, 2), "\n".join(all_texts)
    except Exception as e:
        return f"Error: {str(e)}", 0.0, "OCR failed"
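

# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal way to exercise the extractor end to end. The file name
# "scale_photo.jpg" is a hypothetical example image, not something
# shipped with this code.
if __name__ == "__main__":
    from PIL import Image

    image = Image.open("scale_photo.jpg")
    weight, confidence, debug_log = extract_weight_from_image(image)
    print(f"Weight: {weight} kg (confidence: {confidence}%)")
    print("OCR breakdown:\n" + debug_log)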