AutoWeightLogger2 / ocr_engine.py
Sanjayraju30's picture
Update ocr_engine.py
c7e59f2 verified
raw
history blame
25.2 kB
You've indicated that the previous changes didn't resolve the issue, and the application is still failing to detect the weight from the image. To help debug and improve the accuracy, I'll provide the full `ocr_engine.py` code again, but this time with a few more refinements and **added visual debugging steps**. These debugging steps will save intermediate images, which can be very helpful in understanding where the detection process might be going wrong.
**Important Steps to Use This Debugging Version:**
1. **Replace `ocr_engine.py`:** Completely replace the content of your `ocr_engine.py` file with the code provided below.
2. **Debug Folder:** The code automatically creates a folder named `debug_images` (via `os.makedirs(..., exist_ok=True)`) in the same directory as your `app.py` and `ocr_engine.py` files. This is where the intermediate images will be saved; you do not need to create it manually.
3. **Run Your App:** Execute your `app.py` as usual.
4. **Upload Image and Check Debug Folder:** Upload the `Screenshot 2025-06-16 154525.png` image (or any other problematic image). After processing, check the `debug_images` folder. You should find several images showing:
* The original image.
* The grayscale version.
* The thresholded image used for ROI detection.
* The dilated image used for ROI detection.
* The detected ROI (cropped image).
* Thresholded image of the ROI used for digit detection.
* Individual digit images detected by EasyOCR.
* Sharpened and adaptively thresholded images used for general EasyOCR fallback.
By examining these images, we can pinpoint at which stage the OCR process is failing (e.g., if the ROI isn't detected correctly, if digits aren't isolated well, or if segments aren't properly recognized).
---
Here is the **full updated code for `ocr_engine.py`** with enhanced logic and visual debugging:
```python
import easyocr
import numpy as np
import cv2
import re
import logging
from datetime import datetime
import os
# Set up logging for debugging.
# INFO level so every save_debug_image call and OCR decision is traced.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Initialize EasyOCR once at import time (model loading is slow, so it must
# not happen per-image).
# Consider using 'en' and potentially 'ch_sim' or other relevant languages
# if your scales have non-English characters.
# gpu=True can speed up processing if a compatible GPU is available.
easyocr_reader = easyocr.Reader(['en'], gpu=False)

# Directory for debug images; created eagerly so save_debug_image never fails
# on a missing folder.
DEBUG_DIR = "debug_images"
os.makedirs(DEBUG_DIR, exist_ok=True)
def save_debug_image(img, filename_suffix, prefix=""):
    """Save an intermediate image to DEBUG_DIR for visual debugging.

    Args:
        img: BGR color or grayscale image (numpy array) accepted by
            ``cv2.imwrite``.
        filename_suffix: Short label describing the processing stage
            (e.g. "01_original").
        prefix: Optional filename prefix used to group related images.
    """
    # Microsecond-resolution timestamp keeps filenames unique across calls.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    filename = os.path.join(DEBUG_DIR, f"{prefix}{timestamp}_{filename_suffix}.png")
    # cv2.imwrite handles both 3-channel and single-channel arrays, so the
    # original color/grayscale branch (which had identical bodies) is removed.
    cv2.imwrite(filename, img)
    # Bug fix: the original logged the literal text "(unknown)" instead of
    # the actual saved path, making the debug log useless for locating files.
    logging.info(f"Saved debug image: {filename}")
def estimate_brightness(img):
    """Return the mean grayscale intensity of a BGR image.

    Used as a rough proxy for how strongly the scale's display is
    illuminated, which drives threshold selection elsewhere.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mean_intensity = np.mean(grayscale)
    return mean_intensity
def detect_roi(img):
    """Detect and crop the region of interest (likely the digital display).

    Args:
        img: BGR image (numpy array) of the whole scale photo.

    Returns:
        Tuple ``(roi_img, bbox)`` where ``roi_img`` is the cropped display
        region and ``bbox`` is ``(x, y, w, h)`` in original-image
        coordinates; on failure or when no plausible display is found,
        returns ``(img, None)`` so callers can fall back to full-image OCR.
    """
    try:
        save_debug_image(img, "01_original")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        save_debug_image(gray, "02_grayscale")
        brightness = estimate_brightness(img)
        # Adaptive thresholding based on brightness:
        # brighter scenes need a higher cutoff to isolate the lit display,
        # darker scenes a lower one. Tuned thresholds based on observed values.
        if brightness > 180:
            thresh_value = 230
        elif brightness > 100:
            thresh_value = 190
        else:
            thresh_value = 150  # Even lower for very dark images
        _, thresh = cv2.threshold(gray, thresh_value, 255, cv2.THRESH_BINARY)
        save_debug_image(thresh, f"03_roi_threshold_{thresh_value}")
        # Large kernel + several dilation passes merge the separate digit
        # segments into one solid blob so the display forms a single contour.
        kernel = np.ones((13, 13), np.uint8)
        dilated = cv2.dilate(thresh, kernel, iterations=5)
        save_debug_image(dilated, "04_roi_dilated")
        contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            # Filter contours by area range and shape.
            img_area = img.shape[0] * img.shape[1]
            valid_contours = []
            for c in contours:
                area = cv2.contourArea(c)
                # Drop tiny noise blobs and near-full-frame contours.
                if 1500 < area < (img_area * 0.9):
                    x, y, w, h = cv2.boundingRect(c)
                    aspect_ratio = w / h
                    # Typical digital displays are wide and short; also enforce
                    # a minimum pixel size.
                    if 2.0 <= aspect_ratio <= 5.5 and w > 100 and h > 50:
                        valid_contours.append(c)
            if valid_contours:
                # Largest-area candidate first; the loop returns on the first
                # one, so effectively the biggest valid contour wins.
                for contour in sorted(valid_contours, key=cv2.contourArea, reverse=True):
                    x, y, w, h = cv2.boundingRect(contour)
                    # Expand the ROI so digit edges aren't clipped.
                    padding = 40
                    x, y = max(0, x - padding), max(0, y - padding)
                    # Clamp the padded box to the image bounds.
                    w, h = min(w + 2 * padding, img.shape[1] - x), min(h + 2 * padding, img.shape[0] - y)
                    roi_img = img[y:y+h, x:x+w]
                    save_debug_image(roi_img, "05_detected_roi")
                    logging.info(f"Detected ROI with dimensions: ({x}, {y}, {w}, {h})")
                    return roi_img, (x, y, w, h)
        # No plausible display contour: hand back the full frame.
        logging.info("No suitable ROI found, returning original image for full image OCR attempt.")
        save_debug_image(img, "05_no_roi_original_fallback")
        return img, None
    except Exception as e:
        # Best-effort: any OpenCV failure degrades to full-image OCR rather
        # than aborting the pipeline.
        logging.error(f"ROI detection failed: {str(e)}")
        save_debug_image(img, "05_roi_detection_error_fallback")
        return img, None
def detect_segments(digit_img):
    """Classify a binarized digit crop by detecting lit seven-segment regions.

    Args:
        digit_img: 2-D binary image (values 0/255) containing one digit.

    Returns:
        The recognized digit as a one-character string ('0'-'9'), or
        ``None`` when the crop is too small to analyze reliably.
    """
    h, w = digit_img.shape
    if h < 15 or w < 10:  # Too small for meaningful segment analysis
        return None
    # Segment regions as (x1, x2, y1, y2) pixel boxes derived from fixed
    # fractions of the crop (top, middle, bottom bars; four side verticals).
    segments = {
        'top': (int(w*0.15), int(w*0.85), 0, int(h*0.2)),
        'middle': (int(w*0.15), int(w*0.85), int(h*0.4), int(h*0.6)),
        'bottom': (int(w*0.15), int(w*0.85), int(h*0.8), h),
        'left_top': (0, int(w*0.25), int(h*0.05), int(h*0.5)),
        'left_bottom': (0, int(w*0.25), int(h*0.5), int(h*0.95)),
        'right_top': (int(w*0.75), w, int(h*0.05), int(h*0.5)),
        'right_bottom': (int(w*0.75), w, int(h*0.5), int(h*0.95))
    }
    segment_presence = {}
    for name, (x1, x2, y1, y2) in segments.items():
        # Clamp coordinates to the image bounds.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        region = digit_img[y1:y2, x1:x2]
        if region.size == 0:
            segment_presence[name] = False
            continue
        # A segment counts as "on" when more than 55% of its region is white.
        pixel_count = np.sum(region == 255)
        total_pixels = region.size
        segment_presence[name] = pixel_count / total_pixels > 0.55
    # Canonical seven-segment pattern (segments that must be lit) per digit.
    digit_patterns = {
        '0': ('top', 'bottom', 'left_top', 'left_bottom', 'right_top', 'right_bottom'),
        '1': ('right_top', 'right_bottom'),
        '2': ('top', 'middle', 'bottom', 'left_bottom', 'right_top'),
        '3': ('top', 'middle', 'bottom', 'right_top', 'right_bottom'),
        '4': ('middle', 'left_top', 'right_top', 'right_bottom'),
        '5': ('top', 'middle', 'bottom', 'left_top', 'right_bottom'),
        '6': ('top', 'middle', 'bottom', 'left_top', 'left_bottom', 'right_bottom'),
        '7': ('top', 'right_top', 'right_bottom'),
        '8': ('top', 'middle', 'bottom', 'left_top', 'left_bottom', 'right_top', 'right_bottom'),
        '9': ('top', 'middle', 'bottom', 'left_top', 'right_top', 'right_bottom')
    }
    best_match = None
    max_score = -1  # Any real score (min possible is -7) may exceed this... start low
    for digit, pattern in digit_patterns.items():
        matches = sum(1 for segment in pattern if segment_presence.get(segment, False))
        # Penalize segments that are lit but should be off for this digit.
        non_matches_penalty = sum(
            1 for segment in segment_presence
            if segment not in pattern and segment_presence[segment]
        )
        current_score = matches - non_matches_penalty
        # Small bonus when every required segment is lit.
        if all(segment_presence.get(s, False) for s in pattern):
            current_score += 0.5
        if current_score > max_score:
            max_score = current_score
            best_match = digit
        elif current_score == max_score and best_match is not None:
            # Tie-breaking: prefer the digit with fewer spurious (extra) segments.
            current_digit_non_matches = sum(
                1 for segment in segment_presence
                if segment not in pattern and segment_presence[segment]
            )
            best_digit_pattern = digit_patterns[best_match]
            # Bug fix: the original indexed the presence dict with the whole
            # pattern tuple (segment_presence[best_digit_pattern]), which
            # raised KeyError whenever a score tie occurred; index with the
            # individual segment name instead.
            best_digit_non_matches = sum(
                1 for segment in segment_presence
                if segment not in best_digit_pattern and segment_presence[segment]
            )
            if current_digit_non_matches < best_digit_non_matches:
                best_match = digit
    return best_match
def custom_seven_segment_ocr(img, roi_bbox):
    """Perform custom OCR for seven-segment displays.

    Uses EasyOCR only to locate per-character bounding boxes, then runs
    segment-pattern matching (``detect_segments``) on each low-confidence
    digit crop.

    Args:
        img: BGR image of the (cropped) display region.
        roi_bbox: ROI bounding box from ``detect_roi``.
            NOTE(review): currently unused inside this function.

    Returns:
        The recognized numeric string (e.g. "75.5"), or ``None`` when
        nothing valid was read.
    """
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Binarize the ROI: Otsu picks the threshold automatically on bright
        # displays; a fixed low threshold works better on dark ones.
        brightness = estimate_brightness(img)
        if brightness > 150:
            _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        else:
            _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)  # Lower threshold for darker displays
        save_debug_image(thresh, "06_roi_thresh_for_digits")
        # EasyOCR pass restricted to digits/dot; high text_threshold for
        # confident boxes, y_ths tolerates vertical jitter between characters.
        results = easyocr_reader.readtext(thresh, detail=1, paragraph=False,
                                          contrast_ths=0.2, adjust_contrast=0.8,
                                          text_threshold=0.85, mag_ratio=1.5,
                                          allowlist='0123456789.', y_ths=0.2)
        if not results:
            logging.info("EasyOCR found no digits for custom seven-segment OCR.")
            return None
        # Collect single-character detections with a minimum box height.
        digits_info = []
        for (bbox, text, conf) in results:
            # bbox is four (x, y) corner points: TL, TR, BR, BL.
            (x1, y1), (x2, y2), (x3, y3), (x4, y4) = bbox
            h_bbox = max(y1,y2,y3,y4) - min(y1,y2,y3,y4)
            # Keep only single digits or a decimal point, tall enough to trust.
            if len(text) == 1 and (text.isdigit() or text == '.') and h_bbox > 10:
                x_min, x_max = int(min(x1, x4)), int(max(x2, x3))
                y_min, y_max = int(min(y1, y2)), int(max(y3, y4))
                digits_info.append((x_min, x_max, y_min, y_max, text, conf))
        # Sort left-to-right so characters concatenate in reading order.
        digits_info.sort(key=lambda x: x[0])
        recognized_text = ""
        for idx, (x_min, x_max, y_min, y_max, easyocr_char, easyocr_conf) in enumerate(digits_info):
            # Clamp the crop box to the thresholded image bounds.
            x_min, y_min = max(0, x_min), max(0, y_min)
            x_max, y_max = min(thresh.shape[1], x_max), min(thresh.shape[0], y_max)
            if x_max <= x_min or y_max <= y_min:
                continue  # degenerate box after clamping
            digit_img_crop = thresh[y_min:y_max, x_min:x_max]
            save_debug_image(digit_img_crop, f"07_digit_crop_{idx}_{easyocr_char}")
            # Trust EasyOCR directly for confident reads, decimal points, and
            # crops too small for reliable segment analysis.
            if easyocr_conf > 0.9 or easyocr_char == '.' or digit_img_crop.shape[0] < 20 or digit_img_crop.shape[1] < 15:
                recognized_text += easyocr_char
            else:
                # Otherwise re-classify the crop via seven-segment matching.
                digit_from_segments = detect_segments(digit_img_crop)
                if digit_from_segments:
                    recognized_text += digit_from_segments
                else:
                    # Segment detection failed too: fall back to EasyOCR's read.
                    recognized_text += easyocr_char
        # Validate/normalize the assembled string.
        text = recognized_text
        text = re.sub(r"[^\d\.]", "", text)  # strip any stray characters
        # Collapse to a single decimal point.
        # NOTE(review): str.replace removes the FIRST count-1 dots, so the
        # LAST dot is the one kept — confirm this is the intended behavior.
        if text.count('.') > 1:
            text = text.replace('.', '', text.count('.') - 1)
        # Accept digit strings with at most one dot and at least one digit.
        if text and re.fullmatch(r"^\d*\.?\d*$", text) and len(text.replace('.', '')) > 0:
            # Normalize ".5" -> "0.5".
            if text.startswith('.') and len(text) > 1:
                text = "0" + text
            # Normalize "5." -> "5".
            if text.endswith('.') and len(text) > 1:
                text = text.rstrip('.')
            # Guard against a lone dot or empty result after normalization.
            if text == '.' or text == '':
                return None
            return text
        logging.info(f"Custom OCR final text '{recognized_text}' failed validation.")
        return None
    except Exception as e:
        # Best-effort: caller falls back to general EasyOCR on any failure.
        logging.error(f"Custom seven-segment OCR failed: {str(e)}")
        return None
def extract_weight_from_image(pil_img):
    """Extract a weight reading from a photo of a digital scale.

    Pipeline: ROI detection -> custom seven-segment OCR -> (fallback)
    sharpened/adaptive-threshold general EasyOCR with candidate scoring.

    Args:
        pil_img: PIL image (RGB) of the scale.

    Returns:
        Tuple ``(weight_str, confidence_percent)``; ``("Not detected", 0.0)``
        when nothing valid was read.
    """
    try:
        img = np.array(pil_img)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # PIL is RGB, OpenCV expects BGR
        brightness = estimate_brightness(img)
        # Brighter images yield cleaner OCR, so demand higher confidence there.
        conf_threshold = 0.9 if brightness > 150 else (0.8 if brightness > 80 else 0.7)
        # Detect ROI (falls back to the full frame when none is found).
        roi_img, roi_bbox = detect_roi(img)
        # Try custom seven-segment OCR first.
        custom_result = custom_seven_segment_ocr(roi_img, roi_bbox)
        if custom_result:
            # Normalize: strip leading zeros (keep "0"/"0.x") and trailing
            # zeros after the decimal point.
            if "." in custom_result:
                int_part, dec_part = custom_result.split(".")
                int_part = int_part.lstrip("0") or "0"
                dec_part = dec_part.rstrip('0')
                if not dec_part and int_part != "0":  # "50." -> "50"
                    custom_result = int_part
                elif not dec_part and int_part == "0":  # "0." -> "0"
                    custom_result = "0"
                else:
                    custom_result = f"{int_part}.{dec_part}"
            else:
                custom_result = custom_result.lstrip('0') or "0"
            # Accept only if the normalized string parses as a number.
            try:
                float(custom_result)
                logging.info(f"Custom OCR result: {custom_result}, Confidence: 100.0%")
                return custom_result, 100.0  # High confidence for custom OCR
            except ValueError:
                logging.warning(f"Custom OCR result '{custom_result}' is not a valid number, falling back.")
                custom_result = None  # Force fallback
        # Fallback to general EasyOCR.
        logging.info("Custom OCR failed or invalid, falling back to general EasyOCR.")
        # More aggressive preprocessing for the fallback pass.
        processed_roi_img_gray = cv2.cvtColor(roi_img, cv2.COLOR_BGR2GRAY)
        # Standard 3x3 sharpening kernel to crisp up digit edges.
        kernel_sharpening = np.array([[-1,-1,-1],
                                      [-1,9,-1],
                                      [-1,-1,-1]])
        sharpened_roi = cv2.filter2D(processed_roi_img_gray, -1, kernel_sharpening)
        save_debug_image(sharpened_roi, "08_fallback_sharpened")
        # Adaptive threshold isolates digits under uneven lighting; block
        # size and C constant are tuning-critical.
        processed_roi_img_final = cv2.adaptiveThreshold(sharpened_roi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                        cv2.THRESH_BINARY, 15, 3)
        save_debug_image(processed_roi_img_final, "09_fallback_adaptive_thresh")
        # General EasyOCR pass: lower text_threshold than the custom pass,
        # digits/dot allowlist, batch_size for throughput.
        results = easyocr_reader.readtext(processed_roi_img_final, detail=1, paragraph=False,
                                          contrast_ths=0.3, adjust_contrast=0.9,
                                          text_threshold=0.6, mag_ratio=1.8,
                                          allowlist='0123456789.', batch_size=4, y_ths=0.3)
        best_weight = None
        best_conf = 0.0
        best_score = 0.0
        for (bbox, text, conf) in results:
            text = text.lower().strip()
            # Normalize common OCR confusions: punctuation to dots, letters
            # to look-alike digits.
            text = text.replace(",", ".").replace(";", ".").replace(":", ".").replace(" ", "")
            text = text.replace("o", "0").replace("O", "0").replace("q", "0").replace("Q", "0")
            text = text.replace("s", "5").replace("S", "5")
            text = text.replace("g", "9").replace("G", "6")
            text = text.replace("l", "1").replace("I", "1").replace("|", "1")
            text = text.replace("b", "8").replace("B", "8")
            text = text.replace("z", "2").replace("Z", "2")
            text = text.replace("a", "4").replace("A", "4")
            text = text.replace("e", "3")
            text = text.replace("t", "7")  # 't' can look like '7'
            text = text.replace("~", "")  # Common noise
            text = text.replace("`", "")
            # Strip weight units and any remaining non-numeric characters.
            text = re.sub(r"(kgs|kg|k|lb|g|gr|pounds|lbs)\b", "", text)
            text = re.sub(r"[^\d\.]", "", text)
            # Keep only the first decimal point.
            if text.count('.') > 1:
                parts = text.split('.')
                text = parts[0] + '.' + ''.join(parts[1:])
            text = text.strip('.')  # drop leading/trailing dots
            # Accept digits with an optional decimal of up to 3 places,
            # and at least one digit overall.
            if re.fullmatch(r"^\d*\.?\d{0,3}$", text) and len(text.replace('.', '')) > 0:
                try:
                    weight = float(text)
                    # Score candidates by plausibility of the value range...
                    range_score = 1.0
                    if 0.1 <= weight <= 250:  # common personal-scale range
                        range_score = 1.5
                    elif weight > 250 and weight <= 500:
                        range_score = 1.2
                    elif weight > 500 and weight <= 1000:
                        range_score = 1.0
                    else:  # very small or very large readings
                        range_score = 0.5
                    # ...and by digit count (2-5 digits is typical).
                    digit_count = len(text.replace('.', ''))
                    digit_score = 1.0
                    if digit_count >= 2 and digit_count <= 5:
                        digit_score = 1.3
                    elif digit_count == 1:
                        digit_score = 0.8
                    score = conf * range_score * digit_score
                    # Penalize detections whose box is implausibly small or
                    # thin relative to the ROI (likely noise/artifacts).
                    if roi_bbox:
                        (x_roi, y_roi, w_roi, h_roi) = roi_bbox
                        roi_area = w_roi * h_roi
                        x_min, y_min = int(min(b[0] for b in bbox)), int(min(b[1] for b in bbox))
                        x_max, y_max = int(max(b[0] for b in bbox)), int(max(b[1] for b in bbox))
                        bbox_area = (x_max - x_min) * (y_max - y_min)
                        if roi_area > 0 and bbox_area / roi_area < 0.03:
                            score *= 0.5
                        bbox_aspect_ratio = (x_max - x_min) / (y_max - y_min) if (y_max - y_min) > 0 else 0
                        if bbox_aspect_ratio < 0.2:  # very thin boxes
                            score *= 0.7
                    # Keep the best-scoring candidate above the confidence bar.
                    if score > best_score and conf > conf_threshold:
                        best_weight = text
                        best_conf = conf
                        best_score = score
                        logging.info(f"Candidate EasyOCR weight: '{text}', Conf: {conf}, Score: {score}")
                except ValueError:
                    logging.warning(f"Could not convert '{text}' to float during EasyOCR fallback.")
                    continue
        if not best_weight:
            logging.info("No valid weight detected after all attempts.")
            return "Not detected", 0.0
        # Final normalization of the chosen reading (mirrors the custom-OCR
        # normalization above).
        if "." in best_weight:
            int_part, dec_part = best_weight.split(".")
            int_part = int_part.lstrip("0") or "0"  # keep "0" for 0.x
            dec_part = dec_part.rstrip('0')
            if not dec_part and int_part != "0":  # "50." -> "50"
                best_weight = int_part
            elif not dec_part and int_part == "0":  # "0." -> "0"
                best_weight = "0"
            else:
                best_weight = f"{int_part}.{dec_part}"
        else:
            best_weight = best_weight.lstrip('0') or "0"
        # Sanity check: halve confidence for readings outside a plausible range.
        try:
            final_float_weight = float(best_weight)
            if final_float_weight < 0.01 or final_float_weight > 1000:
                logging.warning(f"Detected weight {final_float_weight} is outside typical range, reducing confidence.")
                best_conf *= 0.5
        except ValueError:
            pass  # unreachable if the earlier parsing succeeded
        logging.info(f"Final detected weight: {best_weight}, Confidence: {round(best_conf * 100, 2)}%")
        return best_weight, round(best_conf * 100, 2)
    except Exception as e:
        # Top-level guard so the UI always gets a well-formed result.
        logging.error(f"Weight extraction failed unexpectedly: {str(e)}")
        return "Not detected", 0.0
```