Sanjayraju30 committed on
Commit a29f826 · verified · 1 Parent(s): 61b752b

Update ocr_engine.py

Files changed (1)
  1. ocr_engine.py +11 -19
ocr_engine.py CHANGED
@@ -1,32 +1,28 @@
-import easyocr
+from mmocr.apis import MMOCRInferencer
 import numpy as np
 import cv2
 import re
+from PIL import Image
 
-# Load EasyOCR reader
-reader = easyocr.Reader(['en'], gpu=False)
+# Initialize MMOCR
+ocr = MMOCRInferencer(det='DBNet', recog='SAR', device='cpu')  # or 'cuda' if GPU available
 
 def extract_weight_from_image(pil_img):
     try:
-        img = np.array(pil_img)
-
-        # Resize very large images
-        max_dim = 1000
-        height, width = img.shape[:2]
-        if max(height, width) > max_dim:
-            scale = max_dim / max(height, width)
-            img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
-
-        # OCR recognition
-        results = reader.readtext(img)
-        print("DEBUG OCR RESULTS:", results)
+        # Convert PIL to OpenCV image (BGR)
+        img = np.array(pil_img.convert("RGB"))[:, :, ::-1]
+
+        # Run MMOCR inference
+        result = ocr(img)
 
         raw_texts = []
         weight_candidates = []
         fallback_weight = None
         fallback_conf = 0.0
 
-        for _, (text, conf) in results:
+        for item in result['predictions'][0]:
+            text = item['text']
+            conf = item.get('score', 0.8)  # Fallback confidence
             original = text
             cleaned = text.lower().strip()
 
@@ -40,16 +36,13 @@ def extract_weight_from_image(pil_img):
 
             raw_texts.append(f"{original} → {cleaned} (conf: {round(conf, 2)})")
 
-            # Save fallback if no match later
             if cleaned and cleaned.replace(".", "").isdigit() and not fallback_weight:
                 fallback_weight = cleaned
                 fallback_conf = conf
 
-            # Match proper weight format: 75.02, 97.2, 105
            if cleaned.count(".") <= 1 and re.fullmatch(r"\d{2,4}(\.\d{1,3})?", cleaned):
                 weight_candidates.append((cleaned, conf))
 
-        # Choose best candidate
         if weight_candidates:
             best_weight, best_conf = sorted(weight_candidates, key=lambda x: -x[1])[0]
         elif fallback_weight:
@@ -57,7 +50,6 @@ def extract_weight_from_image(pil_img):
         else:
             return "Not detected", 0.0, "\n".join(raw_texts)
 
-        # Strip unnecessary leading zeros
         if "." in best_weight:
             int_part, dec_part = best_weight.split(".")
             int_part = int_part.lstrip("0") or "0"
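
For reference, a minimal usage sketch of the updated module. The image filename below is hypothetical, and the three-value return (weight string, confidence, raw OCR log) is inferred from the return statements visible in the diff above; nothing else about the caller is stated in the commit.

# Minimal usage sketch (assumptions: the module is importable as ocr_engine,
# and "scale_display.jpg" is a hypothetical local test image).
from PIL import Image
from ocr_engine import extract_weight_from_image

img = Image.open("scale_display.jpg")
weight, confidence, raw_log = extract_weight_from_image(img)

print(f"Weight: {weight} (confidence: {confidence})")
print("Raw OCR candidates:")
print(raw_log)

Note that the inferencer is constructed at module level, so importing ocr_engine requires the mmocr package (and its mmengine/mmcv dependencies) to be installed and will typically trigger loading of the DBNet and SAR checkpoints before the first call.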