from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import torch
import re

# Load processor and model once
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

def extract_weight(image):
    """Run TrOCR on an image and return the first plausible numeric reading, formatted in kg."""
    try:
        # OCR inference; no gradients are needed at inference time
        pixel_values = processor(images=image, return_tensors="pt").pixel_values
        with torch.no_grad():
            generated_ids = model.generate(pixel_values)
        text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        print("OCR Output:", text)

        # Extract a valid integer or float from the OCR result:
        # 2-5 digits with an optional 1-2 digit decimal part, e.g. 65 or 65.25
        match = re.search(r'\d{2,5}(\.\d{1,2})?', text)
        if match:
            return match.group() + " kg"
        else:
            return "No valid weight found"
    except Exception as e:
        return f"Error: {str(e)}"