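"""OCR model wrapper: loads the GOT-OCR2 CPU checkpoint once and reuses it for every request."""
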
import os

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer


class OCRModel:
    """Process-wide singleton holding the OCR tokenizer and model."""

    _instance = None

    def __new__(cls):
        # Create and initialize the shared instance only on first use.
        if cls._instance is None:
            cls._instance = super(OCRModel, cls).__new__(cls)
            cls._instance.initialize()
        return cls._instance

    def initialize(self):
        # The model location can be overridden via the MODEL_PATH environment
        # variable; the default is the CPU-converted GOT-OCR2 checkpoint.
        model_path = os.getenv('MODEL_PATH', 'RufusRubin777/GOT-OCR2_0_CPU')

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            local_files_only=False
        )

        self.model = AutoModel.from_pretrained(
            model_path,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            device_map='cpu',
            use_safetensors=True,
            pad_token_id=self.tokenizer.eos_token_id
        )

        # Inference only: put the model in eval mode to disable training behaviour.
        self.model = self.model.eval()

    def process_image(self, image_stream):
        try:
            image = Image.open(image_stream)

            # Run OCR without tracking gradients to keep CPU memory usage down.
            with torch.no_grad():
                result = self.model.chat(self.tokenizer, image, ocr_type='format')
            return result
        except Exception as e:
            return f"Error processing image: {str(e)}"