import os

from surya.input.langs import replace_lang_with_code, get_unique_langs
from surya.input.load import load_from_folder, load_from_file, load_lang_file
from surya.model.detection.model import load_model as load_detection_model, load_processor as load_detection_processor
from surya.model.recognition.model import load_model as load_recognition_model
from surya.model.recognition.processor import load_processor as load_recognition_processor
from surya.model.recognition.tokenizer import _tokenize
from surya.ocr import run_ocr
from surya.postprocessing.text import draw_text_on_image


def main(input_path, max_pages=None, start_page=0, langs=None, lang_file=None):
    assert langs or lang_file, "Must provide either langs or lang_file"

    # Load page images and names from a folder of images or a single file (e.g. a PDF)
    if os.path.isdir(input_path):
        images, names = load_from_folder(input_path, max_pages, start_page)
    else:
        images, names = load_from_file(input_path, max_pages, start_page)

    if lang_file:
        # Per-page language settings come from a file keyed by page name
        image_langs = load_lang_file(lang_file, names)
        for page_langs in image_langs:
            replace_lang_with_code(page_langs)
    else:
        # The same comma-separated language list applies to every page
        langs = langs.split(",")
        replace_lang_with_code(langs)
        image_langs = [langs] * len(images)

    det_processor = load_detection_processor()
    det_model = load_detection_model()

    # Prune the model's MoE layer to only include the languages we need
    _, lang_tokens = _tokenize("", get_unique_langs(image_langs))
    rec_model = load_recognition_model(langs=lang_tokens)
    rec_processor = load_recognition_processor()

    predictions_by_image = run_ocr(images, image_langs, det_model, det_processor, rec_model, rec_processor)

    # Render the recognized text back onto blank pages, one image per input page
    page_images = []
    for name, image, pred, page_langs in zip(names, images, predictions_by_image, image_langs):
        bboxes = [l.bbox for l in pred.text_lines]
        pred_text = [l.text for l in pred.text_lines]
        page_image = draw_text_on_image(bboxes, pred_text, image.size, page_langs, has_math="_math" in page_langs)
        page_images.append(page_image)
    return page_images
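

# Illustrative entry point (not part of the snippet above): a minimal sketch of how
# main() could be invoked from the command line. The flag names and argparse wiring
# are assumptions for demonstration, not surya's official CLI.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Run OCR on an image, PDF, or folder of images")
    parser.add_argument("input_path", help="Path to an image, PDF, or folder of images")
    parser.add_argument("--max_pages", type=int, default=None, help="Maximum number of pages to process")
    parser.add_argument("--start_page", type=int, default=0, help="Page to start processing from")
    parser.add_argument("--langs", type=str, default=None, help="Comma-separated languages, e.g. 'en,hi'")
    parser.add_argument("--lang_file", type=str, default=None, help="Path to a file mapping page names to languages")
    args = parser.parse_args()

    page_images = main(
        args.input_path,
        max_pages=args.max_pages,
        start_page=args.start_page,
        langs=args.langs,
        lang_file=args.lang_file,
    )
    for i, page_image in enumerate(page_images):
        page_image.save(f"text_{i}.png")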