import os

# Install detectron2 and fetch Microsoft's unilm repository (which contains
# the DiT code) at startup.
os.system("pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html")
os.system("git clone https://github.com/microsoft/unilm.git")

import sys
sys.path.append("unilm")

import cv2

from unilm.dit.object_detection.ditod import add_vit_config

from detectron2.config import CfgNode as CN
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor

import gradio as gr

# Step 1: instantiate config
cfg = get_cfg()
add_vit_config(cfg)
cfg.merge_from_file("cascade_dit_base.yml")

# Step 2: add model weights URL to config
cfg.MODEL.WEIGHTS = "https://layoutlm.blob.core.windows.net/dit/dit-fts/publaynet_dit-b_cascade.pth"

# Step 3: set device
# TODO also support GPU
cfg.MODEL.DEVICE = "cpu"
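# A possible way to resolve the GPU TODO above (a sketch, not part of the
# original demo): select the device at runtime. This assumes a CUDA-enabled
# torch build; torch is a detectron2 dependency, so the import itself is safe.
#
# import torch
# cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"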
# Step 4: define model
predictor = DefaultPredictor(cfg)


def analyze_image(img):
    # Label predictions with the class names of the configured test dataset:
    # ICDAR 2019 only contains tables, PubLayNet has five layout categories.
    md = MetadataCatalog.get(cfg.DATASETS.TEST[0])
    if cfg.DATASETS.TEST[0] == "icdar2019_test":
        md.set(thing_classes=["table"])
    else:
        md.set(thing_classes=["text", "title", "list", "table", "figure"])

    output = predictor(img)["instances"]
    # `[:, :, ::-1]` reverses the channel order (RGB <-> BGR) for the
    # Visualizer; the drawn result is flipped back before returning.
    v = Visualizer(img[:, :, ::-1],
                   md,
                   scale=1.0,
                   instance_mode=ColorMode.SEGMENTATION)
    result = v.draw_instance_predictions(output.to("cpu"))
    result_image = result.get_image()[:, :, ::-1]

    return result_image


title = "Interactive demo: Document Layout Analysis with DiT"
description = "Demo for Microsoft's DiT, the Document Image Transformer for state-of-the-art document understanding tasks. This particular model is fine-tuned on PubLayNet, a large dataset for document layout analysis. To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2203.02378' target='_blank'>DiT: Self-supervised Pre-training for Document Image Transformer</a> | <a href='https://github.com/microsoft/unilm/tree/master/dit' target='_blank'>Github Repo</a></p>"
examples = [["publaynet_example.jpeg"]]
css = ".output-image, .input-image, .image-preview {height: 600px !important}"

iface = gr.Interface(fn=analyze_image,
                     inputs=gr.inputs.Image(type="numpy", label="document image"),
                     outputs=gr.outputs.Image(type="numpy", label="annotated document"),
                     title=title,
                     description=description,
                     examples=examples,
                     css=css,
                     enable_queue=True)
iface.launch(debug=True)