Spaces:
Running
Running
File size: 1,202 Bytes
11875d5 bef264e 9dbc07d 11875d5 7cce448 86af403 11875d5 2f3299d f51fc95 11875d5 2f3299d f51fc95 11875d5 bbb039c 11875d5 7901f6b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
import gradio as gr
from ocr_tamil.ocr import OCR
import torch
# Two CPU-only OCR engines from ocr_tamil:
#  - ocr_detect: detects text regions first, then recognizes them
#  - ocr_recognize: recognition only (expects an already-cropped text image)
# Instantiated once at module load so model weights are loaded a single time.
ocr_detect = OCR(detect=True,enable_cuda=False)
ocr_recognize = OCR(detect=False,enable_cuda=False)
def predict(image_path, mode):
    """Run Tamil OCR on one image and return the extracted text.

    image_path: numpy image array supplied by the Gradio Image component.
    mode: "recognize" runs recognition only; any other value runs
          detection + recognition.

    Returns the recognized text for the (single) input image as a string.
    """
    if mode == "recognize":
        results = ocr_recognize.predict(image_path)
    else:
        detected = ocr_detect.predict(image_path)
        # Detection mode yields a list of word strings per image; collapse
        # the first image's words into one space-separated string.
        results = [" ".join(detected[0])]
    # Only a single image is ever passed in, so return its first result.
    return results[0]
# Bundled sample images shown in the Examples gallery, paired one-to-one
# with the OCR mode each should run under.
image_examples = ["11.jpg", "4.jpg", "0.jpg", "tamil_handwritten.jpg",
                  "1.jpg", "2.jpg", "3.jpg", "5.jpg",
                  "6.jpg", "7.jpg", "10.jpg", "14.jpg"]
# FIX: the original mode list had only 11 entries for 12 images, so
# zip() silently dropped the last example ("14.jpg"). A 12th mode
# ("recognize", matching the surrounding entries) is added here.
mode_examples = ["recognize", "recognize", "detect", "detect",
                 "recognize", "recognize", "recognize", "recognize",
                 "recognize", "recognize", "recognize", "recognize"]
# Input widgets: the image to OCR and the processing mode.
input_1 = gr.Image(type="numpy")
input_2 = gr.Radio(["recognize", "detect"], label="mode",
                   info="Only Text recognition or need both Text detection + recognition")
# [image, mode] pairs consumed by gr.Interface(examples=...).
examples = [[i, j] for i, j in zip(image_examples, mode_examples)]
# Build the Gradio demo and start serving it.
output_box = gr.Textbox(
    label="Extracted Text",
    interactive=False,
    show_copy_button=True,
)
demo = gr.Interface(
    fn=predict,
    inputs=[input_1, input_2],
    outputs=output_box,
    title="OCR TAMIL",
    examples=examples,
)
demo.launch()
|