# Translate-Image / app.py
# Hugging Face Space by Omnibus — revision 734bc82 (1.47 kB)
import gradio as gr
import pandas as pd
import easyocr
import torch
import PIL
from PIL import Image
from PIL import ImageDraw
# Language codes accepted by the EasyOCR reader (order preserved as originally
# listed; passed through to easyocr.Reader).
ocr_lang = [
    'abq', 'ady', 'af', 'ang', 'ar', 'as', 'ava', 'az', 'be', 'bg',
    'bh', 'bho', 'bn', 'bs', 'ch_sim', 'ch_tra', 'che', 'cs', 'cy', 'da',
    'dar', 'de', 'en', 'es', 'et', 'fa', 'fr', 'ga', 'gom', 'hi',
    'hr', 'hu', 'id', 'inh', 'is', 'it', 'ja', 'kbd', 'kn', 'ko',
    'ku', 'la', 'lbe', 'lez', 'lt', 'lv', 'mah', 'mai', 'mi', 'mn',
    'mr', 'ms', 'mt', 'ne', 'new', 'nl', 'no', 'oc', 'pi', 'pl',
    'pt', 'ro', 'ru', 'rs_cyrillic', 'rs_latin', 'sck', 'sk', 'sl', 'sq', 'sv',
    'sw', 'ta', 'tab', 'te', 'th', 'tjk', 'tl', 'tr', 'ug', 'uk',
    'ur', 'uz', 'vi',
]
def draw_boxes(image, bounds, color='blue', width=1):
    """Outline every OCR detection on *image* and return it.

    The image is mutated in place; each entry of *bounds* is expected to carry
    its four corner points in ``entry[0]`` (the shape EasyOCR's ``readtext``
    produces).
    """
    pen = ImageDraw.Draw(image)
    for entry in bounds:
        top_left, top_right, bottom_right, bottom_left = entry[0]
        # Close the polygon by returning to the first corner.
        pen.line(
            [*top_left, *top_right, *bottom_right, *bottom_left, *top_left],
            fill=color,
            width=width,
        )
    return image
def detect(img, lang=None):
    """Run EasyOCR on an image file and return results for the Gradio outputs.

    Parameters
    ----------
    img : str
        Path to the input image (the UI uses ``gr.Image(type="filepath")``).
    lang : list[str] | None
        EasyOCR language codes; defaults to ``['en']``.

    Returns
    -------
    tuple
        ``(annotated PIL image, newline-joined recognized text,
        DataFrame of text/confidence rows)``.

    Notes
    -----
    The previous default ``lang=[ocr_lang, 'en']`` nested the entire language
    list inside another list (easyocr.Reader needs a flat list of code
    strings), and it was also a mutable default argument.  The previous
    slicing ``.iloc[: 0, -1:]`` selected zero rows, so both text outputs were
    always empty.
    """
    if lang is None:
        lang = ['en']
    reader = easyocr.Reader(lang)
    # Each detection is (corner_points, text, confidence).
    bounds = reader.readtext(img)
    im = PIL.Image.open(img)
    im_out = draw_boxes(im, bounds)
    columns = ['bbox', 'text', 'confidence']
    df = pd.DataFrame(bounds, columns=columns) if bounds else pd.DataFrame(columns=columns)
    text_out = "\n".join(df['text'].astype(str))
    return im_out, text_out, df[['text', 'confidence']]
# --- Gradio UI: upload an image, click the button, see OCR results ---
with gr.Blocks() as robot:
    input_image = gr.Image(type="filepath")
    run_button = gr.Button()
    annotated_image = gr.Image()
    with gr.Row():
        text_output = gr.Textbox(lines=8)
        table_output = gr.Dataframe()
    # Wire the button to detect(): one input, three outputs.
    run_button.click(detect, input_image, [annotated_image, text_output, table_output])
robot.queue(concurrency_count=10).launch()