sussahoo committed on
Commit
d80d101
·
1 Parent(s): 2135879

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np
import requests
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from craft_text_detector import (
    Craft,
    read_image,
    load_craftnet_model,
    load_refinenet_model,
    get_prediction,
    export_detected_regions,
    export_extra_results,
    empty_cuda_cache,
)
14
+
15
# OCR model: TrOCR encoder-decoder fine-tuned on IAM handwritten text.
# Loaded once at module level so every request reuses the same weights.
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

# CRAFT text detector: locates per-line text regions that process_image()
# crops and feeds to TrOCR one at a time. Low link/text thresholds favor
# recall on handwriting. (Requires `Craft` and `torch` imports — the
# original diff used them without importing either.)
craft = Craft(
    output_dir=None,          # keep crops in memory, don't write files
    crop_type="poly",
    export_extra=False,
    link_threshold=0.1,
    text_threshold=0.3,
    cuda=torch.cuda.is_available(),  # use GPU for detection when available
)
23
+
24
+
25
# load image examples from the IAM database (plus two web samples); they are
# saved to disk because gr.Interface examples are passed as file paths.
urls = ['https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg', 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSoolxi9yWGAT5SLZShv8vVd0bz47UWRzQC19fDTeE8GmGv_Rn-PCF1pP1rrUx8kOjA4gg&usqp=CAU',
        'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRNYtTuSBpZPV_nkBYPMFwVVD9asZOPgHww4epu9EqWgDmXW--sE2o8og40ZfDGo87j5w&usqp=CAU']
for idx, url in enumerate(urls):
    # stream=True lets PIL read straight from the response body; the timeout
    # prevents a dead host from hanging app start-up indefinitely.
    image = Image.open(requests.get(url, stream=True, timeout=30).raw)
    image.save(f"image_{idx}.png")
31
+
32
def process_image(image):
    """Detect text lines with CRAFT and transcribe each with TrOCR.

    Args:
        image: a PIL image containing (handwritten) text.

    Returns:
        The recognized text as a single string, one detected line per
        output line (empty string when no text regions are found).
    """
    img = np.array(image)
    prediction_result = craft.detect_text(img)

    lines = []
    for i, box in enumerate(prediction_result['boxes']):
        # Each box is a 4-point polygon; corners [0] (top-left) and [2]
        # (bottom-right) give an axis-aligned crop of the line.
        y0, y1 = int(box[0][1]), int(box[2][1])
        x0, x1 = int(box[0][0]), int(box[2][0])
        roi = img[y0:y1, x0:x1]
        if roi.size == 0:
            # degenerate or inverted box — nothing to recognize; skipping
            # avoids Image.fromarray crashing on an empty array
            continue
        crop = Image.fromarray(roi).convert("RGB")
        pixel_values = processor(crop, return_tensors="pt").pixel_values
        generated_ids = model.generate(pixel_values)  # greedy, no beam search
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        lines.append(generated_text)
        print('line ' + str(i) + ' has been recognized')

    return '\n'.join(lines)
59
+
60
title = "Interactive demo: TrOCR"
description = "Demo for Microsoft's TrOCR, an encoder-decoder model consisting of an image Transformer encoder and a text Transformer decoder for state-of-the-art optical character recognition (OCR) on single-text line images. This particular model is fine-tuned on IAM, a dataset of annotated handwritten images. To use it, simply upload an image or use the example image below and click 'submit'. Results will show up in a few seconds."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.10282'>TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> | <a href='https://github.com/microsoft/unilm/tree/master/trocr'>Github Repo</a></p>"
examples = [["image_0.png"], ["image_1.png"], ["image_2.png"]]

# NOTE(review): gr.inputs.Image / gr.outputs.Textbox are the legacy Gradio 2.x
# namespaces, removed in Gradio 3.x/4.x — use the component classes directly.
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
    article=article,
    examples=examples,
)
iface.launch(debug=True, share=True)