Update app.py
app.py CHANGED
@@ -1,8 +1,8 @@
 import gradio as gr
 from transformers import TrOCRProcessor, VisionEncoderDecoderModel
 from PIL import Image
-import requests
 import torch
+import numpy as np
 
 # Load the pre-trained model and processor
 processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
@@ -10,6 +10,7 @@ model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
 
 # Define the prediction function
 def recognize_handwriting(image):
+    image = Image.fromarray(image.astype('uint8'), 'RGB')
     pixel_values = processor(images=image, return_tensors="pt").pixel_values
     generated_ids = model.generate(pixel_values)
     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
@@ -23,13 +24,13 @@ def provide_feedback(image, correct_text):
 
 with gr.Blocks() as demo:
     with gr.Tab("Recognize Handwriting"):
-
+        sketchpad = gr.Sketchpad(label="Draw something")
         output = gr.Textbox(label="Recognized Text")
         recognize_button = gr.Button("Recognize")
-        recognize_button.click(fn=recognize_handwriting, inputs=
+        recognize_button.click(fn=recognize_handwriting, inputs=sketchpad, outputs=output)
 
     with gr.Tab("Provide Feedback"):
-        image_feedback = gr.Image(type="
+        image_feedback = gr.Image(type="numpy")
         correct_text = gr.Textbox(label="Correct Text")
         feedback_button = gr.Button("Submit Feedback")
         feedback_output = gr.Textbox()
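For reference, the recognition path this diff wires up can be exercised outside the Gradio UI. Below is a minimal sketch, assuming (as the diff itself does) that the Sketchpad hands the callback an H x W x 3 uint8-compatible numpy array; Gradio 3.x Sketchpads return a numpy array, while newer releases return a dict of layers that would need unpacking first. The blank white canvas at the bottom is a hypothetical stand-in for a real drawing, used only as a smoke test.

import numpy as np
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Same checkpoint as in app.py
processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')

def recognize_handwriting(image):
    # Convert the raw numpy canvas into a 3-channel RGB PIL image before preprocessing.
    image = Image.fromarray(image.astype('uint8'), 'RGB')
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

# Smoke test: a blank white canvas standing in for the Sketchpad output.
canvas = np.full((100, 300, 3), 255, dtype=np.uint8)
print(recognize_handwriting(canvas))

The explicit Image.fromarray call is the key change: it turns the uint8 canvas array into the RGB PIL image format before it reaches the TrOCR processor, rather than relying on whatever dtype and channel layout the drawing component happens to emit.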