Update app.py
app.py CHANGED
@@ -15,17 +15,16 @@ model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-large-handwritten')
 
 def display_sketch(sketch):
     logging.debug(f"Received sketch data: {sketch}")
-
+
     if isinstance(sketch, dict) and "composite" in sketch:
         image_data = sketch["composite"]
         logging.debug(f"Image data type: {type(image_data)}")
         logging.debug(f"Image data shape: {np.array(image_data).shape}")
-
+
         plt.imshow(image_data, cmap='gray')
         plt.axis('off')
 
-
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir='/mnt/data') as temp_file:
            plt.savefig(temp_file.name, bbox_inches='tight')
            temp_file_path = temp_file.name
 
@@ -50,7 +49,7 @@ with gr.Blocks() as demo:
     output_image = gr.Image(label="Your Sketch")
     recognized_text = gr.Textbox(label="Recognized Text")
     submit_btn = gr.Button("Submit")
-
+
     submit_btn.click(fn=display_sketch, inputs=sketchpad, outputs=output_image)
     submit_btn.click(fn=recognize_text, inputs=output_image, outputs=recognized_text)
 
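
For context, here is a minimal, self-contained sketch of how the updated display_sketch handler reads after this commit. Only the hunks above are visible in the diff, so the imports, the Agg backend call, and the returned file path are assumptions, and the recognize_text handler is not reproduced here:

# Sketch of the updated handler; assumes app.py already imports these modules.
import logging
import tempfile

import matplotlib
matplotlib.use("Agg")  # assumption: headless rendering on the Space
import matplotlib.pyplot as plt
import numpy as np

def display_sketch(sketch):
    logging.debug(f"Received sketch data: {sketch}")

    # Gradio's sketchpad value is a dict; "composite" holds the flattened
    # drawing (all layers merged) as an array-like image.
    if isinstance(sketch, dict) and "composite" in sketch:
        image_data = sketch["composite"]
        logging.debug(f"Image data type: {type(image_data)}")
        logging.debug(f"Image data shape: {np.array(image_data).shape}")

        plt.imshow(image_data, cmap='gray')
        plt.axis('off')

        # The change in this commit: write the PNG under /mnt/data instead of
        # the default temp directory; /mnt/data must exist and be writable.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir='/mnt/data') as temp_file:
            plt.savefig(temp_file.name, bbox_inches='tight')
            temp_file_path = temp_file.name

        return temp_file_path  # assumption: the saved path is what gr.Image displays

The two submit_btn.click handlers then chain off the same button: the first renders the sketch into output_image, and the second passes that image to recognize_text for decoding with the TrOCR model loaded above.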