Spaces:
Runtime error
app
app.py
CHANGED
@@ -1,7 +1,109 @@
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+from openvino.runtime import Core
 import gradio as gr
 
-
-
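
The new imports pull in OpenCV, matplotlib, NumPy, and OpenVINO on top of Gradio. On Spaces these are installed from a requirements.txt at the repository root; a plausible dependency list for this commit (the package names are an assumption, not part of the diff) would be:

```
gradio
numpy
opencv-python-headless
matplotlib
openvino
```
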
+#####
+# Load pretrained model
+#####
+ie = Core()
+model = ie.read_model(model="model/horizontal-text-detection-0001.xml")
+compiled_model = ie.compile_model(model=model, device_name="CPU")
+input_layer_ir = compiled_model.input(0)
+output_layer_ir = compiled_model.output("boxes")
 
-
-
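
The resize in predict() below is driven by the detector's input geometry, so it is worth confirming what the compiled model actually expects. A minimal local sanity check, assuming the model/horizontal-text-detection-0001.{xml,bin} pair is present (the 704x704 input and [x_min, y_min, x_max, y_max, conf] box rows match the published description of this model, but verify against your copy):

```python
from openvino.runtime import Core

ie = Core()
model = ie.read_model(model="model/horizontal-text-detection-0001.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")

# N, C, H, W layout; horizontal-text-detection-0001 is documented as [1, 3, 704, 704].
print(compiled_model.input(0).shape)
# Each row of the "boxes" output is [x_min, y_min, x_max, y_max, conf].
print(compiled_model.output("boxes").shape)
```
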
+#####
+# Inference
+#####
+def predict(img: np.ndarray) -> np.ndarray:
+    # Input: a numpy array of the image in RGB (see the defaults at
+    # https://www.gradio.app/docs/#image).
+
+    # Text detection models expect an image in BGR format.
+    image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+    # N, C, H, W = batch size, number of channels, height, width.
+    N, C, H, W = input_layer_ir.shape
+    # Resize the image to meet the network's expected input size.
+    resized_image = cv2.resize(image, (W, H))
+    # Reshape to the network input shape.
+    input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)
+
+    # Create an inference request.
+    boxes = compiled_model([input_image])[output_layer_ir]
+    # Remove zero-only boxes.
+    boxes = boxes[~np.all(boxes == 0, axis=1)]
+
+    result = convert_result_to_image(image, resized_image, boxes, conf_labels=False)
+
+    plt.figure(figsize=(10, 6))
+    plt.axis("off")
+    plt.imshow(result)
+
+    # Return the annotated RGB image so Gradio can render it as the output.
+    return result
+
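
Since the Space bundles a test.jpg example, predict() can be smoke-tested outside Gradio. A hedged sketch (it assumes app.py's globals are importable and that test.jpg sits next to it; cv2.imread returns BGR, so convert to RGB first to mimic what Gradio passes in):

```python
import cv2

# Read the sample image and convert BGR -> RGB, matching Gradio's input format.
rgb = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)

out = predict(rgb)
print(out.shape, out.dtype)  # annotated RGB image as a numpy array

# Convert back to BGR for OpenCV's writer.
cv2.imwrite("annotated.jpg", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))
```
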
+# For each detection, the description is in the [x_min, y_min, x_max, y_max, conf] format.
+# The image passed here is in BGR format with changed width and height. To display it
+# in the colors expected by matplotlib, use the cvtColor function.
+def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
+    # Define colors for boxes and descriptions.
+    colors = {"red": (255, 0, 0), "green": (0, 255, 0)}
+
+    # Fetch the image shapes to calculate a ratio.
+    (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2]
+    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y
+
+    # Convert the base image from BGR to RGB format.
+    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
+
+    # Iterate through non-zero boxes.
+    for box in boxes:
+        # Pick the confidence factor from the last place in the array.
+        conf = box[-1]
+        if conf > threshold:
+            # Convert float to int and multiply the corner positions of each box by the
+            # x and y ratio. If the bounding box is found at the top of the image,
+            # position the upper box bar a little lower to make it visible on the image.
+            (x_min, y_min, x_max, y_max) = [
+                int(max(corner_position * ratio_y, 10)) if idx % 2
+                else int(corner_position * ratio_x)
+                for idx, corner_position in enumerate(box[:-1])
+            ]
+
+            # Draw a box based on the position. Parameters in the rectangle function are:
+            # image, start_point, end_point, color, thickness.
+            rgb_image = cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)
+
+            # Add text to the image based on position and confidence. Parameters in the
+            # text function are: image, text, bottom-left_corner_textfield, font,
+            # font_scale, color, thickness, line_type.
+            if conf_labels:
+                rgb_image = cv2.putText(
+                    rgb_image,
+                    f"{conf:.2f}",
+                    (x_min, y_min - 10),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    0.8,
+                    colors["red"],
+                    1,
+                    cv2.LINE_AA,
+                )
+
+    return rgb_image
+
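
The list comprehension above scales even-indexed corners (x) by ratio_x and odd-indexed corners (y) by ratio_y, clamping scaled y values to at least 10 px so a box at the very top of the image stays visible. A toy numeric check with made-up dimensions:

```python
import numpy as np

# Hypothetical sizes: 1280x720 original frame, 704x704 network input.
real_x, real_y = 1280, 720
resized_x, resized_y = 704, 704
ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

box = np.array([100.0, 4.0, 300.0, 50.0, 0.9])  # x_min, y_min, x_max, y_max, conf
(x_min, y_min, x_max, y_max) = [
    int(max(corner * ratio_y, 10)) if idx % 2 else int(corner * ratio_x)
    for idx, corner in enumerate(box[:-1])
]
print(x_min, y_min, x_max, y_max)  # 181 10 545 51 (y_min clamped up to 10)
```
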
+#####
+# Gradio Setup
+#####
+
+title = "Text Detection"
+description = "Text detection with an OpenVINO model"
+examples = ['test.jpg']
+interpretation = 'default'
+enable_queue = True
+
+gr.Interface(
+    fn=predict,
+    inputs=gr.inputs.Image(),
+    outputs=gr.outputs.Image(),
+    title=title,
+    description=description,
+    # examples=examples,
+    interpretation=interpretation,
+    enable_queue=enable_queue,
+).launch()
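
Note that the Interface call uses the Gradio 2.x-era API: gr.inputs / gr.outputs were deprecated in Gradio 3 and removed in Gradio 4, and the interpretation= and enable_queue= arguments were dropped from gr.Interface as well, which is a likely source of the Runtime error banner if the Space resolves a recent Gradio release. A hedged equivalent against Gradio 3/4 (not part of this commit):

```python
import gradio as gr

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy"),   # replaces gr.inputs.Image()
    outputs=gr.Image(type="numpy"),  # replaces gr.outputs.Image()
    title="Text Detection",
    description="Text detection with an OpenVINO model",
)
demo.queue().launch()  # queuing is enabled via .queue() instead of enable_queue=
```
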