Update app.py
app.py CHANGED
@@ -16,32 +16,45 @@ model.eval()

 def count_and_label_red_patches(heatmap, threshold=200):
     red_mask = heatmap[:, :, 2] > threshold
-
-
-
+    contours, _ = cv2.findContours(red_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+    # Sort the contours based on their areas in descending order
+    contours = sorted(contours, key=cv2.contourArea, reverse=True)
+
     original_image = np.array(image)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Find the centroid of the red spot with the largest area
+    M_largest = cv2.moments(contours[0])
+    if M_largest["m00"] != 0:
+        cX_largest = int(M_largest["m10"] / M_largest["m00"])
+        cY_largest = int(M_largest["m01"] / M_largest["m00"])
+    else:
+        cX_largest, cY_largest = 0, 0
+
+    for i, contour in enumerate(contours, start=1):
+        # Compute the centroid of the current contour
+        M = cv2.moments(contour)
+        if M["m00"] != 0:
+            cX = int(M["m10"] / M["m00"])
+            cY = int(M["m01"] / M["m00"])
+        else:
+            cX, cY = 0, 0
+
+        radius = 20  # Adjust the circle radius to fit the numbers
+        circle_color = (0, 0, 0)  # Black
+        cv2.circle(original_image, (cX, cY), radius, circle_color, -1)  # Draw a filled circle at the centroid
+
+        # Connect the current red spot to the red spot with the largest area
+        line_color = (0, 0, 0)  # Black
+        cv2.line(original_image, (cX, cY), (cX_largest, cY_largest), line_color, 2)

         font = cv2.FONT_HERSHEY_SIMPLEX
         font_scale = 1
         font_color = (255, 255, 255)
         line_type = cv2.LINE_AA
-        cv2.putText(original_image, str(i), (
+        cv2.putText(original_image, str(i), (cX - 10, cY + 10), font, font_scale, font_color, 2, line_type)

-    return original_image,
+    return original_image, len(contours)

 st.title('Saliency Detection App')
 st.write('Upload an image for saliency detection:')
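For reference, the labeling logic added above amounts to thresholding the red channel, finding external contours, and taking each contour's centroid from its image moments. Below is a minimal standalone sketch of that same technique; the synthetic heatmap and the printout are illustrative stand-ins and are not part of app.py.

import cv2
import numpy as np

# Synthetic BGR heatmap with two bright-red blobs (stand-in data).
heatmap = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.circle(heatmap, (60, 60), 30, (0, 0, 255), -1)    # larger red blob
cv2.circle(heatmap, (150, 150), 15, (0, 0, 255), -1)  # smaller red blob

# Threshold the red channel and find external contours, largest first.
red_mask = heatmap[:, :, 2] > 200
contours, _ = cv2.findContours(red_mask.astype(np.uint8),
                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)

# Centroid of each contour from its moments: (m10/m00, m01/m00).
for i, contour in enumerate(contours, start=1):
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"]) if M["m00"] else 0
    cY = int(M["m01"] / M["m00"]) if M["m00"] else 0
    print(f"patch {i}: area={cv2.contourArea(contour):.0f}, centroid=({cX}, {cY})")

Because the contours are sorted by area in descending order, contours[0] is the largest patch, which is why the new function can use its centroid as the anchor that every other patch is connected to.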
@@ -82,6 +95,6 @@ if uploaded_image:

     st.image(blended_img, caption='Blended Image', use_column_width=True, channels='BGR')

-    # Create a dir with
+    # Create a directory named 'example' to save the result
     cv2.imwrite('example/result15.png', blended_img, [int(cv2.IMWRITE_JPEG_QUALITY), 200])
     st.success('Saliency detection complete. Result saved as "example/result15.png".')
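One caveat on the save step: cv2.imwrite does not create missing directories, and the IMWRITE_JPEG_QUALITY flag only applies to JPEG output, so it has no effect on a .png path. A small sketch of a more defensive save is shown below; the makedirs call and the PNG compression flag are suggestions rather than what the commit uses, and blended_img is a stand-in array here.

import os
import cv2
import numpy as np

blended_img = np.zeros((64, 64, 3), dtype=np.uint8)  # stand-in for the app's blended image

# imwrite returns False (it does not raise) if the target directory is missing.
os.makedirs('example', exist_ok=True)

# For PNG output, a PNG compression level (0-9) applies; JPEG quality is ignored.
ok = cv2.imwrite('example/result15.png', blended_img,
                 [int(cv2.IMWRITE_PNG_COMPRESSION), 3])
if not ok:
    raise IOError('Failed to write example/result15.png')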