Spaces:
Running
Running
Update predict.py
Browse files- predict.py +58 -2
predict.py
CHANGED
@@ -10,6 +10,9 @@ from scipy import ndimage
|
|
10 |
from ultralytics import YOLO
|
11 |
from PIL import Image
|
12 |
import io
|
|
|
|
|
|
|
13 |
|
14 |
app = FastAPI()
|
15 |
uploads_dir = 'uploads'
|
@@ -56,13 +59,33 @@ def calculate_wound_dimensions(mask):
|
|
56 |
area_cm2 = length_cm * breadth_cm
|
57 |
return length_cm, breadth_cm, depth_cm, area_cm2
|
58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
@app.post("/analyze_wound")
|
60 |
async def analyze_wounds(file: UploadFile = File(...)):
|
61 |
if file.filename.lower().endswith(('.png', '.jpg', '.jpeg')):
|
62 |
contents = await file.read()
|
63 |
-
nparr = np.
|
64 |
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
|
|
|
65 |
results = yolo_model(img)
|
|
|
|
|
66 |
combined_xmin = float('inf')
|
67 |
combined_ymin = float('inf')
|
68 |
combined_xmax = float('-inf')
|
@@ -73,29 +96,37 @@ async def analyze_wounds(file: UploadFile = File(...)):
|
|
73 |
combined_ymin = min(combined_ymin, ymin)
|
74 |
combined_xmax = max(combined_xmax, xmax)
|
75 |
combined_ymax = max(combined_ymax, ymax)
|
|
|
76 |
combined_xmin = int(combined_xmin)
|
77 |
combined_ymin = int(combined_ymin)
|
78 |
combined_xmax = int(combined_xmax)
|
79 |
combined_ymax = int(combined_ymax)
|
|
|
80 |
combined_img = img[combined_ymin:combined_ymax, combined_xmin:combined_xmax]
|
81 |
combined_img_resized = cv2.resize(combined_img, (224, 224))
|
82 |
img_array = img_to_array(combined_img_resized) / 255.0
|
83 |
img_array = np.expand_dims(img_array, axis=0)
|
|
|
84 |
output = segmentation_model.predict(img_array)
|
85 |
predicted_mask = output[0]
|
|
|
86 |
mask_overlay = (predicted_mask.squeeze() * 255).astype(np.uint8)
|
87 |
mask_overlay_colored = np.zeros((mask_overlay.shape[0], mask_overlay.shape[1], 3), dtype=np.uint8)
|
88 |
mask_overlay_colored[mask_overlay > 200] = [255, 0, 0] # Red
|
89 |
mask_overlay_colored[(mask_overlay > 100) & (mask_overlay <= 200)] = [0, 255, 0] # Green
|
90 |
mask_overlay_colored[mask_overlay <= 100] = [0, 0, 255] # Blue
|
|
|
91 |
mask_overlay_colored = cv2.resize(mask_overlay_colored, (224, 224))
|
92 |
blended_image = cv2.addWeighted(combined_img_resized.astype(np.uint8), 0.6, mask_overlay_colored, 0.4, 0)
|
|
|
93 |
segmented_image = Image.fromarray(cv2.cvtColor(blended_image, cv2.COLOR_BGR2RGB))
|
94 |
img_byte_arr = io.BytesIO()
|
95 |
segmented_image.save(img_byte_arr, format='PNG')
|
96 |
img_byte_arr.seek(0)
|
|
|
97 |
length_cm, breadth_cm, depth_cm, area_cm2 = calculate_wound_dimensions(predicted_mask)
|
98 |
moisture = calculate_moisture_and_texture(combined_img)
|
|
|
99 |
response = Response(img_byte_arr.getvalue(), media_type='image/png')
|
100 |
response.headers['X-Length-Cm'] = str(length_cm)
|
101 |
response.headers['X-Breadth-Cm'] = str(breadth_cm)
|
@@ -103,4 +134,29 @@ async def analyze_wounds(file: UploadFile = File(...)):
|
|
103 |
response.headers['X-Area-Cm2'] = str(area_cm2)
|
104 |
response.headers['X-Moisture'] = str(moisture)
|
105 |
return response
|
106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
from ultralytics import YOLO
|
11 |
from PIL import Image
|
12 |
import io
|
13 |
+
import threading
|
14 |
+
|
15 |
+
live_view_running = False
|
16 |
|
17 |
app = FastAPI()
|
18 |
uploads_dir = 'uploads'
|
|
|
59 |
area_cm2 = length_cm * breadth_cm
|
60 |
return length_cm, breadth_cm, depth_cm, area_cm2
|
61 |
|
# Draw YOLO detection landmarks (bounding boxes) on the image
def draw_square_landmarks(frame):
    """Draw a green square landmark around each YOLO detection on *frame*.

    Each rectangular detection box is expanded to a square of side
    ``max(width, height)`` centred on the original box, then clamped to
    the frame bounds so the drawn square never spills outside the image.

    Args:
        frame: BGR image (numpy array); annotated in place.

    Returns:
        The same *frame* object, with one rectangle drawn per detection.
    """
    frame_h, frame_w = frame.shape[:2]
    # [0]: first (and only) result object for a single-image inference.
    results = yolo_model(frame)[0]
    for box in results.boxes.xyxy.tolist():
        x1, y1, x2, y2 = map(int, box)
        w = x2 - x1
        h = y2 - y1
        side = max(w, h)
        # Centre of the original detection box.
        cx = x1 + w // 2
        cy = y1 + h // 2
        new_x1 = max(cx - side // 2, 0)
        new_y1 = max(cy - side // 2, 0)
        # Clamp the far corner as well: the original only clamped the
        # near corner, so squares near the right/bottom edge extended
        # past the frame and yielded out-of-image coordinates.
        new_x2 = min(new_x1 + side, frame_w - 1)
        new_y2 = min(new_y1 + side, frame_h - 1)
        cv2.rectangle(frame, (new_x1, new_y1), (new_x2, new_y2), (0, 255, 0), 2)
    return frame
79 |
@app.post("/analyze_wound")
|
80 |
async def analyze_wounds(file: UploadFile = File(...)):
|
81 |
if file.filename.lower().endswith(('.png', '.jpg', '.jpeg')):
|
82 |
contents = await file.read()
|
83 |
+
nparr = np.frombuffer(contents, np.uint8) # safer than np.fromstring
|
84 |
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
|
85 |
+
|
86 |
results = yolo_model(img)
|
87 |
+
img = draw_square_landmarks(img) # ✅ Add landmarks to the original image
|
88 |
+
|
89 |
combined_xmin = float('inf')
|
90 |
combined_ymin = float('inf')
|
91 |
combined_xmax = float('-inf')
|
|
|
96 |
combined_ymin = min(combined_ymin, ymin)
|
97 |
combined_xmax = max(combined_xmax, xmax)
|
98 |
combined_ymax = max(combined_ymax, ymax)
|
99 |
+
|
100 |
combined_xmin = int(combined_xmin)
|
101 |
combined_ymin = int(combined_ymin)
|
102 |
combined_xmax = int(combined_xmax)
|
103 |
combined_ymax = int(combined_ymax)
|
104 |
+
|
105 |
combined_img = img[combined_ymin:combined_ymax, combined_xmin:combined_xmax]
|
106 |
combined_img_resized = cv2.resize(combined_img, (224, 224))
|
107 |
img_array = img_to_array(combined_img_resized) / 255.0
|
108 |
img_array = np.expand_dims(img_array, axis=0)
|
109 |
+
|
110 |
output = segmentation_model.predict(img_array)
|
111 |
predicted_mask = output[0]
|
112 |
+
|
113 |
mask_overlay = (predicted_mask.squeeze() * 255).astype(np.uint8)
|
114 |
mask_overlay_colored = np.zeros((mask_overlay.shape[0], mask_overlay.shape[1], 3), dtype=np.uint8)
|
115 |
mask_overlay_colored[mask_overlay > 200] = [255, 0, 0] # Red
|
116 |
mask_overlay_colored[(mask_overlay > 100) & (mask_overlay <= 200)] = [0, 255, 0] # Green
|
117 |
mask_overlay_colored[mask_overlay <= 100] = [0, 0, 255] # Blue
|
118 |
+
|
119 |
mask_overlay_colored = cv2.resize(mask_overlay_colored, (224, 224))
|
120 |
blended_image = cv2.addWeighted(combined_img_resized.astype(np.uint8), 0.6, mask_overlay_colored, 0.4, 0)
|
121 |
+
|
122 |
segmented_image = Image.fromarray(cv2.cvtColor(blended_image, cv2.COLOR_BGR2RGB))
|
123 |
img_byte_arr = io.BytesIO()
|
124 |
segmented_image.save(img_byte_arr, format='PNG')
|
125 |
img_byte_arr.seek(0)
|
126 |
+
|
127 |
length_cm, breadth_cm, depth_cm, area_cm2 = calculate_wound_dimensions(predicted_mask)
|
128 |
moisture = calculate_moisture_and_texture(combined_img)
|
129 |
+
|
130 |
response = Response(img_byte_arr.getvalue(), media_type='image/png')
|
131 |
response.headers['X-Length-Cm'] = str(length_cm)
|
132 |
response.headers['X-Breadth-Cm'] = str(breadth_cm)
|
|
|
134 |
response.headers['X-Area-Cm2'] = str(area_cm2)
|
135 |
response.headers['X-Moisture'] = str(moisture)
|
136 |
return response
|
137 |
+
|
138 |
+
return {'error': 'Invalid file format'}
|
139 |
+
|
def start_camera():
    """Open the default webcam and show a live preview with YOLO landmarks.

    Runs until the user presses 'q' in the preview window, a frame read
    fails, or the module-level ``live_view_running`` flag is cleared.
    Intended to run on a background thread; ``live_view_running`` doubles
    as a "camera busy" indicator for the ``/live_landmarks`` endpoint.

    NOTE(review): cv2.imshow from a non-main thread is not guaranteed to
    work on every platform/backend — confirm on the deployment target.
    """
    global live_view_running
    cap = cv2.VideoCapture(0)
    live_view_running = True
    try:
        while live_view_running:
            ret, frame = cap.read()
            if not ret:
                break
            frame = draw_square_landmarks(frame)
            cv2.imshow('Live Landmarks - Press Q to stop', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always clear the flag and release the device, even when the loop
        # exits via a failed read or an exception. The original reset the
        # flag only on the 'q' path, so a dead camera left
        # live_view_running == True and /live_landmarks reported
        # "already running" forever.
        live_view_running = False
        cap.release()
        cv2.destroyAllWindows()
@app.get("/live_landmarks")
def live_camera_with_landmarks():
    """Start (or report the status of) the local live-camera preview.

    Spawns ``start_camera`` on a background thread and returns
    immediately; the preview renders in an OpenCV window on the server's
    own display, not in the HTTP response.

    NOTE(review): there is a small race between reading
    ``live_view_running`` here and ``start_camera`` setting it — two
    near-simultaneous requests could both start a thread. Acceptable for
    a single-user debug endpoint; confirm if exposed more widely.

    Returns:
        dict: a human-readable status message.
    """
    if not live_view_running:
        # daemon=True: the original non-daemon thread kept the interpreter
        # alive on shutdown while the OpenCV window loop was still running.
        threading.Thread(target=start_camera, daemon=True).start()
        return {"message": "Live camera started. Check your system's display window."}
    else:
        return {"message": "Live camera already running."}