Spaces:
Runtime error
Runtime error
srivatsavdamaraju
committed on
Commit
•
a3c3dff
1
Parent(s):
b91b65f
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
from flask import Flask, request, render_template, Response
|
3 |
+
import cv2
|
4 |
+
import numpy as np
|
5 |
+
import tensorflow as tf
|
6 |
+
|
7 |
+
# Load the TFLite model.
# NOTE(review): 'midas.tflite' is resolved relative to the working directory —
# presumably the MiDaS monocular depth-estimation model; confirm the file ships
# alongside this script.
interpreter = tf.lite.Interpreter(model_path=r'midas.tflite')
interpreter.allocate_tensors()

# Tensor metadata consumed by process_frame() to feed inputs / read outputs.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

app = Flask(__name__)
|
15 |
+
|
16 |
+
def preprocess_image(image):
    """Prepare a frame for the TFLite model: 256x256, float32 in [0, 1], batched.

    Returns an array of shape (1, 256, 256, C) ready for set_tensor().
    """
    resized = cv2.resize(image, (256, 256))
    normalized = resized.astype(np.float32) / 255.0
    return np.expand_dims(normalized, axis=0)
|
22 |
+
|
23 |
+
# Function to process the frame
def process_frame(frame):
    """Run depth estimation on a BGR frame and return an annotated grayscale view.

    The output blends the model's depth map with Canny edges of the original
    frame, divides the view into 7 vertical segments labelled with their mean
    intensity, and draws an arrow from the bottom-center toward the segment
    with the lowest mean intensity.

    Uses the module-level `interpreter`, `input_details` and `output_details`.

    Args:
        frame: BGR uint8 image as produced by cv2.imdecode / VideoCapture.

    Returns:
        A single-channel uint8 image (same size as the model output, 256x256).
    """
    input_image = preprocess_image(frame)

    # Run TFLite inference.
    interpreter.set_tensor(input_details[0]['index'], input_image)
    interpreter.invoke()
    depth_map = interpreter.get_tensor(output_details[0]['index'])

    # Normalize depth to 8-bit. Guard against a non-positive maximum, which
    # previously caused a divide-by-zero (NaN -> invalid uint8 cast).
    depth_map = np.squeeze(depth_map)
    max_val = float(np.max(depth_map))
    if max_val > 0:
        depth_map = (depth_map / max_val * 255).astype(np.uint8)
    else:
        depth_map = np.zeros_like(depth_map, dtype=np.uint8)
    # Renamed from depth_map_gray: cvtColor(GRAY2BGR) yields a 3-channel image.
    depth_map_bgr = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2BGR)

    # Canny edges of the original frame, resized to the depth map's (w, h).
    edges = cv2.Canny(frame, threshold1=100, threshold2=200)
    edges_colored = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
    edges_colored = cv2.resize(edges_colored, depth_map_bgr.shape[1::-1])

    # 50/50 blend of depth and edges, collapsed back to grayscale.
    alpha = 0.5
    overlay = cv2.addWeighted(depth_map_bgr, alpha, edges_colored, alpha, 0)
    overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)

    # Mean intensity of each of 7 vertical segments; the last segment absorbs
    # the remainder when the width is not divisible by 7.
    height, width = overlay_gray.shape
    segment_width = width // 7
    avg_pixel_densities = []
    for i in range(7):
        start_x = i * segment_width
        end_x = (i + 1) * segment_width if i < 6 else width
        segment_pixels = overlay_gray[:, start_x:end_x]
        avg_pixel_densities.append(float(np.mean(segment_pixels)))

    # Draw segment boundaries and their mean-intensity labels.
    for i in range(7):
        x = i * segment_width
        cv2.line(overlay_gray, (x, 0), (x, height), (255, 255, 255), 1)
        cv2.putText(overlay_gray, f"{avg_pixel_densities[i]:.2f}", (x + 5, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)

    # Reference dot near the bottom-center and a horizontal mid-line.
    center_x = width // 2
    bottom_y = height - 10
    dot_position = (center_x, bottom_y)
    cv2.circle(overlay_gray, dot_position, 5, (255, 255, 255), -1)
    middle_y = height // 2
    cv2.line(overlay_gray, (0, middle_y), (width, middle_y), (255, 255, 255), 1)

    # Arrow toward the center of the lowest-mean-intensity segment.
    # int() cast: cv2 point arguments must be plain Python ints.
    lowest_index = int(np.argmin(avg_pixel_densities))
    lowest_x = lowest_index * segment_width + segment_width // 2
    arrow_end = (lowest_x, height // 2)
    cv2.arrowedLine(overlay_gray, dot_position, arrow_end, (255, 0, 0), 2, tipLength=0.1)

    return overlay_gray
|
87 |
+
|
88 |
+
@app.route('/')
def index():
    """Serve the main page (templates/depthmap.html)."""
    page = render_template('depthmap.html')
    return page
|
91 |
+
|
92 |
+
@app.route('/video_feed', methods=['POST'])
async def video_feed():
    """Accept one frame (multipart field 'frame') and return the processed
    depth/edge overlay as a JPEG response.

    Returns:
        200 image/jpeg on success; 400 if the payload is not a decodable image.
    """
    # BUG FIX: FileStorage.read() is synchronous and returns bytes; awaiting it
    # raised "TypeError: object bytes can't be used in 'await' expression".
    frame_data = request.files['frame'].read()
    nparr = np.frombuffer(frame_data, np.uint8)
    frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if frame is None:
        # imdecode returns None for undecodable data; fail fast with a 400
        # instead of crashing inside process_frame.
        return Response('invalid image data', status=400)

    # Run the heavy OpenCV/TFLite work off the event loop.
    processed_frame = await asyncio.to_thread(process_frame, frame)

    # Encode the processed frame as JPEG.
    _, jpeg = cv2.imencode('.jpg', processed_frame)
    return Response(jpeg.tobytes(), mimetype='image/jpeg')
|
105 |
+
|
106 |
+
if __name__ == '__main__':
    # NOTE(review): use_reloader=False presumably keeps the debug reloader from
    # re-importing the module (and re-loading the TFLite model) in a child
    # process — confirm that is the intent.
    app.run(debug=True, use_reloader=False)
|