Spaces: Runtime error
# app.py
from flask import Flask, request, render_template, Response
import cv2
import numpy as np
import tensorflow as tf

# Load the TFLite model and allocate its tensors once at startup
interpreter = tf.lite.Interpreter(model_path='midas.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
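# Note: input_details[0]['shape'] and input_details[0]['dtype'] describe what the
# model actually expects; the 256x256 float32 preprocessing below assumes a MiDaS
# small variant with a [1, 256, 256, 3] float32 input tensor.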
app = Flask(__name__)

# Function to preprocess the image for the model
def preprocess_image(image):
    image = cv2.resize(image, (256, 256))      # Resize to 256x256
    image = image.astype(np.float32) / 255.0   # Normalize to [0, 1]
    image = np.expand_dims(image, axis=0)      # Add batch dimension
    return image
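# Note: some MiDaS TFLite exports expect RGB input and mean/std normalization
# rather than plain [0, 1] scaling of a BGR frame; the preprocessing above is an
# assumption about this particular 'midas.tflite' export, so verify it against
# the model's documentation.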
# Function to run depth estimation on a frame and build the annotated overlay
def process_frame(frame):
    input_image = preprocess_image(frame)

    # Set the input tensor and run inference
    interpreter.set_tensor(input_details[0]['index'], input_image)
    interpreter.invoke()

    # Get the output tensor
    depth_map = interpreter.get_tensor(output_details[0]['index'])

    # Scale the depth map to an 8-bit image
    depth_map = np.squeeze(depth_map)
    depth_map = (depth_map / np.max(depth_map) * 255).astype(np.uint8)
    depth_map_bgr = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2BGR)

    # Apply Canny edge detection on a grayscale copy of the original frame
    # (Canny expects a single-channel 8-bit image)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray_frame, threshold1=100, threshold2=200)
    edges_colored = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)

    # Resize edges_colored to match the depth map dimensions
    edges_colored = cv2.resize(edges_colored, depth_map_bgr.shape[1::-1])

    # Blend the depth map and edges, then collapse to grayscale
    alpha = 0.5
    overlay = cv2.addWeighted(depth_map_bgr, alpha, edges_colored, alpha, 0)
    overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)

    # Split the overlay into 7 vertical segments and measure their mean intensity
    height, width = overlay_gray.shape
    segment_width = width // 7
    avg_pixel_densities = []
    for i in range(7):
        start_x = i * segment_width
        end_x = (i + 1) * segment_width if i < 6 else width
        segment_pixels = overlay_gray[:, start_x:end_x]
        avg_pixel_densities.append(np.mean(segment_pixels))

    # Draw segment boundaries and their average pixel density values
    for i in range(7):
        x = i * segment_width
        cv2.line(overlay_gray, (x, 0), (x, height), 255, 1)
        cv2.putText(overlay_gray, f"{avg_pixel_densities[i]:.2f}", (x + 5, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, 255, 1)

    # Draw a reference dot at the bottom centre and a horizontal midline
    center_x = width // 2
    bottom_y = height - 10
    dot_position = (center_x, bottom_y)
    cv2.circle(overlay_gray, dot_position, 5, 255, -1)
    middle_y = height // 2
    cv2.line(overlay_gray, (0, middle_y), (width, middle_y), 255, 1)

    # Draw an arrow toward the segment with the lowest average intensity
    lowest_index = np.argmin(avg_pixel_densities)
    lowest_x = lowest_index * segment_width + segment_width // 2
    arrow_end = (lowest_x, height // 2)
    cv2.arrowedLine(overlay_gray, dot_position, arrow_end, 255, 2, tipLength=0.1)

    return overlay_gray
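# The returned image is the depth/edge blend divided into 7 vertical segments;
# the arrow runs from the bottom-centre dot toward the segment with the lowest
# average intensity, which (assuming lower values mean farther away in this
# export) presumably marks the most open direction ahead.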
@app.route('/')
def index():
    # Serve the client page (expects templates/depthmap.html to exist)
    return render_template('depthmap.html')

@app.route('/video_feed', methods=['POST'])
def video_feed():
    # Receive the frame posted by the client
    frame_data = request.files['frame'].read()
    nparr = np.frombuffer(frame_data, np.uint8)
    frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if frame is None:
        return Response('Could not decode frame', status=400)

    # Process the frame
    processed_frame = process_frame(frame)

    # Encode the processed frame as JPEG and return it
    _, jpeg = cv2.imencode('.jpg', processed_frame)
    return Response(jpeg.tobytes(), mimetype='image/jpeg')
if __name__ == '__main__':
    # Hugging Face Spaces expects the server on 0.0.0.0:7860 (the default app_port);
    # debug mode is left off for deployment.
    app.run(host='0.0.0.0', port=7860)
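For quick local testing without the browser client, a frame can be posted straight to the /video_feed endpoint. The snippet below is a minimal sketch, not part of the Space itself: it assumes the server is reachable at http://localhost:7860, that a test.jpg exists in the working directory, and that the requests package is installed.

# client_test.py -- hypothetical helper for exercising /video_feed locally
import requests

with open('test.jpg', 'rb') as f:
    # The field name 'frame' must match request.files['frame'] in the handler above
    files = {'frame': ('frame.jpg', f, 'image/jpeg')}
    resp = requests.post('http://localhost:7860/video_feed', files=files)

# Save the processed overlay returned by the server
with open('processed.jpg', 'wb') as out:
    out.write(resp.content)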