Update app.py
app.py
CHANGED
@@ -410,35 +410,83 @@ class NeuralNetworkSimulator:
+# Initialize MediaPipe Holistic
+mp_holistic = mp.solutions.holistic
+holistic = mp_holistic.Holistic(static_image_mode=True, min_detection_confidence=0.7)
 
+# Landmark name lists for pose and hands (MediaPipe's face mesh has no name enum, so face points are indexed below)
+pose_landmarks = [name for name in mp_holistic.PoseLandmark.__members__]
+hand_landmarks = [name for name in mp_holistic.HandLandmark.__members__]
+
+def detect_landmarks(image_path):
     image = cv2.imread(image_path)
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    results =
+    results = holistic.process(image_rgb)
+
+    image_height, image_width, _ = image.shape
+    keypoints = []
 
+    # Pose landmarks
     if results.pose_landmarks:
+        for idx, landmark in enumerate(results.pose_landmarks.landmark):
+            x = int(landmark.x * image_width)
+            y = int(landmark.y * image_height)
+            keypoints.append(('POSE_' + pose_landmarks[idx], (x, y)))
+
+        # Compute a synthetic chest point as the average of nose, shoulders, and hips
+        nose = results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE]
+        left_shoulder = results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_SHOULDER]
+        right_shoulder = results.pose_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_SHOULDER]
+        left_hip = results.pose_landmarks.landmark[mp_holistic.PoseLandmark.LEFT_HIP]
+        right_hip = results.pose_landmarks.landmark[mp_holistic.PoseLandmark.RIGHT_HIP]
+
+        chest_x = int((nose.x + left_shoulder.x + right_shoulder.x + left_hip.x + right_hip.x) / 5 * image_width)
+        chest_y = int((nose.y + left_shoulder.y + right_shoulder.y + left_hip.y + right_hip.y) / 5 * image_height)
+        keypoints.append(('CHEST', (chest_x, chest_y)))
+
+    # Face landmarks (the face mesh returns unnamed points, so label them by index)
+    if results.face_landmarks:
+        for idx, landmark in enumerate(results.face_landmarks.landmark):
+            x = int(landmark.x * image_width)
+            y = int(landmark.y * image_height)
+            keypoints.append(('FACE_' + str(idx), (x, y)))
+
+    # Left hand landmarks
+    if results.left_hand_landmarks:
+        for idx, landmark in enumerate(results.left_hand_landmarks.landmark):
+            x = int(landmark.x * image_width)
+            y = int(landmark.y * image_height)
+            keypoints.append(('LEFT_HAND_' + hand_landmarks[idx], (x, y)))
+
+    # Right hand landmarks
+    if results.right_hand_landmarks:
+        for idx, landmark in enumerate(results.right_hand_landmarks.landmark):
+            x = int(landmark.x * image_width)
+            y = int(landmark.y * image_height)
+            keypoints.append(('RIGHT_HAND_' + hand_landmarks[idx], (x, y)))
+
+    return keypoints
 
-# Function to apply touch points on detected humanoid keypoints
 def apply_touch_points(image_path, keypoints):
     image = cv2.imread(image_path)
     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     image_pil = Image.fromarray(image_rgb)
     draw = ImageDraw.Draw(image_pil)
 
+    font = ImageFont.load_default()  # Specify a better font if needed
+
+    for name, point in keypoints:
+        if 'CHEST' in name:
+            draw.ellipse([point[0] - 10, point[1] - 10, point[0] + 10, point[1] + 10], fill='blue')
+            draw.text((point[0] + 15, point[1]), name, fill='blue', font=font)
+        else:
+            draw.ellipse([point[0] - 5, point[1] - 5, point[0] + 5, point[1] + 5], fill='red')
+            draw.text((point[0] + 10, point[1]), name, fill='red', font=font)
 
     return image_pil
 
+
 # Function to create a sensation map
 def create_sensation_map(width, height, keypoints):
     sensation_map = np.random.rand(height, width, 12) * 0.5 + 0.5
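For context, here is a minimal usage sketch (not part of the commit) showing how detect_landmarks and apply_touch_points could be exercised together, assuming they are defined in the same module as above. The file names person.jpg and annotated.png are placeholders. Because the Holistic object is created once at module level with static_image_mode=True, it is reused across calls and should be closed when no longer needed.

# Hypothetical driver, not part of app.py; file names are placeholders.
if __name__ == "__main__":
    keypoints = detect_landmarks("person.jpg")
    print(f"Detected {len(keypoints)} keypoints")

    # The synthetic CHEST point is appended alongside the named pose landmarks.
    chest = dict(keypoints).get("CHEST")
    if chest:
        print(f"Chest point at {chest}")

    annotated = apply_touch_points("person.jpg", keypoints)
    annotated.save("annotated.png")

    holistic.close()  # release MediaPipe's native resources when done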