Rajadhurai committed
Commit 8eff39a · verified · 1 Parent(s): e876d5a

upload code

Files changed (1):
  1. app.py +25 -15
app.py CHANGED
@@ -4,9 +4,10 @@ import numpy as np
 import gradio as gr
 import tempfile
 
-# Load model
+# Path to hand landmark model file (make sure it's in your repo!)
 MODEL_PATH = "hand_landmarker.task"
 
+# MediaPipe setup
 BaseOptions = mp.tasks.BaseOptions
 HandLandmarker = mp.tasks.vision.HandLandmarker
 HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
@@ -14,7 +15,7 @@ VisionRunningMode = mp.tasks.vision.RunningMode
 mp_image = mp.Image
 mp_format = mp.ImageFormat
 
-# Finger connections and colors
+# Define hand connections and colors for visualization
 HAND_CONNECTIONS = [
     (0, 1), (1, 2), (2, 3), (3, 4),
     (0, 5), (5, 6), (6, 7), (7, 8),
@@ -46,18 +47,28 @@ def get_finger_color(start_idx):
     else:
         return FINGER_COLORS['palm']
 
-def process_video(video_path):
+def process_video(video_file):
+    # Gradio may send a dict or path string depending on how input is passed
+    if isinstance(video_file, dict):
+        video_path = video_file["name"]
+    else:
+        video_path = video_file
+
     cap = cv2.VideoCapture(video_path)
+    if not cap.isOpened():
+        raise ValueError("Could not open video.")
+
+    fps = cap.get(cv2.CAP_PROP_FPS) or 24
+    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
+    # Prepare output video path
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     tmp_out = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
     out_path = tmp_out.name
-
-    fps = cap.get(cv2.CAP_PROP_FPS)
-    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
     out = cv2.VideoWriter(out_path, fourcc, fps, (w, h))
 
+    # Load hand detection model
     options = HandLandmarkerOptions(
         base_options=BaseOptions(model_asset_path=MODEL_PATH),
         running_mode=VisionRunningMode.IMAGE,
@@ -80,12 +91,10 @@ def process_video(video_path):
         if results.hand_landmarks:
             for hand_landmarks in results.hand_landmarks:
                 points = [(int(lm.x * w), int(lm.y * h)) for lm in hand_landmarks]
-
                 for start, end in HAND_CONNECTIONS:
                     color = get_finger_color(start)
                     cv2.line(frame, points[start], points[end], color, 2)
-
-                for i, (x, y) in enumerate(points):
+                for x, y in points:
                     cv2.circle(frame, (x, y), 4, (0, 255, 255), -1)
 
         out.write(frame)
@@ -94,13 +103,14 @@ def process_video(video_path):
     out.release()
     return out_path
 
-# Gradio interface
+# Gradio app interface
 demo = gr.Interface(
     fn=process_video,
-    inputs=gr.Video(label="Upload Video or Use Webcam"),
+    inputs=gr.Video(label="Upload Video or Record via Webcam"),
     outputs=gr.Video(label="Hand Landmark Annotated Video"),
-    title="Hand Detection ",
-    description="Upload a video or use webcam to detect hands."
+    title="🖐️ Hand Detection using MediaPipe",
+    description="Upload a video or record from webcam. The system will detect hands and annotate keypoints using MediaPipe HandLandmarker."
 )
 
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
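
For a quick local smoke test of the updated process_video outside the Gradio UI, a minimal sketch like the one below could be used. It assumes app.py and hand_landmarker.task are in the working directory and that a short test clip exists; the file name sample.mp4 is only a placeholder, not something referenced by this commit.

    # local_check.py - illustrative sketch, not part of this commit
    from app import process_video

    if __name__ == "__main__":
        # A plain path string is accepted; a Gradio-style dict with a "name" key works too.
        annotated = process_video("sample.mp4")  # placeholder file name
        print("Annotated video written to:", annotated)

Because demo.launch() is now guarded by if __name__ == "__main__", importing app this way only builds the Gradio interface without starting the server.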