HarshitJoshi committed on
Commit
edc687b
·
verified ·
1 Parent(s): 0d64151

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -29
app.py CHANGED
@@ -23,13 +23,13 @@ for i, url in enumerate(file_urls):
23
 
24
# Per-class display colours (class id -> colour triple).
# NOTE(review): comments name these as RGB, but OpenCV drawing uses BGR
# order — confirm against how `colors` is passed to cv2.rectangle.
_palette = [
    (255, 0, 0),    # class 0: red
    (0, 255, 0),    # class 1: green
    (0, 0, 255),    # class 2: blue
    (255, 255, 0),  # class 3: yellow
    (255, 0, 255),  # class 4: magenta
    (0, 255, 255),  # class 5: cyan
    (128, 0, 0),    # class 6: maroon
    (0, 128, 0),    # class 7: green (dark)
]
colors = dict(enumerate(_palette))
34
 
35
  model = YOLO('modelbest.pt')
@@ -95,41 +95,61 @@ interface_image = gr.Interface(
95
  )
96
 
97
def show_preds_video(video_path):
    """Run YOLO detection on each frame of *video_path* and yield annotated
    frames for streaming display in Gradio.

    Parameters
    ----------
    video_path : str
        Path to the input video file.

    Yields
    ------
    numpy.ndarray
        The current frame with per-class boxes and centred labels drawn,
        converted from OpenCV BGR to RGB.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            # Bug fix: the original only processed frames under `if ret:` and
            # never exited the loop — after the last frame, cap.read() keeps
            # returning ret=False while cap.isOpened() stays True, so the
            # generator spun forever. Break out on a failed read.
            if not ret:
                break

            frame_copy = frame.copy()
            outputs = model.predict(source=frame)
            results = outputs[0].cpu().numpy()

            for i, det in enumerate(results.boxes.xyxy):
                class_id = int(results.boxes.cls[i])
                label = model.names[class_id]

                # Bounding-box corners as ints for OpenCV drawing.
                x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])

                # Per-class colour; red fallback for unmapped classes.
                color = colors.get(class_id, (0, 0, 255))
                cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)

                # Centre the label text inside the box.
                label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
                text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
                text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2

                cv2.putText(frame_copy, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)

            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    finally:
        # Release the capture even if model.predict raises mid-stream.
        cap.release()
126
 
 
 
 
 
 
 
 
 
 
 
127
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
# Gradio component specs for the video-inference tab: one video in,
# a stream of annotated frames out.
inputs_video = [gr.Video(format="mp4", label="Input Video")]
outputs_video = [gr.Image(type="numpy", label="Output Image")]
134
  interface_video = gr.Interface(
135
  fn=show_preds_video,
@@ -139,12 +159,12 @@ interface_video = gr.Interface(
139
  examples=video_path,
140
  cache_examples=False,
141
  )
142
-
143
# Expose both inference modes as tabs; queue() enables request queuing
# (required for streamed/generator outputs) before launching the app.
app = gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image inference', 'Video inference'],
)
app.queue().launch()
147
 
 
148
  # import gradio as gr
149
  # import cv2
150
  # import requests
 
23
 
24
# Per-class display colours (class id -> colour triple).
# NOTE(review): comments name these as RGB, but OpenCV drawing uses BGR
# order — confirm against how `colors` is passed to cv2.rectangle.
colors = {
    0: (255, 0, 0),    # Red for class 0
    1: (0, 128, 0),    # Green (dark) for class 1
    2: (0, 0, 255),    # Blue for class 2
    3: (255, 255, 0),  # Yellow for class 3
    4: (255, 0, 255),  # Magenta for class 4
    5: (0, 255, 255),  # Cyan for class 5
    6: (128, 0, 0),    # Maroon for class 6
    7: (0, 255, 0),    # Green for class 7 (fixed typo: 225 -> 255)
}
34
 
35
  model = YOLO('modelbest.pt')
 
95
  )
96
 
97
def show_preds_video(video_path):
    """Run YOLO detection on every frame of *video_path* and write an
    annotated copy to disk.

    Parameters
    ----------
    video_path : str
        Path to the input video file.

    Returns
    -------
    str
        Path of the annotated output video ('output_video.mp4').

    Raises
    ------
    ValueError
        If the input video cannot be opened.
    """
    # Open the input video and fail loudly instead of writing an empty file.
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    # Mirror the input's geometry/rate in the output.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back so the VideoWriter stays valid.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Define the codec and create a VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' for .mp4 format
    out = cv2.VideoWriter('output_video.mp4', fourcc, fps, (width, height))

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame_copy = frame.copy()
            outputs = model.predict(source=frame)
            results = outputs[0].cpu().numpy()

            for i, det in enumerate(results.boxes.xyxy):
                class_id = int(results.boxes.cls[i])
                label = model.names[class_id]

                # Bounding-box corners as ints for OpenCV drawing.
                x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])

                # Per-class colour; red fallback for unmapped classes.
                color = colors.get(class_id, (0, 0, 255))
                cv2.rectangle(frame_copy, (x1, y1), (x2, y2), color, 2, cv2.LINE_AA)

                # Centre the label text inside the box.
                label_size, _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.75, 2)
                text_x = x1 + (x2 - x1) // 2 - label_size[0] // 2
                text_y = y1 + (y2 - y1) // 2 + label_size[1] // 2

                cv2.putText(frame_copy, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2, cv2.LINE_AA)

            # Write the annotated frame to the output video.
            out.write(frame_copy)
    finally:
        # Release both handles even if model.predict raises mid-stream;
        # out.release() is what finalizes a playable .mp4.
        cap.release()
        out.release()

    return 'output_video.mp4'
146
+
147
+ # Updated Gradio interface
148
# Gradio component specs for the video-inference tab: one video in,
# the annotated output video back.
inputs_video = [gr.Video(format="mp4", label="Input Video")]
outputs_video = [gr.Video(label="Output Video")]
154
  interface_video = gr.Interface(
155
  fn=show_preds_video,
 
159
  examples=video_path,
160
  cache_examples=False,
161
  )
 
162
# Expose both inference modes as tabs; queue() enables request queuing
# before launching the app.
app = gr.TabbedInterface(
    [interface_image, interface_video],
    tab_names=['Image inference', 'Video inference'],
)
app.queue().launch()
166
 
167
+
168
  # import gradio as gr
169
  # import cv2
170
  # import requests