randomshit11 committed
Commit 2c806a3 · verified · 1 Parent(s): 942815c

Update app.py

Files changed (1)
app.py +18 -27
app.py CHANGED
@@ -1,24 +1,14 @@
+import torch
 import cv2
 import numpy as np
 import gradio as gr
 from ultralytics import YOLO
 
-model = YOLO('yolov8n-seg.pt')
+# Check if CUDA (GPU support) is available
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-# def show_preds_image(image_path):
-#     image = cv2.imread(image_path)
-#     image_copy = image.copy()
-#     threshold = 0.1
-#     results = model(image)[0]
-#     for result in results.boxes.data.tolist():
-#         x1, y1, x2, y2, score, class_id = result
-#         if score > threshold:
-#             cv2.rectangle(image_copy, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
-#             cv2.putText(image_copy, results.names[int(class_id)].upper(), (int(x1), int(y1 - 10)),
-#                         cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 255, 0), 3, cv2.LINE_AA)
-#             cv2.putText(image_copy, str(score), (int(x1), int(y2 + 10)),
-#                         cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 0, 255), 3, cv2.LINE_AA)
-#     return cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
+# Load the YOLOv8 model
+model = YOLO(r'E:\CV-project\MAIN-DATA\GRADIO\Yolo-v8-Video\yolov8x-seg.pt').to(device)
 
 def process_video(input_video_path):
     cap = cv2.VideoCapture(input_video_path)
@@ -37,9 +27,15 @@ def process_video(input_video_path):
         if not ret:
             break
 
+        # Resize frame to match the expected input shape of the model
+        resized_frame = cv2.resize(frame, (640, 640))
+
+        # Convert resized frame to torch tensor and move it to GPU
+        frame_tensor = torch.from_numpy(resized_frame).permute(2, 0, 1).unsqueeze(0).float().to(device) / 255.0
+
         threshold = 0.1
         frame_copy = frame.copy()
-        results = model(frame)[0]
+        results = model(frame_tensor)[0]
         for result in results.boxes.data.tolist():
             x1, y1, x2, y2, score, class_id = result
             if score > threshold:
@@ -52,26 +48,21 @@ def process_video(input_video_path):
 
     cap.release()
     out.release()
+    cv2.destroyAllWindows()
 
-# inputs_image = [gr.Image(label="Input Image")]
-# outputs_image = [gr.Image( label="Output Image")]
-
-# interface_image = gr.Interface(
-#     fn=show_preds_image,
-#     inputs=inputs_image,
-#     outputs=outputs_image,
-#     title="Animal detector using YOLOv8 NANO for Images",
-# )
+    return "output_video.mp4"
 
+# Define the input and output interfaces for Gradio
 inputs_video = gr.Video(label="Input Video")
 outputs_video = gr.Video(label="Output Video")
 
+# Create the Gradio interface
 demo = gr.Interface(
     fn=process_video,
     inputs=inputs_video,
     outputs=outputs_video,
-    title="Animal detector using YOLOv8 NANO for Videos",
+    title="Animal detector using YOLOv8 NANO for Videos (GPU)",
 )
 
-# = gr.Interface([ interface_video], title="Animal Detector using YOLOv8 NANO")
+# Launch the interface
 demo.launch()
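
A note on the new preprocessing: inference now runs on a 640x640 copy of the frame, while drawing still happens on the full-size frame_copy, so the coordinates in results.boxes come back in the 640x640 scale. A minimal sketch of rescaling them before drawing, reusing the variable names from the diff (draw_scaled_boxes and model_size are illustrative names, not part of the commit):

import cv2

def draw_scaled_boxes(frame, results, threshold=0.1, model_size=640):
    # Rescale boxes predicted on a model_size x model_size input back to the
    # original frame resolution, then draw them on a copy of the frame.
    out = frame.copy()
    h, w = frame.shape[:2]
    sx, sy = w / model_size, h / model_size
    for x1, y1, x2, y2, score, class_id in results.boxes.data.tolist():
        if score > threshold:
            p1 = (int(x1 * sx), int(y1 * sy))
            p2 = (int(x2 * sx), int(y2 * sy))
            cv2.rectangle(out, p1, p2, (0, 255, 0), 4)
            cv2.putText(out, results.names[int(class_id)].upper(), (p1[0], p1[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 255, 0), 3, cv2.LINE_AA)
    return out

Inside the loop, frame_copy = draw_scaled_boxes(frame, results) would then take the place of the direct drawing calls.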
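
Alternatively, the manual resize and tensor conversion could be dropped: Ultralytics models also accept a raw BGR numpy frame and handle letterboxing and normalization internally, which keeps the returned boxes in the original frame's coordinates. A sketch of that call, assuming the standard predict arguments:

# Sketch: let the library do the preprocessing; conf filters low scores,
# device selects CPU/GPU, verbose=False silences per-frame logging.
results = model(frame, conf=0.1, device=device, verbose=False)[0]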