randomshit11 committed
Commit 942815c · verified · 1 Parent(s): b281904

Update app.py

Files changed (1):
  app.py +28 -13
app.py CHANGED

@@ -1,16 +1,25 @@
-import torch
 import cv2
 import numpy as np
 import gradio as gr
 from ultralytics import YOLO
 
-# Check if CUDA (GPU support) is available
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+model = YOLO('yolov8n-seg.pt')
 
-# Load the YOLOv8 model
-model = YOLO('yolov8n-seg.pt').to(device)
+# def show_preds_image(image_path):
+#     image = cv2.imread(image_path)
+#     image_copy = image.copy()
+#     threshold = 0.1
+#     results = model(image)[0]
+#     for result in results.boxes.data.tolist():
+#         x1, y1, x2, y2, score, class_id = result
+#         if score > threshold:
+#             cv2.rectangle(image_copy, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
+#             cv2.putText(image_copy, results.names[int(class_id)].upper(), (int(x1), int(y1 - 10)),
+#                         cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 255, 0), 3, cv2.LINE_AA)
+#             cv2.putText(image_copy, str(score), (int(x1), int(y2 + 10)),
+#                         cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 0, 255), 3, cv2.LINE_AA)
+#     return cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
 
-# Define the function to process the video on GPU
 def process_video(input_video_path):
     cap = cv2.VideoCapture(input_video_path)
     if not cap.isOpened():
@@ -30,9 +39,7 @@ def process_video(input_video_path):
 
     threshold = 0.1
     frame_copy = frame.copy()
-    # Convert frame to torch tensor and move it to GPU
-    frame_tensor = torch.from_numpy(frame_copy).permute(2, 0, 1).unsqueeze(0).float().to(device) / 255.0
-    results = model(frame_tensor)[0]
+    results = model(frame)[0]
     for result in results.boxes.data.tolist():
         x1, y1, x2, y2, score, class_id = result
         if score > threshold:
@@ -46,17 +53,25 @@ def process_video(input_video_path):
     cap.release()
     out.release()
 
-# Define the input and output interfaces for Gradio
+# inputs_image = [gr.Image(label="Input Image")]
+# outputs_image = [gr.Image( label="Output Image")]
+
+# interface_image = gr.Interface(
+#     fn=show_preds_image,
+#     inputs=inputs_image,
+#     outputs=outputs_image,
+#     title="Animal detector using YOLOv8 NANO for Images",
+# )
+
 inputs_video = gr.Video(label="Input Video")
 outputs_video = gr.Video(label="Output Video")
 
-# Create the Gradio interface
 demo = gr.Interface(
     fn=process_video,
     inputs=inputs_video,
     outputs=outputs_video,
-    title="Animal detector using YOLOv8 NANO for Videos (GPU)",
+    title="Animal detector using YOLOv8 NANO for Videos",
 )
 
-# Launch the interface
+# = gr.Interface([ interface_video], title="Animal Detector using YOLOv8 NANO")
 demo.launch()
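
The hunks elide the frame loop and the VideoWriter setup inside process_video, so here is a minimal sketch of how the updated, CPU-only version plausibly fits together. Only the lines visible in the hunks (the model load, threshold = 0.1, frame_copy, results = model(frame)[0], the boxes loop, and the release calls) come from the commit itself; the output path, mp4v codec, FPS fallback, the box/label drawing (mirrored from the commented-out show_preds_image helper), and the return value are assumptions for illustration. Because the manual torch tensor conversion was dropped, the frame is passed to the model as a plain NumPy BGR array and Ultralytics handles preprocessing internally.

import cv2
from ultralytics import YOLO

model = YOLO('yolov8n-seg.pt')  # CPU inference; the .to(device) call was removed in this commit

def process_video(input_video_path):
    cap = cv2.VideoCapture(input_video_path)
    if not cap.isOpened():
        return None  # assumption: the elided context bails out if the video cannot be opened

    # Writer setup is not visible in the diff; path, codec, and FPS fallback are assumptions.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    output_video_path = "output.mp4"
    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        threshold = 0.1
        frame_copy = frame.copy()
        results = model(frame)[0]  # run YOLOv8 directly on the BGR frame, as in the commit
        for result in results.boxes.data.tolist():
            x1, y1, x2, y2, score, class_id = result
            if score > threshold:
                # Drawing mirrors the commented-out image helper; not shown in the video hunks.
                cv2.rectangle(frame_copy, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 4)
                cv2.putText(frame_copy, results.names[int(class_id)].upper(), (int(x1), int(y1 - 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 255, 0), 3, cv2.LINE_AA)
        out.write(frame_copy)

    cap.release()
    out.release()
    return output_video_path  # assumption: gr.Video output consumes a file path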