Delete app.py
app.py
DELETED
@@ -1,80 +0,0 @@
-import gradio as gr
-import cv2
-import requests
-import os
-import torch
-import numpy as np
-from ultralytics import YOLO
-
-model = torch.hub.load('ultralytics/yolov5', 'yolov5l', pretrained=True)
-
-area = [(48,430), (18, 515), (407,485), (750,425), (690,370)]
-total_space = 12
-count=0
-
-def show_preds_video():
-    cap = cv2.VideoCapture('V1.mp4')
-    count=0
-    while(cap.isOpened()):
-        ret, frame = cap.read()
-        if not ret:
-            break
-        count += 1
-        if count % 2 != 0:
-            continue
-
-        frame=cv2.resize(frame,(1020,600))
-        frame_copy = frame.copy()
-        Vehicle_cnt = 0
-
-        results=model(frame)
-        for index, row in results.pandas().xyxy[0].iterrows():
-            x1 = int(row['xmin'])
-            y1 = int(row['ymin'])
-            x2 = int(row['xmax'])
-            y2 = int(row['ymax'])
-            d=(row['name'])
-
-            cx=int(x1+x2)//2
-            cy=int(y1+y2)//2
-
-            if ('car' or 'truck') in d:
-                results = cv2.pointPolygonTest(np.array(area, np.int32), ((cx,cy)), False)
-                if results >0:
-                    cv2.rectangle(frame_copy,(x1,y1),(x2,y2),(0,0,255),2)
-                    cv2.putText(frame_copy,str(d),(x1,y1),cv2.FONT_HERSHEY_PLAIN,2,(255,255,0),2)
-                    Vehicle_cnt += 1
-
-
-
-        free_space = total_space - Vehicle_cnt
-        cv2.putText(frame_copy, ("Free space: " + str(free_space)), (50,50) ,cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
-
-        cv2.putText(frame_copy, str(str("vehicles: ")+ str(Vehicle_cnt) ), (50,85) ,cv2.FONT_HERSHEY_PLAIN,2,(0,255,0),2)
-
-        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0,255,0), 2)
-
-        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-
-
-inputs_video = [
-    gr.components.Video(type="filepath", label="Input Video"),
-
-]
-outputs_video = [
-    #gr.components.Image(type="numpy", label="Output Image"),
-]
-interface_video = gr.Interface(
-    fn=show_preds_video,
-    inputs=inputs_video,
-    outputs=outputs_video,
-    title="Parking counter",
-    description="Click generate !!!'",
-    #examples=video_path,
-    cache_examples=False,
-)
-
-gr.TabbedInterface(
-    [interface_video],
-    tab_names=['Video inference']
-).queue().launch()
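
Note: the deleted script carries a Python pitfall worth recording. The check ('car' or 'truck') in d evaluates the parenthesised expression first, which short-circuits to 'car', so the condition only ever tests whether 'car' appears in the class name and trucks are never counted. Below is a minimal corrected sketch of that branch, reusing the script's own d, area, cx, cy and Vehicle_cnt names; it is an illustrative rewrite, not a patch that was ever part of this repository.

    if d in ('car', 'truck'):
        # Use a separate name for the polygon test so it does not shadow the
        # YOLOv5 detections object stored in results.
        inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
        if inside > 0:
            Vehicle_cnt += 1  # detection centre lies inside the parking polygon

The d in ('car', 'truck') form compares the detected class name against both labels, and inside > 0 keeps only detections whose centre falls strictly inside the marked parking area.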