turhancan97 committed on
Commit
2360fe8
·
1 Parent(s): cc9af23

yolov8 space

Files changed (1)
  1. app.py +143 -4
app.py CHANGED
@@ -1,7 +1,146 @@
 import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
+import torch
+from ultralytics import YOLO
+import cv2
+import numpy as np
+from math import atan2, cos, sin, sqrt, pi
+
+# Download the example images
+torch.hub.download_url_to_file('https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/cefd9731-c57c-428b-b401-fd54a8bd0a95', 'highway.jpg')
+torch.hub.download_url_to_file('https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/acbad76a-33f9-4028-b012-4ece5998c272', 'highway1.jpg')
+torch.hub.download_url_to_file('https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/7fa95f52-3c8b-4ea0-8bca-7374792a4c55', 'small-vehicles1.jpeg')
+
+def drawAxis(img, p_, q_, color, scale):
+    p = list(p_)
+    q = list(q_)
+
+    ## [visualization1]
+    angle = atan2(p[1] - q[1], p[0] - q[0])  # angle in radians
+    hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))
+
+    # Lengthen the arrow by a factor of scale
+    q[0] = p[0] - scale * hypotenuse * cos(angle)
+    q[1] = p[1] - scale * hypotenuse * sin(angle)
+    cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
+
+    # Create the arrow hooks
+    p[0] = q[0] + 9 * cos(angle + pi / 4)
+    p[1] = q[1] + 9 * sin(angle + pi / 4)
+    cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
+
+    p[0] = q[0] + 9 * cos(angle - pi / 4)
+    p[1] = q[1] + 9 * sin(angle - pi / 4)
+    cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
+    ## [visualization1]
+
+
+def getOrientation(pts, img):
+    ## [pca]
+    # Construct a buffer used by the PCA analysis
+    sz = len(pts)
+    data_pts = np.empty((sz, 2), dtype=np.float64)
+    for i in range(data_pts.shape[0]):
+        data_pts[i, 0] = pts[i, 0, 0]
+        data_pts[i, 1] = pts[i, 0, 1]
+
+    # Perform PCA analysis
+    mean = np.empty((0))
+    mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)
+
+    # Store the center of the object
+    cntr = (int(mean[0, 0]), int(mean[0, 1]))
+    ## [pca]
+
+    ## [visualization]
+    # Draw the principal components
+    cv2.circle(img, cntr, 3, (255, 0, 255), 10)
+    p1 = (cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0], cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0])
+    p2 = (cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0], cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0])
+    drawAxis(img, cntr, p1, (255, 255, 0), 1)
+    drawAxis(img, cntr, p2, (0, 0, 255), 3)
+
+    angle = atan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation in radians
+    ## [visualization]
+    # Normalize the angle to [0, 180) degrees
+    angle_deg = -(int(np.rad2deg(angle)) - 180) % 180
+
+    # Label with the normalized rotation angle (the same value that is returned)
+    label = " Rotation Angle: " + str(angle_deg) + " degrees"
+    cv2.rectangle(img, (cntr[0], cntr[1] - 25), (cntr[0] + 250, cntr[1] + 10), (255, 255, 255), -1)
+    cv2.putText(img, label, (cntr[0], cntr[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+
+    return angle_deg
+
+def yolov8_inference(
+    image=None,
+    model_path=None,
+    image_size=640,
+    conf_threshold=0.25,
+    iou_threshold=0.45,
+):
+    """
+    YOLOv8 inference function
+    Args:
+        image: Path to the input image (Gradio passes a filepath)
+        model_path: Path to the model
+        image_size: Image size
+        conf_threshold: Confidence threshold
+        iou_threshold: IOU threshold
+    Returns:
+        Rendered image with contours and orientation axes drawn
+    """
+    model = YOLO(model_path)
+    # Read the image (OpenCV loads it as BGR)
+    image = cv2.imread(image)
+    # Resize the image for display (optional)
+    img_res_toshow = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
+    height = img_res_toshow.shape[0]
+    width = img_res_toshow.shape[1]
+    dim = (width, height)
+    # Pass the thresholds directly to predict()
+    results = model.predict(image, imgsz=image_size, conf=conf_threshold, iou=iou_threshold)
+    # Obtain a black-and-white mask of the first detected object
+    bw = (results[0].masks.data[0].cpu().numpy() * 255).astype("uint8")
+    # Resize the mask to the same dimensions as the display image
+    bw = cv2.resize(bw, dim, interpolation=cv2.INTER_AREA)
+    img = img_res_toshow
+    contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
+    for i, c in enumerate(contours):
+        # Calculate the area of each contour
+        area = cv2.contourArea(c)
+
+        # Ignore contours that are too small or too large
+        if area < 3700 or 100000 < area:
+            continue
+
+        # Draw each contour, for visualisation purposes only
+        cv2.drawContours(img, contours, i, (0, 0, 255), 2)
+
+        # Find the orientation of each shape and log it
+        print(getOrientation(c, img))
+
+    # Convert BGR to RGB so Gradio displays the colours correctly
+    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+inputs = [
+    gr.inputs.Image(type="filepath", label="Input Image"),
+    gr.inputs.Dropdown(["kadirnar/yolov8n-v8.0", "kadirnar/yolov8m-v8.0", "kadirnar/yolov8l-v8.0", "kadirnar/yolov8x-v8.0", "kadirnar/yolov8x6-v8.0"],
+                       default="kadirnar/yolov8m-v8.0", label="Model"),
+    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
+    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
+]
+
+# The inference function returns a numpy array, so the output type is "numpy"
+outputs = gr.outputs.Image(type="numpy", label="Output Image")
+title = "Ultralytics YOLOv8: State-of-the-Art YOLO Models"
+
+examples = [
+    ['highway.jpg', 'kadirnar/yolov8m-v8.0', 640, 0.25, 0.45],
+    ['highway1.jpg', 'kadirnar/yolov8l-v8.0', 640, 0.25, 0.45],
+    ['small-vehicles1.jpeg', 'kadirnar/yolov8x-v8.0', 1280, 0.25, 0.45],
+]
+demo_app = gr.Interface(
+    fn=yolov8_inference,
+    inputs=inputs,
+    outputs=outputs,
+    title=title,
+    examples=examples,
+    cache_examples=True,
+    theme='huggingface',
+)
+demo_app.launch(debug=True, enable_queue=True)
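
Note: a minimal standalone sketch (not part of the commit) to sanity-check the contour-PCA orientation step that getOrientation() performs. It builds a synthetic rotated rectangle, extracts its contour, and recovers the rotation angle the same way the Space does; only cv2 and numpy are assumed, and the 30-degree rectangle is an illustrative choice.

import cv2
import numpy as np

# Filled rectangle rotated by 30 degrees, mimicking a segmentation mask
canvas = np.zeros((400, 400), dtype=np.uint8)
box = cv2.boxPoints(((200, 200), (200, 80), 30)).astype(np.int32)
cv2.fillPoly(canvas, [box], 255)

# Same pipeline as the app: contour -> PCA -> angle of the first eigenvector,
# which points along the contour's major axis
contours, _ = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
pts = contours[0].reshape(-1, 2).astype(np.float64)
mean, eigenvectors, eigenvalues = cv2.PCACompute2(pts, np.empty((0)))
angle = np.degrees(np.arctan2(eigenvectors[0, 1], eigenvectors[0, 0])) % 180
print(f"major-axis angle: {angle:.1f} degrees")  # expected ~30 (eigenvector sign folds out mod 180)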