Upload 3 files

- best.pt  +3 -0
- handler.py  +58 -48
- requirements.txt  +1 -2
best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d025427b1b29ea551bf60c80d148f93dac0d121feaa46bc9d534a781e1c3cffb
+size 22503193
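best.pt is committed as a Git LFS pointer, so the entry above records only the object hash and size (about 22 MB); the actual YOLO weights are fetched by `git lfs pull` and loaded by the new handler. A minimal sketch of loading the checkpoint directly, assuming the LFS object has been pulled and a local test image exists (the image path is illustrative, not part of this commit):

import cv2
from ultralytics import YOLO

model = YOLO("./best.pt")       # weights file added in this commit (via Git LFS)
img = cv2.imread("test.jpg")    # assumed local test image, not in the repo
result = model(img)             # same inference call the new handler makes
print(result[0].boxes.xyxy)     # detected bounding boxes as (x1, y1, x2, y2)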
handler.py
CHANGED
@@ -1,78 +1,88 @@

Removed (old version; lines cut off in the diff view are shown with ...):
-import supervision as sv
-from ...
-    def __init__(self ...
-        if isurl: # for url set isurl = 1
-        # clothing-detection-s4ioc/6
-        result = ...
-        if detections.confidence.size == 0:
-            x1, y1, x2, y2 = (
-                int(detections.xyxy[0][0]),
-                int(detections.xyxy[0][1]),
-                int(detections.xyxy[0][2]),
-                int(detections.xyxy[0][3]),
-            )
-            clothes = img[y1:y2, x1:x2]
-            retval, buffer = cv2.imencode(".jpg", clothes)
-            jpg_as_text = base64.b64encode(buffer)
-            return jpg_as_text
-# data = {
-# "inputs": {
-# "isurl": True,
-# "path": "http://192.168.10.20/cam-hi.jpg",
-# "key": "iJuYzEzNEFSaQq4e0hfE",
-# }
-# }
-# # test run
-# Model = EndpointHandler()
-# print(Model(data))
New version (added lines marked +):
 from typing import Dict, List, Any
 import urllib.request
 import numpy as np
 import cv2
 import base64
+from ultralytics import YOLO


 class EndpointHandler:
+    def __init__(self): #pass api key to model
+        # self.CLIENT = InferenceHTTPClient(
+        #     api_url="https://detect.roboflow.com",
+        #     api_key=key
+        # )
+        # print("checkpoint 1")
         pass
+
     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
         inputs = data.get("inputs")
         isurl = inputs.get("isurl")
         path = inputs.get("path")
+
+        model = YOLO("./best.pt")
+        ########################### Load Image #################################
+        if(isurl): # for url set isurl = 1
+            print("checkpoint 2-1")
             req = urllib.request.urlopen(path)
+            print("checkpoint 2-2")
             arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
+            print("checkpoint 2-3")
+            img = cv2.imdecode(arr, -1) # 'Load it as it is'
+        else: # for image file
             img = cv2.imread(path)
+
+        print("checkpoint 2")
+        ###########################################################################
+
+
+        ########################### Model Detection #################################
+        # change model_id to use a different model
+        # can try:
+        # clothing-detection-s4ioc/6 //good
         # clothing-segmentation-dataset/1
         # t-shirts-detector/1
         # mainmodel/2
+        #result = self.CLIENT.infer(path, model_id="mainmodel/2")
+        result = model(img)
+        #annotated_frame = result[0].plot()
+        detections = result[0].boxes
+        #print(result[0].boxes.xyxy)
+        #cv2.imshow("YOLOv8 Inference", annotated_frame)
+        # print(result)
+        #cv2.waitKey(0)
+        #detections = sv.Detections.from_inference(result)
         # print(detections)
+
+        print("checkpoint 3")
+        ###########################################################################
+

+        ########################### Data proccessing #################################
         # only pass the first detection
         # change 1 -> to len(detections.xyxy) to pass all photos
+        if(detections.xyxy.size == 0):
             return "Not Found"
         else:
+            x1, y1, x2, y2 = int(detections.xyxy[0][0]), int(detections.xyxy[0][1]), int(detections.xyxy[0][2]), int(detections.xyxy[0][3])
+            clothes = img[y1: y2, x1: x2]
+            retval , buffer = cv2.imencode('.jpg', clothes)
+            cv2.imwrite("result.jpg", clothes)
             # create base 64 object
+            jpg_as_text = base64.b64encode(buffer)
+            print("checkpoint 4")
             ###########################################################################
+            return jpg_as_text
+###########################################################################
+
+
+
+# test run
+# Model = Image_detect()
+# test file image
+# print(Model("test_images/test6.jpg", 0))

+#test url
+# print(Model("http://10.10.2.100/cam-lo.jpg", 1))
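For reference, a minimal local invocation of the new handler, assuming the file above is saved as handler.py next to best.pt. The payload shape follows the commented-out example removed from the old version (the "key" field is no longer read, and the camera URL is illustrative); this is a sketch, not an official interface:

import base64
from handler import EndpointHandler  # assumes the file above is saved as handler.py

data = {
    "inputs": {
        "isurl": True,
        "path": "http://192.168.10.20/cam-hi.jpg",  # URL taken from the removed example payload
    }
}

handler = EndpointHandler()
out = handler(data)                      # base64-encoded JPEG crop, or "Not Found"
if out != "Not Found":
    with open("crop.jpg", "wb") as f:
        f.write(base64.b64decode(out))   # decode the returned base64 back to a JPEG file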
requirements.txt
CHANGED
@@ -1,4 +1,3 @@
-supervision
 numpy
 opencv-python
-
+ultralytics