from typing import Any, Dict
import urllib.request
import numpy as np
import cv2
import base64
from ultralytics import YOLO
import os
import gdown
class EndpointHandler:
    def __init__(self, path: str = "."):
        # Download the fine-tuned YOLO weights from Google Drive and load them once.
        url = "https://drive.google.com/uc?id=1jB8sDYYOTfuF7B1PMcDjkm5R7huv97Wm"
        gdown.download(url, "./best.pt", quiet=False)
        self.model = YOLO("./best.pt")
    def __call__(self, data: Dict[str, Any]) -> str:
        inputs = data.get("inputs")
        isurl = inputs.get("isurl")
        path = inputs.get("path")
        model = self.model

        ########################### Load Image #################################
        if isurl:  # for a URL, set isurl = True: fetch the bytes and decode in memory
            req = urllib.request.urlopen(path)
            arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
            img = cv2.imdecode(arr, -1)  # load it as it is
        else:  # for a local image file
            img = cv2.imread(path)
        ###########################################################################

        ########################### Model Detection ##############################
        # Change model_id below to use a different hosted model; candidates:
        #   clothing-detection-s4ioc/6      (good)
        #   clothing-segmentation-dataset/1
        #   t-shirts-detector/1
        #   mainmodel/2
        # result = self.CLIENT.infer(path, model_id="mainmodel/2")
        result = model(img)          # run YOLO inference on the decoded image
        detections = result[0].boxes
        ###########################################################################

        ########################### Data processing ##############################
        # Only the first detection is cropped and returned; see the
        # crop_all_detections sketch after the class for a variant that
        # handles every detected box.
        if detections.xyxy.shape[0] == 0:
            return "Not Found"

        # Crop the first detected bounding box out of the original image
        x1, y1, x2, y2 = (int(v) for v in detections.xyxy[0][:4])
        clothes = img[y1:y2, x1:x2]
        retval, buffer = cv2.imencode(".jpg", clothes)
        cv2.imwrite("result.jpg", clothes)

        # Encode the cropped JPEG bytes as a base64 string for the response
        jpg_as_text = base64.b64encode(buffer).decode("utf-8")
        return jpg_as_text
###########################################################################
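# The data-processing comment above mentions handling every detection instead
# of only the first one. A minimal sketch of that variant, not part of the
# deployed handler; the function name and return shape are illustrative:
def crop_all_detections(img, detections):
    """Return a base64-encoded JPEG crop for every detected bounding box."""
    crops = []
    for box in detections.xyxy:
        x1, y1, x2, y2 = (int(v) for v in box[:4])
        ok, buf = cv2.imencode(".jpg", img[y1:y2, x1:x2])
        if ok:
            crops.append(base64.b64encode(buf).decode("utf-8"))
    return crops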
# test run
# Model = EndpointHandler()
# data = {
#     "inputs": {
#         "isurl": True,
#         # "path": "http://10.10.2.100/cam-lo.jpg",
#         "path": "https://www.next.us/nxtcms/resource/blob/5791586/ee0fc6a294be647924fa5f5e7e3df8e9/hoodies-data.jpg",
#         # "key": "iJuYzEzNEFSaQq4e0hfE",
#     }
# }
# # test with an image file or URL (the handler only accepts the dict payload above)
# print(Model(data))