Auto-UTF handling (#4594)
Files changed:
- detect.py (+4, -3)
- models/common.py (+5, -4)
- utils/general.py (+1, -1)
detect.py
CHANGED
@@ -21,9 +21,9 @@ sys.path.append(FILE.parents[0].as_posix())  # add yolov5/ to path
 
 from models.experimental import attempt_load
 from utils.datasets import LoadStreams, LoadImages
-from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
+from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \
     apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
-from utils.plots import …
+from utils.plots import Annotator, colors
 from utils.torch_utils import select_device, load_classifier, time_sync
 
 
@@ -105,6 +105,7 @@ def run(weights='yolov5s.pt',  # model.pt path(s)
             output_details = interpreter.get_output_details()  # outputs
             int8 = input_details[0]['dtype'] == np.uint8  # is TFLite quantized uint8 model
     imgsz = check_img_size(imgsz, s=stride)  # check image size
+    ascii = is_ascii(names)  # names are ascii (use PIL for UTF-8)
 
     # Dataloader
     if webcam:
@@ -181,7 +182,7 @@ def run(weights='yolov5s.pt',  # model.pt path(s)
             s += '%gx%g ' % img.shape[2:]  # print string
             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
             imc = im0.copy() if save_crop else im0  # for save_crop
-            annotator = Annotator(im0, line_width=line_thickness, pil=…
+            annotator = Annotator(im0, line_width=line_thickness, pil=not ascii)
             if len(det):
                 # Rescale boxes from img_size to im0 size
                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
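Taken together, the detect.py changes make the annotation backend a function of the class names: cv2 drawing when every name is ASCII, PIL (which can render UTF-8 glyphs such as CJK labels) otherwise. A minimal sketch of the same pattern, assuming the yolov5 repo is on the Python path; the bilingual name list and dummy frame are illustrative only, not part of the PR:

import numpy as np

from utils.general import is_ascii  # helper used by this PR
from utils.plots import Annotator   # plotting class constructed in detect.py

im0 = np.zeros((640, 640, 3), dtype=np.uint8)  # dummy BGR frame standing in for a real image
names = ['person', '自行车']  # hypothetical class names; the second is non-ASCII
annotator = Annotator(im0, line_width=3, pil=not is_ascii(names))  # PIL backend only when names need it
annotator.box_label([50, 50, 200, 200], label=f'{names[1]} 0.87', color=(255, 0, 0))  # draw one labelled box
im0 = annotator.result()  # annotated image back as a numpy array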
models/common.py
CHANGED
@@ -18,9 +18,9 @@ from PIL import Image
 from torch.cuda import amp
 
 from utils.datasets import exif_transpose, letterbox
-from utils.general import colorstr, …
-    …
-from utils.plots import …
+from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \
+    scale_coords, xyxy2xywh
+from utils.plots import Annotator, colors
 from utils.torch_utils import time_sync
 
 LOGGER = logging.getLogger(__name__)
@@ -354,6 +354,7 @@ class Detections:
         self.imgs = imgs  # list of images as numpy arrays
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
+        self.ascii = is_ascii(names)  # names are ascii (use PIL for UTF-8)
         self.files = files  # image filenames
         self.xyxy = pred  # xyxy pixels
         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
@@ -371,7 +372,7 @@ class Detections:
                     n = (pred[:, -1] == c).sum()  # detections per class
                     str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                 if show or save or render or crop:
-                    annotator = Annotator(im, pil=…
+                    annotator = Annotator(im, pil=not self.ascii)
                     for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
                         label = f'{self.names[int(cls)]} {conf:.2f}'
                         if crop:
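In models/common.py the same check runs once per Detections object and is cached as self.ascii, so later calls that go through display() (show, save, crop, render) build their Annotator with the right backend without recomputing anything. A hedged usage sketch with the standard Hub entry point; the sample image URL is the usual yolov5 example, not something added by this PR:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')      # standard Hub load
results = model('https://ultralytics.com/images/zidane.jpg')  # returns a Detections object
results.show()  # internally Annotator(im, pil=not self.ascii): PIL only if any class name is non-ASCII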
utils/general.py
CHANGED
@@ -124,7 +124,7 @@ def is_pip():
 
 def is_ascii(s=''):
     # Is string composed of all ASCII (no UTF) characters?
-    s = str(s)  # convert
+    s = str(s)  # convert list, tuple, None, etc. to str
     return len(s.encode().decode('ascii', 'ignore')) == len(s)
 
 
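The helper is short enough to read in full: str() first normalizes whatever is passed in (the class-name list, a single name, None), then the encode/decode round trip silently drops any non-ASCII bytes, so the lengths match only for pure-ASCII input. A quick plain-Python demonstration, with the two-line body copied from the diff above:

def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters?
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)

print(is_ascii('person'))             # True  -> every byte survives the ascii round trip
print(is_ascii('自行车'))              # False -> the UTF-8 bytes are dropped, lengths differ
print(is_ascii(['person', '自行车']))  # False -> the list is stringified, one UTF-8 name flags it
print(is_ascii(None))                 # True  -> 'None' is plain ASCII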