Automatic Chinese fonts plotting (#4951)
Browse files

* Automatic Chinese fonts plotting
* Default PIL=False
- detect.py +2 -3
- models/common.py +6 -7
- utils/general.py +3 -4
- utils/plots.py +7 -6
detect.py
CHANGED
@@ -23,7 +23,7 @@ if str(ROOT) not in sys.path:
|
|
23 |
from models.experimental import attempt_load
|
24 |
from utils.datasets import LoadImages, LoadStreams
|
25 |
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
|
26 |
-
increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
|
27 |
strip_optimizer, xyxy2xywh
|
28 |
from utils.plots import Annotator, colors
|
29 |
from utils.torch_utils import load_classifier, select_device, time_sync
|
@@ -108,7 +108,6 @@ def run(weights='yolov5s.pt', # model.pt path(s)
|
|
108 |
output_details = interpreter.get_output_details() # outputs
|
109 |
int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model
|
110 |
imgsz = check_img_size(imgsz, s=stride) # check image size
|
111 |
-
ascii = is_ascii(names) # names are ascii (use PIL for UTF-8)
|
112 |
|
113 |
# Dataloader
|
114 |
if webcam:
|
@@ -190,7 +189,7 @@ def run(weights='yolov5s.pt', # model.pt path(s)
|
|
190 |
s += '%gx%g ' % img.shape[2:] # print string
|
191 |
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
|
192 |
imc = im0.copy() if save_crop else im0 # for save_crop
|
193 |
-
annotator = Annotator(im0, line_width=line_thickness, pil=not ascii)
|
194 |
if len(det):
|
195 |
# Rescale boxes from img_size to im0 size
|
196 |
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
|
|
|
23 |
from models.experimental import attempt_load
|
24 |
from utils.datasets import LoadImages, LoadStreams
|
25 |
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
|
26 |
+
increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
|
27 |
strip_optimizer, xyxy2xywh
|
28 |
from utils.plots import Annotator, colors
|
29 |
from utils.torch_utils import load_classifier, select_device, time_sync
|
|
|
108 |
output_details = interpreter.get_output_details() # outputs
|
109 |
int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model
|
110 |
imgsz = check_img_size(imgsz, s=stride) # check image size
|
|
|
111 |
|
112 |
# Dataloader
|
113 |
if webcam:
|
|
|
189 |
s += '%gx%g ' % img.shape[2:] # print string
|
190 |
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
|
191 |
imc = im0.copy() if save_crop else im0 # for save_crop
|
192 |
+
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
|
193 |
if len(det):
|
194 |
# Rescale boxes from img_size to im0 size
|
195 |
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
|
models/common.py
CHANGED
@@ -18,7 +18,7 @@ from PIL import Image
|
|
18 |
from torch.cuda import amp
|
19 |
|
20 |
from utils.datasets import exif_transpose, letterbox
|
21 |
-
from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \
|
22 |
scale_coords, xyxy2xywh
|
23 |
from utils.plots import Annotator, colors
|
24 |
from utils.torch_utils import time_sync
|
@@ -356,7 +356,6 @@ class Detections:
|
|
356 |
self.imgs = imgs # list of images as numpy arrays
|
357 |
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
|
358 |
self.names = names # class names
|
359 |
-
self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8)
|
360 |
self.files = files # image filenames
|
361 |
self.xyxy = pred # xyxy pixels
|
362 |
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
|
@@ -369,13 +368,13 @@ class Detections:
|
|
369 |
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
|
370 |
crops = []
|
371 |
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
|
372 |
-
|
373 |
if pred.shape[0]:
|
374 |
for c in pred[:, -1].unique():
|
375 |
n = (pred[:, -1] == c).sum() # detections per class
|
376 |
-
|
377 |
if show or save or render or crop:
|
378 |
-
annotator = Annotator(im, pil=not self.ascii)
|
379 |
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
|
380 |
label = f'{self.names[int(cls)]} {conf:.2f}'
|
381 |
if crop:
|
@@ -386,11 +385,11 @@ class Detections:
|
|
386 |
annotator.box_label(box, label, color=colors(cls))
|
387 |
im = annotator.im
|
388 |
else:
|
389 |
-
|
390 |
|
391 |
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
|
392 |
if pprint:
|
393 |
-
LOGGER.info(str.rstrip(', '))
|
394 |
if show:
|
395 |
im.show(self.files[i]) # show
|
396 |
if save:
|
|
|
18 |
from torch.cuda import amp
|
19 |
|
20 |
from utils.datasets import exif_transpose, letterbox
|
21 |
+
from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \
|
22 |
scale_coords, xyxy2xywh
|
23 |
from utils.plots import Annotator, colors
|
24 |
from utils.torch_utils import time_sync
|
|
|
356 |
self.imgs = imgs # list of images as numpy arrays
|
357 |
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
|
358 |
self.names = names # class names
|
|
|
359 |
self.files = files # image filenames
|
360 |
self.xyxy = pred # xyxy pixels
|
361 |
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
|
|
|
368 |
def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
|
369 |
crops = []
|
370 |
for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
|
371 |
+
s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
|
372 |
if pred.shape[0]:
|
373 |
for c in pred[:, -1].unique():
|
374 |
n = (pred[:, -1] == c).sum() # detections per class
|
375 |
+
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
|
376 |
if show or save or render or crop:
|
377 |
+
annotator = Annotator(im, example=str(self.names))
|
378 |
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
|
379 |
label = f'{self.names[int(cls)]} {conf:.2f}'
|
380 |
if crop:
|
|
|
385 |
annotator.box_label(box, label, color=colors(cls))
|
386 |
im = annotator.im
|
387 |
else:
|
388 |
+
s += '(no detections)'
|
389 |
|
390 |
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
|
391 |
if pprint:
|
392 |
+
LOGGER.info(s.rstrip(', '))
|
393 |
if show:
|
394 |
im.show(self.files[i]) # show
|
395 |
if save:
|
utils/general.py
CHANGED
@@ -161,10 +161,9 @@ def is_pip():
|
|
161 |
return 'site-packages' in Path(__file__).resolve().parts
|
162 |
|
163 |
|
164 |
-
def is_ascii(s=''):
|
165 |
-
# Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
|
166 |
-
|
167 |
-
return len(s.encode().decode('ascii', 'ignore')) == len(s)
|
168 |
|
169 |
|
170 |
def emojis(str=''):
|
|
|
161 |
return 'site-packages' in Path(__file__).resolve().parts
|
162 |
|
163 |
|
164 |
+
def is_chinese(s='人工智能'):
|
165 |
+
# Is string composed of any Chinese characters?
|
166 |
+
return re.search('[\u4e00-\u9fff]', s)
|
|
|
167 |
|
168 |
|
169 |
def emojis(str=''):
|
utils/plots.py
CHANGED
@@ -17,7 +17,7 @@ import seaborn as sn
|
|
17 |
import torch
|
18 |
from PIL import Image, ImageDraw, ImageFont
|
19 |
|
20 |
-
from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh
|
21 |
from utils.metrics import fitness
|
22 |
|
23 |
# Settings
|
@@ -66,20 +66,21 @@ class Annotator:
|
|
66 |
check_font() # download TTF if necessary
|
67 |
|
68 |
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
|
69 |
-
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True):
|
70 |
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
|
71 |
-
self.pil = pil
|
72 |
if self.pil: # use PIL
|
73 |
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
|
74 |
self.draw = ImageDraw.Draw(self.im)
|
75 |
-
self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
|
|
|
76 |
else: # use cv2
|
77 |
self.im = im
|
78 |
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
|
79 |
|
80 |
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
|
81 |
# Add one xyxy box to image with label
|
82 |
-
if self.pil or not is_ascii(label):
|
83 |
self.draw.rectangle(box, width=self.lw, outline=color) # box
|
84 |
if label:
|
85 |
w, h = self.font.getsize(label) # text width, height
|
@@ -177,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max
|
|
177 |
|
178 |
# Annotate
|
179 |
fs = int((h + w) * ns * 0.01) # font size
|
180 |
-
annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs)
|
181 |
for i in range(i + 1):
|
182 |
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
|
183 |
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
|
|
|
17 |
import torch
|
18 |
from PIL import Image, ImageDraw, ImageFont
|
19 |
|
20 |
+
from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh
|
21 |
from utils.metrics import fitness
|
22 |
|
23 |
# Settings
|
|
|
66 |
check_font() # download TTF if necessary
|
67 |
|
68 |
# YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
|
69 |
+
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
|
70 |
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
|
71 |
+
self.pil = pil or not example.isascii() or is_chinese(example)
|
72 |
if self.pil: # use PIL
|
73 |
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
|
74 |
self.draw = ImageDraw.Draw(self.im)
|
75 |
+
self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
|
76 |
+
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
|
77 |
else: # use cv2
|
78 |
self.im = im
|
79 |
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
|
80 |
|
81 |
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
|
82 |
# Add one xyxy box to image with label
|
83 |
+
if self.pil or not label.isascii():
|
84 |
self.draw.rectangle(box, width=self.lw, outline=color) # box
|
85 |
if label:
|
86 |
w, h = self.font.getsize(label) # text width, height
|
|
|
178 |
|
179 |
# Annotate
|
180 |
fs = int((h + w) * ns * 0.01) # font size
|
181 |
+
annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
|
182 |
for i in range(i + 1):
|
183 |
x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
|
184 |
annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
|