Update app.py
app.py
CHANGED
@@ -218,7 +218,7 @@ def shrink_bbox(image: np.ndarray, shrink_factor: float):
     x1 = max(center_x - new_width // 2, 0)
     y1 = max(center_y - new_height // 2, 0)
     x2 = min(center_x + new_width // 2, width)
-    y2 = min(center_y + new_height // 2, height)
+    y2 = min(center_y + new_width // 2, height)
     return image[y1:y2, x1:x2]
 
 def exclude_scaling_box(image: np.ndarray, bbox: np.ndarray, orig_size: tuple, processed_size: tuple, expansion_factor: float = 1.2) -> np.ndarray:
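Note: the crop clamp above is easiest to sanity-check in isolation. A minimal sketch, assuming shrink_bbox derives the center and the shrunken dimensions from the image size (those setup lines are hypothetical reconstructions, and the sketch uses the symmetric new_height term for y2, whereas the committed line keeps new_width):

import numpy as np

def shrink_bbox_sketch(image: np.ndarray, shrink_factor: float) -> np.ndarray:
    # Hypothetical setup: center and shrunken size derived from the image.
    height, width = image.shape[:2]
    center_x, center_y = width // 2, height // 2
    new_width = int(width * shrink_factor)
    new_height = int(height * shrink_factor)
    # Clamp the crop box to the image bounds; y2 is limited by the
    # image height, mirroring how x2 is limited by the width.
    x1 = max(center_x - new_width // 2, 0)
    y1 = max(center_y - new_height // 2, 0)
    x2 = min(center_x + new_width // 2, width)
    y2 = min(center_y + new_height // 2, height)
    return image[y1:y2, x1:x2]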
@@ -347,7 +347,7 @@ def save_dxf_spline(inflated_contours, scaling_factor, height, finger_clearance=
         print(f"Skipping contour: {e}")
     return doc, final_polygons_inch
 
-def add_rectangular_boundary(doc, polygons_inch, boundary_length, boundary_width, offset_unit):
+def add_rectangular_boundary(doc, polygons_inch, boundary_length, boundary_width, offset_unit, annotation_text=""):
     msp = doc.modelspace()
     # First, if unit is mm, check if values seem too low (accidental inches) and convert them.
     if offset_unit.lower() == "mm":
@@ -380,24 +380,26 @@ def add_rectangular_boundary(doc, polygons_inch, boundary_length, boundary_width
     inner_width = max_x - min_x
     inner_length = max_y - min_y
 
-    #
-
+    # Set clearance margins.
+    clearance_side = 0.25  # Left and right clearance
+    clearance_top = 0.25  # Top clearance
+    # Bottom clearance: 0.75 if annotation text is provided; otherwise 0.25.
+    clearance_bottom = 0.75 if annotation_text.strip() else 0.25
 
     # New check: if the provided boundary dimensions are too small relative to the inner contours, raise an error.
-    if boundary_width_in <= inner_width +
+    if boundary_width_in <= inner_width + 2 * clearance_side or \
+       boundary_length_in <= inner_length + clearance_top + clearance_bottom:
         raise BoundaryOverlapError("Error: The specified boundary dimensions are too small and overlap with the inner contours. Please provide larger values.")
 
     # Compute the boundary rectangle centered on the inner contours.
     shape_cx = (min_x + max_x) / 2
-    shape_cy = (min_y + max_y) / 2
-    half_w = boundary_width_in / 2
-    half_l = boundary_length_in / 2
-    left = shape_cx - half_w
-    right = shape_cx + half_w
-    bottom = shape_cy - half_l
-    top = shape_cy + half_l
-    rect_coords = [(left, bottom), (right, bottom), (right, top), (left, top), (left, bottom)]
+    left = shape_cx - boundary_width_in / 2
+    right = shape_cx + boundary_width_in / 2
+    # Align bottom to inner bounding box with the desired bottom clearance.
+    bottom = min_y - clearance_bottom
+    top = bottom + boundary_length_in
 
+    rect_coords = [(left, bottom), (right, bottom), (right, top), (left, top), (left, bottom)]
     from shapely.geometry import Polygon as ShapelyPolygon
     boundary_polygon = ShapelyPolygon(rect_coords)
     msp.add_lwpolyline(rect_coords, close=True, dxfattribs={"layer": "BOUNDARY"})
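The sizing rules in this hunk can be exercised standalone. A minimal sketch, assuming only the inner bounding box and the requested boundary size in inches are known; it raises a plain ValueError where the app uses its own BoundaryOverlapError:

from shapely.geometry import Polygon as ShapelyPolygon

def boundary_rect_sketch(min_x, min_y, max_x, max_y,
                         boundary_width_in, boundary_length_in,
                         annotation_text=""):
    # Mirrors the hunk's sizing rules: 0.25 in side/top clearance,
    # 0.75 in bottom clearance when annotation text is present.
    clearance_side, clearance_top = 0.25, 0.25
    clearance_bottom = 0.75 if annotation_text.strip() else 0.25
    inner_width, inner_length = max_x - min_x, max_y - min_y
    if (boundary_width_in <= inner_width + 2 * clearance_side or
            boundary_length_in <= inner_length + clearance_top + clearance_bottom):
        raise ValueError("boundary too small for the inner contours")
    # Center horizontally on the contours; anchor the bottom edge a fixed
    # clearance below them so annotation text has room.
    shape_cx = (min_x + max_x) / 2
    left = shape_cx - boundary_width_in / 2
    right = shape_cx + boundary_width_in / 2
    bottom = min_y - clearance_bottom
    top = bottom + boundary_length_in
    coords = [(left, bottom), (right, bottom), (right, top), (left, top), (left, bottom)]
    return ShapelyPolygon(coords)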
@@ -446,11 +448,16 @@ def predict(
         image = np.array(Image.open(io.BytesIO(base64.b64decode(image))).convert("RGB"))
     except Exception:
         raise ValueError("Invalid base64 image data")
+
     # Apply sharpness enhancement.
     if isinstance(image, np.ndarray):
         pil_image = Image.fromarray(image)
         enhanced_image = ImageEnhance.Sharpness(pil_image).enhance(1.5)
         image = np.array(enhanced_image)
+
+    # ---------------------
+    # 1) Detect the drawer with YOLOWorld
+    # ---------------------
     try:
         t = time.time()
         drawer_img = yolo_detect(image)
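The decode-and-sharpen steps above are self-contained. A minimal sketch of that pipeline (the function name is ours; ImageEnhance.Sharpness(...).enhance(1.0) would return the image unchanged, while 1.5 sharpens moderately):

import base64, io
import numpy as np
from PIL import Image, ImageEnhance

def decode_and_sharpen(image_b64: str, factor: float = 1.5) -> np.ndarray:
    # Decode base64 -> PIL -> RGB numpy array, as in the hunk above.
    arr = np.array(Image.open(io.BytesIO(base64.b64decode(image_b64))).convert("RGB"))
    # enhance(1.0) is a no-op; 1.5 gives the moderate sharpening used here.
    sharpened = ImageEnhance.Sharpness(Image.fromarray(arr)).enhance(factor)
    return np.array(sharpened)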
@@ -462,16 +469,25 @@ def predict(
         print("Image shrinking completed in {:.2f} seconds".format(time.time() - t))
     except DrawerNotDetectedError:
         raise DrawerNotDetectedError("Drawer not detected! Please take another picture with a drawer.")
+
+    # ---------------------
+    # 2) Detect the reference box with YOLO
+    # ---------------------
     try:
         t = time.time()
         reference_obj_img, scaling_box_coords = detect_reference_square(shrunked_img)
         print("Reference square detection completed in {:.2f} seconds".format(time.time() - t))
     except ReferenceBoxNotDetectedError:
         raise ReferenceBoxNotDetectedError("Reference box not detected! Please take another picture with a reference box.")
+
+    # ---------------------
+    # 3) Remove background of the reference box to compute scaling factor
+    # ---------------------
     t = time.time()
     reference_obj_img = make_square(reference_obj_img)
     reference_square_mask = remove_bg_u2netp(reference_obj_img)
     print("Reference image processing completed in {:.2f} seconds".format(time.time() - t))
+
     t = time.time()
     try:
         cv2.imwrite("mask.jpg", cv2.cvtColor(reference_obj_img, cv2.COLOR_RGB2GRAY))
@@ -486,20 +502,21 @@ def predict(
     except Exception as e:
         scaling_factor = None
         print(f"Error calculating scaling factor: {e}")
+
     if scaling_factor is None or scaling_factor == 0:
         scaling_factor = 1.0
         print("Using default scaling factor of 1.0 due to calculation error")
     gc.collect()
     print("Scaling factor determined: {}".format(scaling_factor))
-
+
     # ---------------------
-    #
+    # 4) Optional boundary dimension checks
     # ---------------------
     if add_boundary.lower() == "yes":
         image_height_px, image_width_px = shrunked_img.shape[:2]
         image_height_in = image_height_px * scaling_factor
         image_width_in = image_width_px * scaling_factor
-        #
+        # If units are mm, convert them to inches if the values look small
        if offset_unit.lower() == "mm":
             if boundary_length < 50:
                 boundary_length = boundary_length * 25.4
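The mm handling above (and the offset handling in the next hunk) is a heuristic: in mm mode, a suspiciously small value is assumed to be accidental inches and multiplied by 25.4 before converting back to inches. A minimal sketch with a configurable floor (the hunks use 50 for the boundary dimensions and 1 for the offset; the helper name is ours):

MM_PER_INCH = 25.4

def to_inches(value: float, unit: str, mm_floor: float = 50.0) -> float:
    # Heuristic from the hunks: in mm mode, a value below the floor is
    # treated as accidental inches and scaled up before converting.
    if unit.lower() == "mm":
        if value < mm_floor:
            value *= MM_PER_INCH
        return value / MM_PER_INCH
    return value

# e.g. to_inches(12, "mm")  -> 12.0 (treated as inches typed in mm mode)
#      to_inches(254, "mm") -> 10.0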
@@ -510,49 +527,77 @@ def predict(
         else:
             boundary_length_in = boundary_length
             boundary_width_in = boundary_width
-
-
-
-
+
+        # Basic check so user doesn't request an impossible boundary
+        if boundary_length_in > (image_height_in - 0.5) or boundary_width_in > (image_width_in - 0.5):
+            raise BoundaryExceedsError(
+                "Error: The specified boundary dimensions exceed the allowed image dimensions. Please enter smaller values."
+            )
+
+    # ---------------------
+    # 5) Remove background from the shrunked drawer image (main objects)
+    # ---------------------
     if offset_unit.lower() == "mm":
         if offset_value < 1:
             offset_value = offset_value * 25.4
         offset_inches = offset_value / 25.4
     else:
         offset_inches = offset_value
+
     t = time.time()
     orig_size = shrunked_img.shape[:2]
     objects_mask = remove_bg(shrunked_img)
     processed_size = objects_mask.shape[:2]
+
+    # Exclude the reference box region from the mask
     objects_mask = exclude_scaling_box(objects_mask, scaling_box_coords, orig_size, processed_size, expansion_factor=1.2)
     objects_mask = resize_img(objects_mask, (shrunked_img.shape[1], shrunked_img.shape[0]))
     del scaling_box_coords
     gc.collect()
     print("Object masking completed in {:.2f} seconds".format(time.time() - t))
+
+    # Dilate mask by offset_pixels
     t = time.time()
     offset_pixels = (offset_inches / scaling_factor) * 2 + 1 if scaling_factor != 0 else 1
     dilated_mask = cv2.dilate(objects_mask, np.ones((int(offset_pixels), int(offset_pixels)), np.uint8))
     del objects_mask
     gc.collect()
     print("Mask dilation completed in {:.2f} seconds".format(time.time() - t))
+
     Image.fromarray(dilated_mask).save("./outputs/scaled_mask_new.jpg")
+
+    # ---------------------
+    # 6) Extract outlines from the mask and convert them to DXF splines
+    # ---------------------
     t = time.time()
     outlines, contours = extract_outlines(dilated_mask)
     print("Outline extraction completed in {:.2f} seconds".format(time.time() - t))
+
     output_img = shrunked_img.copy()
     del shrunked_img
     gc.collect()
+
     t = time.time()
     use_finger_clearance = True if finger_clearance.lower() == "yes" else False
-    doc, final_polygons_inch = save_dxf_spline(contours, scaling_factor, processed_size[0], finger_clearance=use_finger_clearance)
+    doc, final_polygons_inch = save_dxf_spline(
+        contours, scaling_factor, processed_size[0], finger_clearance=use_finger_clearance
+    )
     del contours
     gc.collect()
     print("DXF generation completed in {:.2f} seconds".format(time.time() - t))
+
+    # ---------------------
+    # 7) Add optional rectangular boundary (with the 0.75 bottom margin if annotation text is provided)
+    # ---------------------
     boundary_polygon = None
     if add_boundary.lower() == "yes":
-        boundary_polygon = add_rectangular_boundary(doc, final_polygons_inch, boundary_length, boundary_width, offset_unit)
+        boundary_polygon = add_rectangular_boundary(
+            doc, final_polygons_inch, boundary_length, boundary_width, offset_unit, annotation_text
+        )
     if boundary_polygon is not None:
         final_polygons_inch.append(boundary_polygon)
+
+    # Compute bounding box of all polygons to know where to place text
     min_x = float("inf")
     min_y = float("inf")
     max_x = -float("inf")
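Worth spelling out the kernel math in the dilation step above: scaling_factor is inches per pixel, so offset_inches / scaling_factor is the offset in pixels, doubled to cover both sides of each contour and +1 for the center pixel. A minimal sketch (the function name is ours):

import numpy as np
import cv2

def dilate_by_offset(mask: np.ndarray, offset_inches: float, scaling_factor: float) -> np.ndarray:
    # Convert the physical offset to a square structuring-element size:
    # (offset in pixels) on both sides, plus the center pixel.
    k = int((offset_inches / scaling_factor) * 2 + 1) if scaling_factor else 1
    return cv2.dilate(mask, np.ones((k, k), np.uint8))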
@@ -567,36 +612,81 @@ def predict(
             max_x = b[2]
         if b[3] > max_y:
             max_y = b[3]
-
-
-    if
-
-
-
+
+    # ---------------------
+    # 8) Add annotation text (if provided) in the DXF
+    #    - Text is 0.5 inches high
+    #    - Placed so that there is 0.125 inch ABOVE min_y
+    # ---------------------
     msp = doc.modelspace()
     if annotation_text.strip():
+        text_x = (min_x + max_x) / 2.0  # horizontally centered
+        text_height_dxf = 0.5
+
+        # If boundary is "yes", we'll place the baseline right above the min_y (0.125 above)
+        if add_boundary.lower() == "yes":
+            text_y_dxf = min_y + 0.125
+        else:
+            # If no boundary, place it 0.125 above the min_y
+            text_y_dxf = min_y + 0.125
+
         text_entity = msp.add_text(
             annotation_text.strip(),
             dxfattribs={
-                "height":
+                "height": text_height_dxf,
                 "layer": "ANNOTATION",
                 "style": "Bold"
             }
         )
-        text_entity.dxf.insert = (text_x,
+        text_entity.dxf.insert = (text_x, text_y_dxf)
+
+    # Save the DXF
     dxf_filepath = os.path.join("./outputs", "out.dxf")
     doc.saveas(dxf_filepath)
-    draw_polygons_inch(final_polygons_inch, output_img, scaling_factor, processed_size[0], color=(0,0,255), thickness=2)
+
+    # ---------------------
+    # 9) For the preview images, draw the polygons and place text similarly
+    # ---------------------
+    draw_polygons_inch(final_polygons_inch, output_img, scaling_factor, processed_size[0], color=(0, 0, 255), thickness=2)
     new_outlines = np.ones_like(output_img) * 255
-    draw_polygons_inch(final_polygons_inch, new_outlines, scaling_factor, processed_size[0], color=(0,0,255), thickness=2)
+    draw_polygons_inch(final_polygons_inch, new_outlines, scaling_factor, processed_size[0], color=(0, 0, 255), thickness=2)
+
     if annotation_text.strip():
-
-
-
-
-
+        text_height_cv = 0.5  # same "logical" height in inches
+        if add_boundary.lower() == "yes":
+            text_y_in = min_y + 0.125
+        else:
+            text_y_in = min_y + 0.125
+
+        # Convert to pixel coords
+        text_px = int(((min_x + max_x) / 2.0) / scaling_factor)
+        text_py = int(processed_size[0] - (text_y_in / scaling_factor))
+        org = (text_px - int(len(annotation_text.strip()) * 6), text_py)  # shift left so text is centered
+
+        cv2.putText(
+            output_img,
+            annotation_text.strip(),
+            org,
+            cv2.FONT_HERSHEY_SIMPLEX,
+            1.3,  # scale factor
+            (0, 0, 255),
+            3,
+            cv2.LINE_AA
+        )
+        cv2.putText(
+            new_outlines,
+            annotation_text.strip(),
+            org,
+            cv2.FONT_HERSHEY_SIMPLEX,
+            1.3,
+            (0, 0, 255),
+            3,
+            cv2.LINE_AA
+        )
+
     outlines_color = cv2.cvtColor(new_outlines, cv2.COLOR_BGR2RGB)
     print("Total prediction time: {:.2f} seconds".format(time.time() - overall_start))
+
     return (
         cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB),
         outlines_color,
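The preview-text placement at the end converts DXF inches back to image pixels; since DXF y grows upward while image rows grow downward, the y value is subtracted from the pixel height (processed_size[0] in the hunk). A minimal sketch of that conversion, under the assumption that scaling_factor is inches per pixel (the function name is ours):

def inches_to_pixels(x_in: float, y_in: float, scaling_factor: float, image_height_px: int):
    # x maps directly; y is flipped against the image height because
    # DXF and image coordinates run in opposite vertical directions.
    x_px = int(x_in / scaling_factor)
    y_px = int(image_height_px - (y_in / scaling_factor))
    return x_px, y_px

The per-character "* 6" shift in the hunk is only a rough horizontal centering for cv2.putText; cv2.getTextSize would give an exact width if precise centering mattered.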