Update app.py

app.py CHANGED
@@ -13,7 +13,7 @@ from ezdxf.addons.text2path import make_paths_from_str
 from ezdxf import path
 from ezdxf.addons import text2path
 from ezdxf.enums import TextEntityAlignment
-from ezdxf.fonts.fonts import FontFace,get_font_face
+from ezdxf.fonts.fonts import FontFace, get_font_face
 import gradio as gr
 from PIL import Image, ImageEnhance
 from pathlib import Path
@@ -155,7 +155,7 @@ def yolo_detect(image: Union[str, Path, int, Image.Image, list, tuple, np.ndarray
 
 def detect_reference_square(img: np.ndarray):
     t = time.time()
-    res = reference_detector_global.predict(img, conf=0.
+    res = reference_detector_global.predict(img, conf=0.15)
     if not res or len(res) == 0 or len(res[0].boxes) == 0:
         raise ReferenceBoxNotDetectedError("Reference box not detected in the image.")
     print("Reference detection completed in {:.2f} seconds".format(time.time() - t))
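For context on the change above: `detect_reference_square` only raises when the detector returns no boxes at all, so `conf` is the single knob deciding how weak a detection is still accepted. A minimal sketch of how such a result is typically consumed, assuming `reference_detector_global` wraps an Ultralytics YOLO model (the weights path and the coordinate extraction below are illustrative, not taken from app.py):

```python
# Illustrative only: assumes an Ultralytics YOLO detector; the checkpoint name is hypothetical.
import numpy as np
from ultralytics import YOLO

reference_detector_global = YOLO("reference_detector.pt")  # assumed weights file

def detect_reference_square(img: np.ndarray):
    # conf=0.15 keeps fairly weak candidates; raising it trades recall for precision.
    res = reference_detector_global.predict(img, conf=0.15)
    if not res or len(res) == 0 or len(res[0].boxes) == 0:
        raise RuntimeError("Reference box not detected in the image.")
    # First returned box as [x1, y1, x2, y2] pixel coordinates.
    return res[0].boxes.xyxy[0].cpu().numpy()
```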
@@ -286,35 +286,39 @@ def polygon_to_exterior_coords(poly: Polygon):
         return []
     return list(poly.exterior.coords)
 
-def 
+def place_finger_cut_adjusted(tool_polygon, points_inch, existing_centers, all_polygons, circle_diameter=1.0, min_gap=0.25, max_attempts=30):
     import random
     needed_center_distance = circle_diameter + min_gap
     radius = circle_diameter / 2.0
-    …
+    attempts = 0
+    indices = list(range(len(points_inch)))
+    random.shuffle(indices)  # Shuffle indices for randomness
+
+    for i in indices:
+        if attempts >= max_attempts:
+            break
+        cx, cy = points_inch[i]
+        # Try small adjustments around the chosen candidate
+        for dx in np.linspace(-0.1, 0.1, 5):
+            for dy in np.linspace(-0.1, 0.1, 5):
+                candidate_center = (cx + dx, cy + dy)
+                # Check distance from already placed centers
+                if any(np.hypot(candidate_center[0] - ex, candidate_center[1] - ey) < needed_center_distance for ex, ey in existing_centers):
+                    continue
+                circle_poly = Point(candidate_center).buffer(radius, resolution=64)
+                union_poly = tool_polygon.union(circle_poly)
+                overlap = False
+                # Check against other tool polygons for overlap or proximity issues
+                for poly in all_polygons:
+                    if union_poly.intersects(poly) or circle_poly.buffer(min_gap).intersects(poly):
+                        overlap = True
+                        break
+                if overlap:
+                    continue
+                # If candidate passes, accept it
+                existing_centers.append(candidate_center)
+                return union_poly, candidate_center
+        attempts += 1
     print("Warning: Could not place a finger cut circle meeting all spacing requirements.")
     return None, None
 
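The heart of the new helper is a Shapely construction: buffer a candidate point into a circle, union it onto the tool outline, and reject the candidate if the merged shape would crowd any other polygon on the sheet. A stripped-down sketch of just that test, with toy geometry (all coordinates below are illustrative, not from app.py):

```python
# Minimal Shapely sketch of the finger-cut placement test used in place_finger_cut_adjusted.
from shapely.geometry import Point, Polygon

tool_polygon = Polygon([(0, 0), (4, 0), (4, 3), (0, 3)])        # toy tool outline, inches
other_polygons = [Polygon([(6, 0), (8, 0), (8, 2), (6, 2)])]    # another tool already placed

circle_diameter, min_gap = 1.0, 0.25
radius = circle_diameter / 2.0

candidate_center = (2.0, 3.0)                    # a point on the tool edge
circle = Point(candidate_center).buffer(radius, resolution=64)
union_poly = tool_polygon.union(circle)          # outline with the finger cut merged in

# Reject the candidate if the merged shape touches, or comes within
# min_gap of, any other tool polygon.
too_close = any(
    union_poly.intersects(p) or circle.buffer(min_gap).intersects(p)
    for p in other_polygons
)
print(too_close)  # False for this toy layout, so the candidate would be accepted
```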
@@ -340,7 +344,7 @@ def save_dxf_spline(inflated_contours, scaling_factor, height, finger_clearance=
         points_inch.append(points_inch[0])
         tool_polygon = build_tool_polygon(points_inch)
         if finger_clearance:
-            union_poly, center = 
+            union_poly, center = place_finger_cut_adjusted(tool_polygon, points_inch, finger_cut_centers, final_polygons_inch, circle_diameter=1.0, min_gap=0.25, max_attempts=30)
             if union_poly is not None:
                 tool_polygon = union_poly
         exterior_coords = polygon_to_exterior_coords(tool_polygon)
@@ -391,11 +395,6 @@ def add_rectangular_boundary(doc, polygons_inch, boundary_length, boundary_width
     if annotation_text.strip():
         clearance_tb = 0.75
 
-    # Check if boundary dimensions are at least larger than inner box plus clearance
-    # if boundary_width_in <= inner_width + 2 * clearance_side or boundary_length_in <= inner_length + 2 * clearance_tb:
-    #     raise BoundaryOverlapError("Error: The specified boundary dimensions are too small and overlap with the inner contours. Please provide larger values.")
-
-
     # Calculate center of inner contours
     center_x = (min_x + max_x) / 2
     center_y = (min_y + max_y) / 2
@@ -411,8 +410,8 @@ def add_rectangular_boundary(doc, polygons_inch, boundary_length, boundary_width
     boundary_polygon = ShapelyPolygon(rect_coords)
     msp.add_lwpolyline(rect_coords, close=True, dxfattribs={"layer": "BOUNDARY"})
 
-    text_top=boundary_polygon.bounds[1] + 1
-    if text_top > (min_y-0.75):
+    text_top = boundary_polygon.bounds[1] + 1
+    if text_top > (min_y - 0.75):
         raise TextOverlapError("Error: The Text is overlapping the inner contours of the object.")
 
     return boundary_polygon
@@ -561,7 +560,7 @@ def predict(
     objects_mask = remove_bg(shrunked_img)
     processed_size = objects_mask.shape[:2]
 
-    objects_mask = exclude_scaling_box(objects_mask, scaling_box_coords, orig_size, processed_size, expansion_factor=
+    objects_mask = exclude_scaling_box(objects_mask, scaling_box_coords, orig_size, processed_size, expansion_factor=2)
     objects_mask = resize_img(objects_mask, (shrunked_img.shape[1], shrunked_img.shape[0]))
     del scaling_box_coords
     gc.collect()
@@ -635,19 +634,9 @@ def predict(
     msp = doc.modelspace()
 
     if annotation_text.strip():
-
         text_x = ((inner_min_x + inner_max_x) / 2.0) - (int(len(annotation_text.strip()) / 2.0))
         text_height_dxf = 0.75
-        text_y_dxf = boundary_polygon.bounds[1] + 0.25
-        # text_entity = msp.add_text(
-        #     annotation_text.strip().upper(),
-        #     dxfattribs={
-        #         "height": text_height_dxf,
-        #         "layer": "ANNOTATION",
-        #         "style": "Simplex",
-        #     }
-        # )
-        # text_entity.dxf.insert = (text_x, text_y_dxf)
+        text_y_dxf = boundary_polygon.bounds[1] + 0.25
         font = get_font_face("Arial")
         paths = text2path.make_paths_from_str(
             annotation_text.strip().upper(),
@@ -658,7 +647,7 @@ def predict(
 
         # Create a translation matrix
         translation = ezdxf.math.Matrix44.translate(text_x, text_y_dxf, 0)
-
+        # Apply the translation to each path
         translated_paths = [p.transform(translation) for p in paths]
 
         # Render the paths as splines and polylines
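Taken together, the annotation hunks swap the commented-out `msp.add_text(...)` entity for text rendered as outline geometry: `get_font_face` picks the font, `make_paths_from_str` converts the string into `Path` objects, a `Matrix44` translation moves them to the insertion point, and the paths are then emitted as splines/polylines. A condensed, self-contained sketch of that flow, assuming ezdxf 1.x; the `render_splines_and_polylines` call and the size/layer values are assumptions, since the diff cuts off before the rendering step:

```python
# Hedged sketch of the text-as-paths flow; size, layer, and insertion point are illustrative.
import ezdxf
from ezdxf import path
from ezdxf.addons import text2path
from ezdxf.fonts.fonts import get_font_face

doc = ezdxf.new()
doc.layers.add("ANNOTATION")
msp = doc.modelspace()

font = get_font_face("Arial")                 # same call as in the diff
paths = text2path.make_paths_from_str(
    "EXAMPLE TEXT",
    font,
    size=0.75,                                # matches text_height_dxf above
)

# Move the glyph outlines to the desired insertion point.
translation = ezdxf.math.Matrix44.translate(2.0, 0.25, 0)
translated_paths = [p.transform(translation) for p in paths]

# Emit the outlines as SPLINE/LWPOLYLINE entities on the annotation layer.
path.render_splines_and_polylines(
    msp, translated_paths, dxfattribs={"layer": "ANNOTATION"}
)
doc.saveas("annotation_demo.dxf")
```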
@@ -679,33 +668,6 @@ def predict(
     new_outlines = np.ones_like(output_img) * 255
     draw_polygons_inch(final_polygons_inch, new_outlines, scaling_factor, processed_size[0], color=(0, 0, 255), thickness=2)
 
-    # if annotation_text.strip():
-    #     text_height_cv = 0.75
-    #     text_x_img = int(((inner_min_x + inner_max_x) / 2.0) / scaling_factor)
-    #     text_y_in = boundary_polygon.bounds[1] + 0.25 #+ text_height_cv#inner_min_y - 0.125 - text_height_cv
-    #     text_y_img = int(processed_size[0] - (text_y_in / scaling_factor))
-    #     org = (text_x_img - int(len(annotation_text.strip()) * 6), text_y_img)
-
-    #     cv2.putText(
-    #         output_img,
-    #         annotation_text.strip().upper(),
-    #         org,
-    #         cv2.FONT_HERSHEY_SIMPLEX,
-    #         1.2,
-    #         (0, 0, 255),
-    #         2,
-    #         cv2.LINE_AA
-    #     )
-    #     cv2.putText(
-    #         new_outlines,
-    #         annotation_text.strip().upper(),
-    #         org,
-    #         cv2.FONT_HERSHEY_SIMPLEX,
-    #         1.2,
-    #         (0, 0, 255),
-    #         2,
-    #         cv2.LINE_AA
-    #     )
     if annotation_text.strip():
         text_height_cv = 0.75
         text_x_img = int(((inner_min_x + inner_max_x) / 2.0) / scaling_factor)
@@ -715,10 +677,8 @@ def predict(
 
         # Method 2: Use two different thicknesses
         # Draw thicker outline
-        # Create a clean temporary image for drawing the text outline
         temp_img = np.zeros_like(output_img)
 
-        # Draw thick text on temp image
         cv2.putText(
             temp_img,
             annotation_text.strip().upper(),
@@ -730,7 +690,6 @@ def predict(
             cv2.LINE_AA
         )
 
-        # Draw inner text in black on temp image
         cv2.putText(
             temp_img,
             annotation_text.strip().upper(),
@@ -742,27 +701,11 @@ def predict(
             cv2.LINE_AA
         )
 
-        # Create a mask from the temp image
         outline_mask = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
         _, outline_mask = cv2.threshold(outline_mask, 1, 255, cv2.THRESH_BINARY)
 
-        # Apply only the red channel from the temp image to the output image
-        # This preserves the original background where the outline isn't
         output_img[outline_mask > 0] = temp_img[outline_mask > 0]
 
-        # Draw inner part with background color
-        # cv2.putText(
-        #     output_img,
-        #     annotation_text.strip().upper(),
-        #     org,
-        #     cv2.FONT_HERSHEY_SIMPLEX,
-        #     2,
-        #     (255, 255, 255), # Assuming black background, adjust as needed
-        #     2, # Thinner inner part
-        #     cv2.LINE_AA
-        # )
-
-        # Do the same for new_outlines
         cv2.putText(
             new_outlines,
             annotation_text.strip().upper(),
@@ -780,7 +723,7 @@ def predict(
             org,
             cv2.FONT_HERSHEY_SIMPLEX,
             2,
-            (255, 255, 255), #
+            (255, 255, 255), # Inner text in white
             2, # Thinner inner part
             cv2.LINE_AA
         )
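On the raster preview side, the retained code uses a two-pass compositing trick: both text passes (a thick one and a thin one) are drawn on a black scratch image, a binary mask is thresholded from it, and only the masked pixels are copied into the preview, so the drawing underneath is never painted over. A self-contained sketch of the technique; the image size, colors, and thicknesses here are illustrative, since the thick pass's parameters fall outside the hunks:

```python
# Standalone demonstration of the two-pass outlined-text compositing used in predict().
import cv2
import numpy as np

output_img = np.full((400, 900, 3), 255, dtype=np.uint8)   # stand-in preview image
text, org = "EXAMPLE TEXT", (50, 200)

# Draw the text twice on a black scratch image: thick red pass, then thin white pass.
temp_img = np.zeros_like(output_img)
cv2.putText(temp_img, text, org, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 8, cv2.LINE_AA)
cv2.putText(temp_img, text, org, cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)

# Any non-black pixel on the scratch image belongs to the text.
outline_mask = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
_, outline_mask = cv2.threshold(outline_mask, 1, 255, cv2.THRESH_BINARY)

# Copy only the text pixels, leaving the rest of the preview untouched.
output_img[outline_mask > 0] = temp_img[outline_mask > 0]
cv2.imwrite("outlined_text_preview.png", output_img)
```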