umairahmad1789 committed
Commit 8ca529b · verified · 1 Parent(s): a3f6ca3

Update app.py

Files changed (1)
  1. app.py +194 -55
app.py CHANGED
@@ -2,6 +2,7 @@ import os
 from pathlib import Path
 from typing import List, Union
 from PIL import Image
+import ezdxf.units
 import numpy as np
 import torch
 from torchvision import transforms
@@ -14,7 +15,25 @@ import ezdxf
 import gradio as gr
 import gc
 from scalingtestupdated import calculate_scaling_factor
+from scipy.interpolate import splprep, splev
+from scipy.ndimage import gaussian_filter1d
 
+birefnet = AutoModelForImageSegmentation.from_pretrained(
+    "zhengpeng7/BiRefNet", trust_remote_code=True
+)
+
+device = "cpu"
+torch.set_float32_matmul_precision(["high", "highest"][0])
+
+birefnet.to(device)
+birefnet.eval()
+transform_image = transforms.Compose(
+    [
+        transforms.Resize((1024, 1024)),
+        transforms.ToTensor(),
+        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+    ]
+)
 
 
 def yolo_detect(
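Review note: this hunk moves the BiRefNet weights and the preprocessing transform from the body of `remove_bg` to module scope, so the model is loaded once at import instead of on every call. A minimal sketch of the same idea with lazy, cached loading (the helper name is hypothetical, not part of this commit):

```python
from functools import lru_cache

from transformers import AutoModelForImageSegmentation


@lru_cache(maxsize=1)
def get_birefnet(device: str = "cpu"):
    # First call downloads/initializes the model; later calls reuse it.
    model = AutoModelForImageSegmentation.from_pretrained(
        "zhengpeng7/BiRefNet", trust_remote_code=True
    )
    model.to(device)
    model.eval()
    return model
```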
@@ -36,23 +55,6 @@ def yolo_detect(
 
 
 def remove_bg(image: np.ndarray) -> np.ndarray:
-    birefnet = AutoModelForImageSegmentation.from_pretrained(
-        "zhengpeng7/BiRefNet", trust_remote_code=True
-    )
-
-    device = "cpu"
-    torch.set_float32_matmul_precision(["high", "highest"][0])
-
-    birefnet.to(device)
-    birefnet.eval()
-    transform_image = transforms.Compose(
-        [
-            transforms.Resize((1024, 1024)),
-            transforms.ToTensor(),
-            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-        ]
-    )
-
     image = Image.fromarray(image)
     input_images = transform_image(image).unsqueeze(0).to("cpu")
 
@@ -62,16 +64,62 @@ def remove_bg(image: np.ndarray) -> np.ndarray:
     pred = preds[0].squeeze()
 
     # Show Results
-    pred_pil = transforms.ToPILImage()(pred)
+    pred_pil: Image.Image = transforms.ToPILImage()(pred)
+    print(pred_pil)
     # Scale proportionally with max length to 1024 for faster showing
     scale_ratio = 1024 / max(image.size)
     scaled_size = (int(image.size[0] * scale_ratio), int(image.size[1] * scale_ratio))
-
-    del birefnet
 
     return np.array(pred_pil.resize(scaled_size))
 
-def exclude_scaling_box(image: np.ndarray, bbox: np.ndarray, orig_size: tuple, processed_size: tuple, expansion_factor: float = 1.5) -> np.ndarray:
+
+def make_square(img: np.ndarray):
+    # Get dimensions
+    height, width = img.shape[:2]
+
+    # Find the larger dimension
+    max_dim = max(height, width)
+
+    # Calculate padding
+    pad_height = (max_dim - height) // 2
+    pad_width = (max_dim - width) // 2
+
+    # Handle odd dimensions
+    pad_height_extra = max_dim - height - 2 * pad_height
+    pad_width_extra = max_dim - width - 2 * pad_width
+
+    # Create padding with edge colors
+    if len(img.shape) == 3:  # Color image
+        # Pad the image
+        padded = np.pad(
+            img,
+            (
+                (pad_height, pad_height + pad_height_extra),
+                (pad_width, pad_width + pad_width_extra),
+                (0, 0),
+            ),
+            mode="edge",
+        )
+    else:  # Grayscale image
+        padded = np.pad(
+            img,
+            (
+                (pad_height, pad_height + pad_height_extra),
+                (pad_width, pad_width + pad_width_extra),
+            ),
+            mode="edge",
+        )
+
+    return padded
+
+
+def exclude_scaling_box(
+    image: np.ndarray,
+    bbox: np.ndarray,
+    orig_size: tuple,
+    processed_size: tuple,
+    expansion_factor: float = 1.5,
+) -> np.ndarray:
     # Unpack the bounding box
     x_min, y_min, x_max, y_max = map(int, bbox)
 
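Review note: `make_square` pads with `mode="edge"` (replicating border pixels) rather than zeros, so the added margin blends with the background instead of introducing hard black borders that the segmentation model could latch onto. A quick self-contained check of the padding arithmetic, following the same logic as the new function:

```python
import numpy as np

# A 3x5 image becomes 5x5, with the original pixels centered inside.
img = np.arange(15, dtype=np.uint8).reshape(3, 5)
max_dim = max(img.shape[:2])
pad_h = (max_dim - img.shape[0]) // 2
pad_w = (max_dim - img.shape[1]) // 2
extra_h = max_dim - img.shape[0] - 2 * pad_h
extra_w = max_dim - img.shape[1] - 2 * pad_w
padded = np.pad(
    img, ((pad_h, pad_h + extra_h), (pad_w, pad_w + extra_w)), mode="edge"
)
assert padded.shape == (5, 5)
assert (padded[pad_h:pad_h + 3, pad_w:pad_w + 5] == img).all()
```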
@@ -89,9 +137,13 @@ def exclude_scaling_box(image: np.ndarray, bbox: np.ndarray, orig_size: tuple, p
     box_width = x_max - x_min
     box_height = y_max - y_min
     expanded_x_min = max(0, int(x_min - (expansion_factor - 1) * box_width / 2))
-    expanded_x_max = min(image.shape[1], int(x_max + (expansion_factor - 1) * box_width / 2))
+    expanded_x_max = min(
+        image.shape[1], int(x_max + (expansion_factor - 1) * box_width / 2)
+    )
     expanded_y_min = max(0, int(y_min - (expansion_factor - 1) * box_height / 2))
-    expanded_y_max = min(image.shape[0], int(y_max + (expansion_factor - 1) * box_height / 2))
+    expanded_y_max = min(
+        image.shape[0], int(y_max + (expansion_factor - 1) * box_height / 2)
+    )
 
     # Black out the expanded region
     image[expanded_y_min:expanded_y_max, expanded_x_min:expanded_x_max] = 0
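Review note: the expansion keeps the box center fixed and scales its width and height by `expansion_factor` (before clamping to the image bounds). A worked example with the `expansion_factor=3.0` that `predict` now passes:

```python
# Worked example of the expansion arithmetic used above.
expansion_factor = 3.0
x_min, x_max = 200, 300  # a 100 px wide reference box
box_width = x_max - x_min
expanded_x_min = int(x_min - (expansion_factor - 1) * box_width / 2)  # 100
expanded_x_max = int(x_max + (expansion_factor - 1) * box_width / 2)  # 400
assert (expanded_x_max - expanded_x_min) == expansion_factor * box_width
```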
@@ -99,6 +151,61 @@ def exclude_scaling_box(image: np.ndarray, bbox: np.ndarray, orig_size: tuple, p
     return image
 
 
+def resample_contour(contour):
+    # ---------------------------------------------------------------------------------------- #
+    # Get all the parameters at the start:
+    num_points = 1000
+    smoothing_factor = 5
+
+    smoothed_x_sigma = 1
+    smoothed_y_sigma = 1
+    # ---------------------------------------------------------------------------------------- #
+    contour = contour[:, 0, :]
+
+    tck, _ = splprep([contour[:, 0], contour[:, 1]], s=smoothing_factor)
+
+    u = np.linspace(0, 1, num_points)
+    resampled_points = splev(u, tck)
+
+    smoothed_x = gaussian_filter1d(resampled_points[0], sigma=smoothed_x_sigma)
+    smoothed_y = gaussian_filter1d(resampled_points[1], sigma=smoothed_y_sigma)
+
+    return np.array([smoothed_x, smoothed_y]).T
+
+
+def save_dxf_spline(inflated_contours, scaling_factor, height):
+    # ---------------------------------------------------------------------------------------- #
+    # Get all the parameters at the start:
+    degree = 3
+    closed = True
+    # ---------------------------------------------------------------------------------------- #
+
+    doc = ezdxf.new(units=0)
+    doc.units = ezdxf.units.IN
+    doc.header["$INSUNITS"] = ezdxf.units.IN
+
+    msp = doc.modelspace()
+
+    for contour in inflated_contours:
+        resampled_contour = resample_contour(contour)
+        points = [
+            (x * scaling_factor, (height - y) * scaling_factor)
+            for x, y in resampled_contour
+        ]
+        if len(points) >= 3:
+            # Manually close the contour in case it hasn't been closed upstream.
+            if np.linalg.norm(np.array(points[0]) - np.array(points[-1])) > 1e-2:
+                points.append(points[0])
+
+            spline = msp.add_spline(points, degree=degree)
+            spline.closed = closed
+
+    # Save the DXF file
+    dxf_filepath = os.path.join("./outputs", "out.dxf")
+    doc.saveas(dxf_filepath)
+    return dxf_filepath
+
+
 def extract_outlines(binary_image: np.ndarray) -> np.ndarray:
     """
     Extracts and draws the outlines of masks from a binary image.
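Review note: `resample_contour` fits a parametric B-spline through the OpenCV contour (`splprep`), samples it at 1000 evenly spaced parameter values (`splev`), and then low-passes each coordinate with a Gaussian. A self-contained sketch of that pipeline on a synthetic noisy circle, with the same default values:

```python
import numpy as np
from scipy.interpolate import splprep, splev
from scipy.ndimage import gaussian_filter1d

theta = np.linspace(0, 2 * np.pi, 50, endpoint=False)
x = 100 + 40 * np.cos(theta) + np.random.default_rng(0).normal(0, 0.5, 50)
y = 100 + 40 * np.sin(theta) + np.random.default_rng(1).normal(0, 0.5, 50)

tck, _ = splprep([x, y], s=5)        # smoothing spline through the points
u = np.linspace(0, 1, 1000)
rx, ry = splev(u, tck)               # 1000 evenly spaced samples
rx = gaussian_filter1d(rx, sigma=1)  # extra low-pass on each coordinate
ry = gaussian_filter1d(ry, sigma=1)
resampled = np.array([rx, ry]).T     # shape (1000, 2), like resample_contour
```

Note also that `save_dxf_spline` maps `(x, y)` to `(x, height - y)` before scaling: image rows grow downward while the DXF y axis grows upward, so flipping about the image height keeps the geometry upright in CAD.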
@@ -109,26 +216,21 @@ def extract_outlines(binary_image: np.ndarray) -> np.ndarray:
     """
     # Detect contours from the binary image
     contours, _ = cv2.findContours(
-        binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+        binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
     )
 
+    # smooth_contours_list = []
+    # for contour in contours:
+    #     smooth_contours_list.append(smooth_contours(contour))
     # Create a blank image to draw contours
     outline_image = np.zeros_like(binary_image)
-    # Smooth the contours
-    smoothed_contours = []
-    for contour in contours:
-        # Calculate epsilon for approxPolyDP
-        epsilon = 0.002 * cv2.arcLength(contour, True)
-        # Approximate the contour with fewer points
-        smoothed_contour = cv2.approxPolyDP(contour, epsilon, True)
-        smoothed_contours.append(smoothed_contour)
 
     # Draw the contours on the blank image
     cv2.drawContours(
-        outline_image, smoothed_contours, -1, (255), thickness=1
+        outline_image, contours, -1, (255), thickness=1
     )  # White color for outlines
 
-    return cv2.bitwise_not(outline_image), smoothed_contours
+    return cv2.bitwise_not(outline_image), contours
 
 
 def shrink_bbox(image: np.ndarray, shrink_factor: float):
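Review note: switching `cv2.CHAIN_APPROX_SIMPLE` to `cv2.CHAIN_APPROX_NONE` keeps every boundary pixel instead of collapsing straight runs to their endpoints, which gives the spline fit in `resample_contour` a dense point set to work with. A small comparison sketch:

```python
import cv2
import numpy as np

# SIMPLE collapses straight segments to endpoints; NONE keeps every pixel.
mask = np.zeros((50, 50), np.uint8)
cv2.rectangle(mask, (10, 10), (40, 40), 255, -1)

simple, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
dense, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print(len(simple[0]), len(dense[0]))  # e.g. 4 corner points vs ~120 boundary pixels
```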
@@ -162,7 +264,7 @@ def shrink_bbox(image: np.ndarray, shrink_factor: float):
     # lower_range_tuple = (0)
 
     # doc = ezdxf.new('R2010')
-    # msp = doc.modelspace()
+    # msp = doc.modelspace()
     # masked_jpg = cv2.inRange(outlines,lower_range_tuple, upper_range_tuple)
 
     # for i in range(0,masked_jpg.shape[0]):
@@ -173,6 +275,7 @@ def shrink_bbox(image: np.ndarray, shrink_factor: float):
     # doc.saveas("./outputs/out.dxf")
     # return "./outputs/out.dxf"
 
+
 def to_dxf(contours):
     doc = ezdxf.new()
     msp = doc.modelspace()
@@ -180,10 +283,11 @@ def to_dxf(contours):
     for contour in contours:
         points = [(point[0][0], point[0][1]) for point in contour]
         msp.add_lwpolyline(points, close=True)  # Add a polyline for each contour
-
+
     doc.saveas("./outputs/out.dxf")
     return "./outputs/out.dxf"
 
+
 def smooth_contours(contour):
     epsilon = 0.01 * cv2.arcLength(contour, True)  # Adjust factor (e.g., 0.01)
     return cv2.approxPolyDP(contour, epsilon, True)
@@ -224,40 +328,68 @@ def detect_reference_square(img) -> np.ndarray:
     box_detector = YOLO("./last.pt")
     res = box_detector.predict(img)
     del box_detector
-    return save_one_box(res[0].cpu().boxes.xyxy, res[0].orig_img, save=False), res[0].cpu().boxes.xyxy[0
-    ]
+    return save_one_box(res[0].cpu().boxes.xyxy, res[0].orig_img, save=False), res[
+        0
+    ].cpu().boxes.xyxy[0]
 
 
-def predict(image):
+def resize_img(img: np.ndarray, resize_dim):
+    return np.array(Image.fromarray(img).resize(resize_dim))
+
+
+def predict(image, offset_inches):
     drawer_img = yolo_detect(image, ["box"])
-    shrunked_img = shrink_bbox(drawer_img, 0.8)
+    shrunked_img = make_square(shrink_bbox(drawer_img, 0.8))
     # Detect the scaling reference square
     reference_obj_img, scaling_box_coords = detect_reference_square(shrunked_img)
-    reference_obj_img_scaled = shrink_bbox(reference_obj_img, 1.2)
+    # reference_obj_img_scaled = shrink_bbox(reference_obj_img, 1.2)
+    # make the image square so it does not affect the size of objects
+    reference_obj_img = make_square(reference_obj_img)
+    reference_square_mask = remove_bg(reference_obj_img)
+
+    # make the mask the same size as the original image
+    reference_square_mask = resize_img(
+        reference_square_mask, (reference_obj_img.shape[1], reference_obj_img.shape[0])
+    )
+
     try:
         scaling_factor = calculate_scaling_factor(
             reference_image_path="./Reference_ScalingBox.jpg",
-            target_image=reference_obj_img_scaled,
-            feature_detector="SIFT",
+            target_image=reference_square_mask,
+            feature_detector="ORB",
         )
     except:
         scaling_factor = 1.0
+
     # Save original size before `remove_bg` processing
     orig_size = shrunked_img.shape[:2]
     # Generate foreground mask and save its size
     objects_mask = remove_bg(shrunked_img)
+
     processed_size = objects_mask.shape[:2]
     # Exclude scaling box region from objects mask
     objects_mask = exclude_scaling_box(
-        objects_mask, scaling_box_coords, orig_size, processed_size, expansion_factor=3.0
+        objects_mask,
+        scaling_box_coords,
+        orig_size,
+        processed_size,
+        expansion_factor=3.0,
+    )
+    objects_mask = resize_img(
+        objects_mask, (shrunked_img.shape[1], shrunked_img.shape[0])
+    )
+    offset_pixels = offset_inches / scaling_factor
+    dilated_mask = cv2.dilate(
+        objects_mask, np.ones((int(offset_pixels), int(offset_pixels)), np.uint8)
     )
-    # Scale the object mask according to scaling factor
-    objects_mask_scaled = scale_image(objects_mask, scaling_factor)
-    Image.fromarray(objects_mask_scaled).save("./outputs/scaled_mask_new.jpg")
-    outlines, contours = extract_outlines(objects_mask_scaled)
-    dxf = to_dxf(contours)
 
-    return outlines, dxf, objects_mask, scaling_factor, reference_obj_img_scaled
+    # Scale the object mask according to scaling factor
+    # objects_mask_scaled = scale_image(objects_mask, scaling_factor)
+    Image.fromarray(dilated_mask).save("./outputs/scaled_mask_new.jpg")
+    outlines, contours = extract_outlines(dilated_mask)
+    dxf = save_dxf_spline(contours, scaling_factor, processed_size[0])
 
+    return outlines, dxf, dilated_mask, scaling_factor, reference_obj_img
 
 
 if __name__ == "__main__":
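Review note: the new offset input is converted from inches to pixels through the scaling factor and applied as a morphological dilation, growing every object outline outward before it is traced into the DXF. A minimal sketch of that conversion and its effect (the `scaling_factor` value here is an assumption for illustration):

```python
import cv2
import numpy as np

scaling_factor = 0.01  # assumed: 1 px == 0.01 in
offset_inches = 0.075
offset_pixels = offset_inches / scaling_factor  # 7.5 px

mask = np.zeros((64, 64), np.uint8)
cv2.circle(mask, (32, 32), 10, 255, -1)

# Dilating with an NxN kernel grows the shape by roughly N/2 px per side.
kernel = np.ones((int(offset_pixels), int(offset_pixels)), np.uint8)
dilated = cv2.dilate(mask, kernel)
print(mask.sum() // 255, dilated.sum() // 255)  # dilated area is larger
```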
@@ -265,15 +397,22 @@ if __name__ == "__main__":
 
     ifer = gr.Interface(
         fn=predict,
-        inputs=[gr.Image(label="Input Image")],
+        inputs=[gr.Image(label="Input Image"), gr.Number(label="Offset value for Mask (inches)", value=0.075)],
         outputs=[
             gr.Image(label="Output Image"),
             gr.File(label="DXF file"),
             gr.Image(label="Mask"),
-            gr.Textbox(label="Scaling Factor(mm)", placeholder="Every pixel is equal to mentioned number in mm(milimeter)"),
-            gr.Image(label="Image used for calculating scaling factor")
+            gr.Textbox(
+                label="Scaling Factor (mm)",
+                placeholder="Every pixel equals this number of millimeters",
+            ),
+            gr.Image(label="Image used for calculating scaling factor"),
         ],
-        examples=["./examples/Test20.jpg", "./examples/Test21.jpg", "./examples/Test22.jpg", "./examples/Test23.jpg"]
+        examples=[
+            ["./examples/Test20.jpg", 0.075],
+            ["./examples/Test21.jpg", 0.075],
+            ["./examples/Test22.jpg", 0.075],
+            ["./examples/Test23.jpg", 0.075],
+        ],
     )
     ifer.launch(share=True)
-
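Review note: because `predict` now takes two inputs, each entry in `examples` must be an `[image_path, offset]` pair rather than a bare path; Gradio matches example values to inputs positionally. A stripped-down sketch of the pattern (the lambda stands in for `predict`):

```python
import gradio as gr

demo = gr.Interface(
    fn=lambda img, offset: f"offset = {offset}",  # stand-in for predict
    inputs=[gr.Image(label="Input Image"), gr.Number(value=0.075)],
    outputs=gr.Textbox(),
    examples=[["./examples/Test20.jpg", 0.075]],  # one value per input
)
```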