mlbench123 committed
Commit 9486cca (verified) · Parent: 8dba782

Update app.py

Files changed (1):
  1. app.py (+6 -28)
app.py CHANGED

@@ -36,23 +36,6 @@ transform_image = transforms.Compose(
 )
 
 
-def yolo_detect(
-    image: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor],
-    classes: List[str],
-) -> np.ndarray:
-    drawer_detector = YOLOWorld("yolov8x-worldv2.pt")
-    drawer_detector.set_classes(classes)
-    results: List[Results] = drawer_detector.predict(image)
-    boxes = []
-    for result in results:
-        boxes.append(
-            save_one_box(result.cpu().boxes.xyxy, im=result.orig_img, save=False)
-        )
-
-    del drawer_detector
-
-    return boxes[0]
-
 
 def remove_bg(image: np.ndarray) -> np.ndarray:
     image = Image.fromarray(image)
@@ -338,17 +321,12 @@ def resize_img(img: np.ndarray, resize_dim):
 
 
 def predict(image, offset_inches):
-    try:
-        drawer_img = yolo_detect(image, ["box"])
-        shrunked_img = make_square(shrink_bbox(drawer_img, 0.8))
-    except:
-        raise gr.Error("Unable to DETECT DRAWER, please take another picture with different magnification level!")
-
+
     # Detect the scaling reference square
     try:
-        reference_obj_img, scaling_box_coords = detect_reference_square(shrunked_img)
+        reference_obj_img, scaling_box_coords = detect_reference_square(image)
     except:
-        raise gr.Error("Unable to DETECT REFERENCE BOX, please take another picture with different magnification level!")
+        raise gr.Error("Unable to DETECT COIN, please take another picture with different magnification level!")
 
     # reference_obj_img_scaled = shrink_bbox(reference_obj_img, 1.2)
     # make the image sqaure so it does not effect the size of objects
@@ -370,9 +348,9 @@ def predict(image, offset_inches):
     scaling_factor = 1.0
 
     # Save original size before `remove_bg` processing
-    orig_size = shrunked_img.shape[:2]
+    orig_size = image.shape[:2]
     # Generate foreground mask and save its size
-    objects_mask = remove_bg(shrunked_img)
+    objects_mask = remove_bg(image)
 
     processed_size = objects_mask.shape[:2]
     # Exclude scaling box region from objects mask
@@ -384,7 +362,7 @@ def predict(image, offset_inches):
         expansion_factor=3.0,
     )
     objects_mask = resize_img(
-        objects_mask, (shrunked_img.shape[1], shrunked_img.shape[0])
+        objects_mask, (image.shape[1], image.shape[0])
     )
     offset_pixels = (offset_inches / scaling_factor) * 2 + 1
     dilated_mask = cv2.dilate(
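For reference, the deleted yolo_detect helper leans on Ultralytics' YOLO-World API (YOLOWorld, set_classes, predict) and its save_one_box cropping utility, none of whose imports are visible in the hunk. Below is a self-contained sketch of that helper; the import lines are assumptions inferred from the symbols it uses, not taken from app.py.

# Assumed imports -- inferred from the type hints and calls in the removed helper.
from pathlib import Path
from typing import List, Union

import numpy as np
import torch
from PIL import Image
from ultralytics import YOLOWorld
from ultralytics.engine.results import Results
from ultralytics.utils.plotting import save_one_box


def yolo_detect(
    image: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor],
    classes: List[str],
) -> np.ndarray:
    # Open-vocabulary detector restricted to the prompted class names (e.g. ["box"]).
    drawer_detector = YOLOWorld("yolov8x-worldv2.pt")
    drawer_detector.set_classes(classes)
    results: List[Results] = drawer_detector.predict(image)
    boxes = []
    for result in results:
        # Crop each detection out of the original frame without saving to disk.
        boxes.append(
            save_one_box(result.cpu().boxes.xyxy, im=result.orig_img, save=False)
        )
    del drawer_detector
    # Only the first crop (the detected drawer) is returned.
    return boxes[0]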
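On the arithmetic in the final hunk: (offset_inches / scaling_factor) * 2 + 1 converts the user-supplied offset from inches to pixels and makes it an odd kernel size, so a single dilation grows the mask by roughly the offset on each side. This reading assumes scaling_factor is inches per pixel (derived from the detected coin); the cv2.dilate call itself is truncated in the diff, so the square all-ones kernel and the helper name below are illustrative assumptions, not app.py's code.

import cv2
import numpy as np


def dilate_by_offset(mask: np.ndarray, offset_inches: float, scaling_factor: float) -> np.ndarray:
    # Inches -> pixels (assuming scaling_factor is inches per pixel), doubled plus one
    # so the square structuring element reaches ~offset pixels on either side of a point.
    offset_pixels = int((offset_inches / scaling_factor) * 2 + 1)
    kernel = np.ones((offset_pixels, offset_pixels), np.uint8)
    # One dilation pass expands the foreground mask outward by about half the kernel width.
    return cv2.dilate(mask, kernel, iterations=1)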