nolenfelten committed on
Commit
bf076df
·
verified ·
1 Parent(s): 6198aa3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -45
app.py CHANGED
@@ -34,13 +34,11 @@ from PIL import Image, ImageDraw
34
  print("import scipy")
35
  from scipy.ndimage import gaussian_filter
36
 
37
-
38
  REPO_ID = "thoucentric/Shelf_Objects_Detection_Yolov7_Pytorch"
39
  FILENAME = "best.pt"
40
 
41
  yolov7_custom_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
42
 
43
-
44
  # Load YOLOv7 Custom Model
45
  print("Load YOLOv7 Custom Model")
46
  model = torch.hub.load('Owaiskhan9654/yolov7-1:main', model='custom', path_or_model=yolov7_custom_weights, force_reload=True)
@@ -51,7 +49,6 @@ rf = Roboflow(api_key="gHiUgOSq9GqTnRy5mErk")
51
  project = rf.workspace().project("sku-110k")
52
  model = project.version(2).model
53
 
54
-
55
  def resize_image(image, max_size=1500):
56
  if isinstance(image, np.ndarray):
57
  image = Image.fromarray(image)
@@ -99,8 +96,6 @@ def roboflow(image, confidence, overlap, stroke_width=1, labels=False):
99
  "image": image_response
100
  }
101
 
102
-
103
-
104
  # Image Splitting and Merging Functionality
105
  def split_image(image, tile_size=640, overlap=160):
106
  img_width, img_height = image.size
@@ -116,8 +111,6 @@ def split_image(image, tile_size=640, overlap=160):
116
 
117
  return tiles
118
 
119
-
120
-
121
  def merge_bounding_boxes(results, box):
122
  adjusted_bboxes = []
123
  for idx, row in results.pandas().xyxy[0].iterrows():
@@ -134,9 +127,6 @@ def merge_bounding_boxes(results, box):
134
  adjusted_bboxes.append(adjusted_bbox)
135
  return adjusted_bboxes
136
 
137
-
138
-
139
-
140
  def draw_bounding_boxes(image, bounding_boxes):
141
  draw = ImageDraw.Draw(image)
142
  for bbox in bounding_boxes:
@@ -145,7 +135,6 @@ def draw_bounding_boxes(image, bounding_boxes):
145
  draw.text((bbox['xmin'], bbox['ymin']), f"{bbox['class']} {bbox['confidence']:.2f}", fill=color)
146
  return image
147
 
148
-
149
  # Non-Max Suppression Implementations
150
  def soft_nms(bounding_boxes, iou_threshold=0.3, sigma=0.5, score_threshold=0.001):
151
  if not bounding_boxes:
@@ -182,8 +171,6 @@ def soft_nms(bounding_boxes, iou_threshold=0.3, sigma=0.5, score_threshold=0.001
182
 
183
  return final_boxes
184
 
185
-
186
-
187
  # Density Map Generation and Counting Functions
188
  def generate_density_map(image, bounding_boxes, sigma=4):
189
  density_map = np.zeros((image.height, image.width))
@@ -196,13 +183,9 @@ def generate_density_map(image, bounding_boxes, sigma=4):
196
  density_map = gaussian_filter(density_map, sigma=sigma)
197
  return density_map
198
 
199
-
200
-
201
  def count_from_density_map(density_map, threshold=0.05):
202
  return np.sum(density_map > threshold)
203
 
204
-
205
-
206
  # Edge Enhancement Functions
207
  def apply_edge_enhancement(image):
208
  gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
@@ -215,8 +198,6 @@ def apply_edge_enhancement(image):
215
  enhanced_image = cv2.cvtColor(sobel_combined, cv2.COLOR_GRAY2RGB)
216
  return Image.fromarray(enhanced_image)
217
 
218
-
219
-
220
  # Object Detection Functions
221
  def object_detection(image, conf_threshold=0.25, iou_threshold=0.45):
222
  image = Image.fromarray(image)
@@ -237,8 +218,7 @@ def object_detection(image, conf_threshold=0.25, iou_threshold=0.45):
237
  json_response = json.dumps(final_bounding_boxes, indent=4)
238
  return image_with_boxes, json_response
239
 
240
-
241
-
242
  def object_detection_with_edge_enhancement(image, conf_threshold=0.25, iou_threshold=0.45):
243
  image = Image.fromarray(image)
244
  image_enhanced = apply_edge_enhancement(image)
@@ -260,8 +240,6 @@ def object_detection_with_edge_enhancement(image, conf_threshold=0.25, iou_thres
260
  json_response = json.dumps(final_bounding_boxes, indent=4)
261
  return image_with_boxes, json_response
262
 
263
-
264
-
265
  def object_detection_density_edge(image, conf_threshold=0.25, iou_threshold=0.45):
266
  """Apply edge enhancement and density-based counting."""
267
  image = Image.fromarray(image)
@@ -294,38 +272,36 @@ def object_detection_density_edge(image, conf_threshold=0.25, iou_threshold=0.45
294
  summary = json.dumps({"object_count": int(object_count)}, indent=4)
295
  return image_with_density, json_response, summary
296
 
297
-
298
-
299
  def procedure(image_input, yolov7_confidence_threshold_input, yolov7_IOU_Threshold_input, roboflow_confidence_threshold_input, roboflow_IOU_Threshold_input, roboflow_labels_input, roboflow_stroke_width_input):
300
-
301
  '''
302
  This function takes in an image and applies both YOLOv7 and Roboflow object detection models to it.
303
  It then returns the images and JSON results.
304
  '''
305
-
306
  print("Begin Roboflow inferences.")
307
- roboflow_inference = roboflow(image = image_input, labels=roboflow_labels_input, stroke_width=roboflow_stroke_width_input, confidence = roboflow_confidence_threshold_input, overlap = roboflow_IOU_Threshold_input, )
308
 
309
- roboflow_image = Image.open(io.BytesIO(roboflow_inference["image"]))
310
- roboflow_json = roboflow_inference["json"]
311
-
312
- return None, None, roboflow_image, roboflow_json
313
 
 
 
 
 
314
 
315
  # Uploaded image.
316
  image_input = gr.Image(shape=(4080, 1836), image_mode="RGB", source="upload", label="Upload Image", optional=False)
317
 
318
  # YOLOv7 Confidence Threshold input.
319
- yolov7_confidence_threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value = 0.45, step=0.01, label="YOLOv7 Confidence Threshold")
320
 
321
  # YOLOv7 IOU Threshold.
322
- yolov7_IOU_Threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value = 0.45, step=0.01, label="YOLOv7 IOU Threshold")
323
 
324
  # Roboflow Confidence Threshold input.
325
- roboflow_confidence_threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value = 0.45, step=0.01, label="Roboflow Confidence Threshold")
326
 
327
  # Roboflow IOU Threshold.
328
- roboflow_IOU_Threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value = 0.45, step=0.01, label="Roboflow IOU Threshold")
329
 
330
  # Roboflow Labels.
331
  roboflow_labels_input = gr.Checkbox(label="Roboflow Labels")
@@ -333,8 +309,6 @@ roboflow_labels_input = gr.Checkbox(label="Roboflow Labels")
333
  # Roboflow Stroke Width.
334
  roboflow_stroke_width_input = gr.Radio([1, 2, 5, 10], label="Stroke Width")
335
 
336
-
337
-
338
  # YOLOv7 Image Output.
339
  yolov7_image_output = gr.Image(type="pil", label="YOLOv7 Output Image")
340
 
@@ -347,7 +321,6 @@ roboflow_image_output = gr.Image(type="pil", label="Roboflow Output Image")
347
  # Roboflow JSON Output.
348
  roboflow_json_output = gr.Textbox(label="Roboflow Bounding Boxes JSON")
349
 
350
-
351
  # Gradio Interface Definitions
352
  inputs = [
353
  image_input,
@@ -359,8 +332,6 @@ inputs = [
359
  roboflow_stroke_width_input,
360
  ]
361
 
362
-
363
-
364
  outputs = [
365
  yolov7_image_output,
366
  yolov7_json_output,
@@ -368,9 +339,8 @@ outputs = [
368
  roboflow_json_output,
369
  ]
370
 
371
-
372
  title = "<center>Cigarette Pack Counter</center>"
373
- description = "<center><a href='http://counttek.online'><img src='https://mvp-83056e96f7ab.herokuapp.com/static/countteklogo2.png'></a><br><a href='https://nolenfelten.github.io'>Project by Nolen Felten</a></center>"
374
  footer = ("<center><b>Item Classes it will detect (Total 140 Classes)</b></center>")
375
 
376
  interface = gr.Interface(
@@ -384,6 +354,5 @@ interface = gr.Interface(
384
  allow_flagging="never"
385
  )
386
 
387
-
388
  # Launch Gradio Interfaces
389
- interface.launch(debug=True)
 
34
  print("import scipy")
35
  from scipy.ndimage import gaussian_filter
36
 
 
37
  REPO_ID = "thoucentric/Shelf_Objects_Detection_Yolov7_Pytorch"
38
  FILENAME = "best.pt"
39
 
40
  yolov7_custom_weights = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
41
 
 
42
  # Load YOLOv7 Custom Model
43
  print("Load YOLOv7 Custom Model")
44
  model = torch.hub.load('Owaiskhan9654/yolov7-1:main', model='custom', path_or_model=yolov7_custom_weights, force_reload=True)
 
49
  project = rf.workspace().project("sku-110k")
50
  model = project.version(2).model
51
 
 
52
  def resize_image(image, max_size=1500):
53
  if isinstance(image, np.ndarray):
54
  image = Image.fromarray(image)
 
96
  "image": image_response
97
  }
98
 
 
 
99
  # Image Splitting and Merging Functionality
100
  def split_image(image, tile_size=640, overlap=160):
101
  img_width, img_height = image.size
 
111
 
112
  return tiles
113
 
 
 
114
  def merge_bounding_boxes(results, box):
115
  adjusted_bboxes = []
116
  for idx, row in results.pandas().xyxy[0].iterrows():
 
127
  adjusted_bboxes.append(adjusted_bbox)
128
  return adjusted_bboxes
129
 
 
 
 
130
  def draw_bounding_boxes(image, bounding_boxes):
131
  draw = ImageDraw.Draw(image)
132
  for bbox in bounding_boxes:
 
135
  draw.text((bbox['xmin'], bbox['ymin']), f"{bbox['class']} {bbox['confidence']:.2f}", fill=color)
136
  return image
137
 
 
138
  # Non-Max Suppression Implementations
139
  def soft_nms(bounding_boxes, iou_threshold=0.3, sigma=0.5, score_threshold=0.001):
140
  if not bounding_boxes:
 
171
 
172
  return final_boxes
173
 
 
 
174
  # Density Map Generation and Counting Functions
175
  def generate_density_map(image, bounding_boxes, sigma=4):
176
  density_map = np.zeros((image.height, image.width))
 
183
  density_map = gaussian_filter(density_map, sigma=sigma)
184
  return density_map
185
 
 
 
186
  def count_from_density_map(density_map, threshold=0.05):
187
  return np.sum(density_map > threshold)
188
 
 
 
189
  # Edge Enhancement Functions
190
  def apply_edge_enhancement(image):
191
  gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
 
198
  enhanced_image = cv2.cvtColor(sobel_combined, cv2.COLOR_GRAY2RGB)
199
  return Image.fromarray(enhanced_image)
200
 
 
 
201
  # Object Detection Functions
202
  def object_detection(image, conf_threshold=0.25, iou_threshold=0.45):
203
  image = Image.fromarray(image)
 
218
  json_response = json.dumps(final_bounding_boxes, indent=4)
219
  return image_with_boxes, json_response
220
 
221
+ def object_detection_with_edge_enhancement(image, conf_threshold=0.25, iou_threshold=0.45):
 
222
  def object_detection_with_edge_enhancement(image, conf_threshold=0.25, iou_threshold=0.45):
223
  image = Image.fromarray(image)
224
  image_enhanced = apply_edge_enhancement(image)
 
240
  json_response = json.dumps(final_bounding_boxes, indent=4)
241
  return image_with_boxes, json_response
242
 
 
 
243
  def object_detection_density_edge(image, conf_threshold=0.25, iou_threshold=0.45):
244
  """Apply edge enhancement and density-based counting."""
245
  image = Image.fromarray(image)
 
272
  summary = json.dumps({"object_count": int(object_count)}, indent=4)
273
  return image_with_density, json_response, summary
274
 
 
 
275
  def procedure(image_input, yolov7_confidence_threshold_input, yolov7_IOU_Threshold_input, roboflow_confidence_threshold_input, roboflow_IOU_Threshold_input, roboflow_labels_input, roboflow_stroke_width_input):
 
276
  '''
277
  This function takes in an image and applies both YOLOv7 and Roboflow object detection models to it.
278
  It then returns the images and JSON results.
279
  '''
 
280
  print("Begin Roboflow inferences.")
281
+ roboflow_inference = roboflow(image=image_input, confidence=roboflow_confidence_threshold_input, overlap=roboflow_IOU_Threshold_input, stroke_width=roboflow_stroke_width_input, labels=roboflow_labels_input)
282
 
283
+ if roboflow_inference["image"] is None:
284
+ raise ValueError("Roboflow API did not return a valid image.")
 
 
285
 
286
+ roboflow_image = roboflow_inference["image"]
287
+ roboflow_json = json.dumps(roboflow_inference["json"], indent=4)
288
+
289
+ return None, None, roboflow_image, roboflow_json
290
 
291
  # Uploaded image.
292
  image_input = gr.Image(shape=(4080, 1836), image_mode="RGB", source="upload", label="Upload Image", optional=False)
293
 
294
  # YOLOv7 Confidence Threshold input.
295
+ yolov7_confidence_threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.01, label="YOLOv7 Confidence Threshold")
296
 
297
  # YOLOv7 IOU Threshold.
298
+ yolov7_IOU_Threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.01, label="YOLOv7 IOU Threshold")
299
 
300
  # Roboflow Confidence Threshold input.
301
+ roboflow_confidence_threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.01, label="Roboflow Confidence Threshold")
302
 
303
  # Roboflow IOU Threshold.
304
+ roboflow_IOU_Threshold_input = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.01, label="Roboflow IOU Threshold")
305
 
306
  # Roboflow Labels.
307
  roboflow_labels_input = gr.Checkbox(label="Roboflow Labels")
 
309
  # Roboflow Stroke Width.
310
  roboflow_stroke_width_input = gr.Radio([1, 2, 5, 10], label="Stroke Width")
311
 
 
 
312
  # YOLOv7 Image Output.
313
  yolov7_image_output = gr.Image(type="pil", label="YOLOv7 Output Image")
314
 
 
321
  # Roboflow JSON Output.
322
  roboflow_json_output = gr.Textbox(label="Roboflow Bounding Boxes JSON")
323
 
 
324
  # Gradio Interface Definitions
325
  inputs = [
326
  image_input,
 
332
  roboflow_stroke_width_input,
333
  ]
334
 
 
 
335
  outputs = [
336
  yolov7_image_output,
337
  yolov7_json_output,
 
339
  roboflow_json_output,
340
  ]
341
 
 
342
  title = "<center>Cigarette Pack Counter</center>"
343
+ description = "<center><a href='http://counttek.online'><img width='25%' src='https://mvp-83056e96f7ab.herokuapp.com/static/countteklogo2.png'></a><br><a href='https://nolenfelten.github.io'>Project by Nolen Felten</a></center>"
344
  footer = ("<center><b>Item Classes it will detect (Total 140 Classes)</b></center>")
345
 
346
  interface = gr.Interface(
 
354
  allow_flagging="never"
355
  )
356
 
 
357
  # Launch Gradio Interfaces
358
+ interface.launch(debug=True)