BenjiELCA committed on
Commit
27a202c
·
1 Parent(s): 74f41b4

delete some print

Browse files
Files changed (5) hide show
  1. .gitignore +4 -0
  2. app.py +4 -4
  3. modules/display.py +7 -4
  4. modules/eval.py +8 -3
  5. modules/toXML.py +1 -1
.gitignore CHANGED
@@ -13,3 +13,7 @@ VISION_KEY.json
13
  backup/
14
 
15
  temp.jpg
 
 
 
 
 
13
  backup/
14
 
15
  temp.jpg
16
+
17
+ Evaluation.ipynb
18
+
19
+ study/
app.py CHANGED
@@ -222,7 +222,7 @@ def display_options(image, score_threshold):
222
  annotated_image = draw_stream(
223
  np.array(image), prediction=st.session_state.prediction, text_predictions=st.session_state.text_pred,
224
  draw_keypoints=draw_keypoints, draw_boxes=draw_boxes, draw_links=draw_links, draw_twins=False, draw_grouped_text=draw_text,
225
- write_class=write_class, write_text=write_text, keypoints_correction=True, write_idx=write_idx, only_print=selected_option,
226
  score_threshold=score_threshold, write_score=write_score, resize=True, return_image=True, axis=True
227
  )
228
 
@@ -254,7 +254,7 @@ def perform_inference(model_object, model_arrow, image, score_threshold):
254
  ocr_results = text_prediction(uploaded_image)
255
 
256
  # Filter and map OCR results to prediction results
257
- st.session_state.text_pred = filter_text(ocr_results, threshold=0.5)
258
  st.session_state.text_mapping = mapping_text(st.session_state.prediction, st.session_state.text_pred, print_sentences=False, percentage_thresh=0.5)
259
 
260
  # Remove the original image display
@@ -300,7 +300,7 @@ def main():
300
  st.sidebar.subheader("Instructions:")
301
  st.sidebar.text("1. Upload you image")
302
  st.sidebar.text("2. Crop the image \n (try to put the BPMN diagram \n in the center of the image)")
303
- st.sidebar.text("3. Set the score threshold \n for prediction (default is 0.5)")
304
  st.sidebar.text("4. Click on 'Launch Prediction'")
305
  st.sidebar.text("5. You can now see the annotation \n and the BPMN XML result")
306
  st.sidebar.text("6. You can change the scale for \n the XML file (default is 1.0)")
@@ -371,7 +371,7 @@ def main():
371
  if cropped_image is not None:
372
  col1, col2, col3 = st.columns(3)
373
  with col1:
374
- score_threshold = st.slider("Set score threshold for prediction", min_value=0.0, max_value=1.0, value=0.5, step=0.05)
375
 
376
  if st.button("Launch Prediction"):
377
  st.session_state.crop_image = cropped_image
 
222
  annotated_image = draw_stream(
223
  np.array(image), prediction=st.session_state.prediction, text_predictions=st.session_state.text_pred,
224
  draw_keypoints=draw_keypoints, draw_boxes=draw_boxes, draw_links=draw_links, draw_twins=False, draw_grouped_text=draw_text,
225
+ write_class=write_class, write_text=write_text, keypoints_correction=True, write_idx=write_idx, only_show=selected_option,
226
  score_threshold=score_threshold, write_score=write_score, resize=True, return_image=True, axis=True
227
  )
228
 
 
254
  ocr_results = text_prediction(uploaded_image)
255
 
256
  # Filter and map OCR results to prediction results
257
+ st.session_state.text_pred = filter_text(ocr_results, threshold=0.6)
258
  st.session_state.text_mapping = mapping_text(st.session_state.prediction, st.session_state.text_pred, print_sentences=False, percentage_thresh=0.5)
259
 
260
  # Remove the original image display
 
300
  st.sidebar.subheader("Instructions:")
301
  st.sidebar.text("1. Upload you image")
302
  st.sidebar.text("2. Crop the image \n (try to put the BPMN diagram \n in the center of the image)")
303
+ st.sidebar.text("3. Set the score threshold \n for prediction (default is 0.6)")
304
  st.sidebar.text("4. Click on 'Launch Prediction'")
305
  st.sidebar.text("5. You can now see the annotation \n and the BPMN XML result")
306
  st.sidebar.text("6. You can change the scale for \n the XML file (default is 1.0)")
 
371
  if cropped_image is not None:
372
  col1, col2, col3 = st.columns(3)
373
  with col1:
374
+ score_threshold = st.slider("Set score threshold for prediction", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
375
 
376
  if st.button("Launch Prediction"):
377
  st.session_state.crop_image = cropped_image
modules/display.py CHANGED
@@ -25,7 +25,7 @@ def draw_stream(image,
25
  write_idx=False,
26
  keypoints_correction=False,
27
  new_size=(1333, 1333),
28
- only_print=None,
29
  axis=False,
30
  return_image=False,
31
  resize=False):
@@ -48,7 +48,7 @@ def draw_stream(image,
48
  - write_score (bool): Flag to write scores near the annotations.
49
  - write_text (bool): Flag to write OCR recognized text.
50
  - score_threshold (float): Threshold for scores above which annotations will be drawn.
51
- - only_print (str): Specific class name to filter annotations by.
52
  - resize (bool): Whether to resize annotations to fit the image size.
53
  """
54
 
@@ -74,8 +74,11 @@ def draw_stream(image,
74
  if score < score_threshold:
75
  continue
76
  if draw_boxes:
77
- if only_print is not None and only_print != 'all':
78
- if prediction['labels'][i] != list(class_dict.values()).index(only_print):
 
 
 
79
  continue
80
  cv2.rectangle(image_copy, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 0), int(2*scale))
81
  if write_score:
 
25
  write_idx=False,
26
  keypoints_correction=False,
27
  new_size=(1333, 1333),
28
+ only_show=None,
29
  axis=False,
30
  return_image=False,
31
  resize=False):
 
48
  - write_score (bool): Flag to write scores near the annotations.
49
  - write_text (bool): Flag to write OCR recognized text.
50
  - score_threshold (float): Threshold for scores above which annotations will be drawn.
51
+ - only_show (str): Specific class name to filter annotations by.
52
  - resize (bool): Whether to resize annotations to fit the image size.
53
  """
54
 
 
74
  if score < score_threshold:
75
  continue
76
  if draw_boxes:
77
+ if only_show is not None and only_show != 'all':
78
+ if prediction['labels'][i] != list(class_dict.values()).index(only_show):
79
+ continue
80
+ #dont show the lanes
81
+ if prediction['labels'][i] == list(class_dict.values()).index('lane'):
82
  continue
83
  cv2.rectangle(image_copy, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 0), int(2*scale))
84
  if write_score:
modules/eval.py CHANGED
@@ -9,6 +9,8 @@ from modules.utils import is_vertical
9
 
10
 
11
  def non_maximum_suppression(boxes, scores, labels=None, iou_threshold=0.5):
 
 
12
  idxs = np.argsort(scores) # Sort the boxes according to their scores in ascending order
13
  selected_boxes = []
14
 
@@ -17,7 +19,7 @@ def non_maximum_suppression(boxes, scores, labels=None, iou_threshold=0.5):
17
  i = idxs[last]
18
 
19
  # Skip if the label is a lane
20
- if labels is not None and class_dict[labels[i]] == 'lane':
21
  selected_boxes.append(i)
22
  idxs = np.delete(idxs, last)
23
  continue
@@ -360,7 +362,7 @@ def full_prediction(model_object, model_arrow, image, score_threshold=0.5, iou_t
360
 
361
  # Load an image
362
  with torch.no_grad(): # Disable gradient calculation for inference
363
- _, objects_pred = object_prediction(model_object, image, score_threshold=score_threshold, iou_threshold=iou_threshold)
364
  _, arrow_pred = arrow_prediction(model_arrow, image, score_threshold=score_threshold, iou_threshold=iou_threshold, distance_treshold=distance_treshold)
365
 
366
  #print('Object prediction:', objects_pred)
@@ -377,7 +379,7 @@ def full_prediction(model_object, model_arrow, image, score_threshold=0.5, iou_t
377
  #give a link to event to allow the creation of the BPMN id with start, indermediate and end event
378
  flow_links = give_link_to_element(flow_links, labels)
379
 
380
- boxes,labels,scores,keypoints,flow_links,best_points,pool_dict = last_correction(boxes,labels,scores,keypoints,flow_links,best_points, pool_dict)
381
 
382
  image = image.permute(1, 2, 0).cpu().numpy()
383
  image = (image * 255).astype(np.uint8)
@@ -399,11 +401,14 @@ def full_prediction(model_object, model_arrow, image, score_threshold=0.5, iou_t
399
  'BPMN_id': bpmn_id,
400
  }
401
 
 
402
  # give a unique BPMN id to each element
403
  data = create_BPMN_id(data)
404
 
405
 
406
 
 
 
407
  return image, data
408
 
409
  def evaluate_model_by_class(pred_boxes, true_boxes, pred_labels, true_labels, model_dict, iou_threshold=0.5):
 
9
 
10
 
11
  def non_maximum_suppression(boxes, scores, labels=None, iou_threshold=0.5):
12
+ exception = ['pool', 'lane']
13
+
14
  idxs = np.argsort(scores) # Sort the boxes according to their scores in ascending order
15
  selected_boxes = []
16
 
 
19
  i = idxs[last]
20
 
21
  # Skip if the label is a lane
22
+ if labels is not None and (class_dict[labels[i]] in exception):
23
  selected_boxes.append(i)
24
  idxs = np.delete(idxs, last)
25
  continue
 
362
 
363
  # Load an image
364
  with torch.no_grad(): # Disable gradient calculation for inference
365
+ _, objects_pred = object_prediction(model_object, image, score_threshold=score_threshold, iou_threshold=0.1)
366
  _, arrow_pred = arrow_prediction(model_arrow, image, score_threshold=score_threshold, iou_threshold=iou_threshold, distance_treshold=distance_treshold)
367
 
368
  #print('Object prediction:', objects_pred)
 
379
  #give a link to event to allow the creation of the BPMN id with start, indermediate and end event
380
  flow_links = give_link_to_element(flow_links, labels)
381
 
382
+ boxes,labels,scores,keypoints,flow_links,best_points,pool_dict = last_correction(boxes,labels,scores,keypoints,flow_links,best_points, pool_dict)
383
 
384
  image = image.permute(1, 2, 0).cpu().numpy()
385
  image = (image * 255).astype(np.uint8)
 
401
  'BPMN_id': bpmn_id,
402
  }
403
 
404
+
405
  # give a unique BPMN id to each element
406
  data = create_BPMN_id(data)
407
 
408
 
409
 
410
+
411
+
412
  return image, data
413
 
414
  def evaluate_model_by_class(pred_boxes, true_boxes, pred_labels, true_labels, model_dict, iou_threshold=0.5):
modules/toXML.py CHANGED
@@ -236,7 +236,7 @@ def create_bpmn_object(process, bpmnplane, text_mapping, definitions, size, data
236
 
237
  # Data Object
238
  elif element_type == 'dataObject' or element_type == 'dataStore':
239
- print('ici dataObject', element_id)
240
  dataObject_idx = element_id.split('_')[1]
241
  dataObject_ref = f'DataObjectReference_{dataObject_idx}'
242
  element = ET.SubElement(process, 'bpmn:dataObjectReference', id=dataObject_ref, dataObjectRef=element_id, name=text_mapping[element_id])
 
236
 
237
  # Data Object
238
  elif element_type == 'dataObject' or element_type == 'dataStore':
239
+ #print('ici dataObject', element_id)
240
  dataObject_idx = element_id.split('_')[1]
241
  dataObject_ref = f'DataObjectReference_{dataObject_idx}'
242
  element = ET.SubElement(process, 'bpmn:dataObjectReference', id=dataObject_ref, dataObjectRef=element_id, name=text_mapping[element_id])