vumichien committed on
Commit 82d91ef · verified · 1 Parent(s): 70139ef

Update app.py

Files changed (1): app.py (+20 -0)
app.py CHANGED
@@ -20,9 +20,25 @@ processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base-ft", trust_
 onnx_model = YOLO("models/best.onnx", task='detect')
 
 
+def filter_detections(detections, target_class_name="mark"):
+    indices_to_keep = [i for i, class_name in enumerate(detections.data['class_name']) if
+                       class_name == target_class_name]
+
+    filtered_xyxy = detections.xyxy[indices_to_keep]
+    filtered_confidence = detections.confidence[indices_to_keep]
+    filtered_class_id = detections.class_id[indices_to_keep]
+    filtered_class_name = detections.data['class_name'][indices_to_keep]
+    detections.xyxy = filtered_xyxy
+    detections.confidence = filtered_confidence
+    detections.class_id = filtered_class_id
+    detections.data['class_name'] = filtered_class_name
+    return detections
+
+
 def ends_with_number(s):
     return s[-1].isdigit()
 
+
 def ocr(image, prompt="<OCR>"):
     original_height, original_width = image.shape[:2]
     inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
@@ -45,6 +61,7 @@ def ocr(image, prompt="<OCR>"):
 
     return parsed_answer
 
+
 def parse_detection(detections):
     parsed_rows = []
     for i in range(len(detections.xyxy)):
@@ -103,6 +120,7 @@ def cut_and_save_image(image, parsed_detections, output_dir):
         output_path_list.append(output_path)
     return output_path_list
 
+
 def analysis(progress=gr.Progress()):
     progress(0, desc="Analyzing...")
     list_files = glob.glob("output/*.png")
@@ -123,6 +141,7 @@ def analysis(progress=gr.Progress()):
     print("Time taken:", time.time() - start_time)
     return pd.DataFrame(results.items(), columns=['Mark', 'Total']).reset_index(drop=False).rename(columns={'index': 'No.'})
 
+
 def inference(
     image_path,
     conf_threshold,
@@ -143,6 +162,7 @@ def inference(
 
     results = onnx_model(image, conf=conf_threshold, iou=iou_threshold)[0]
     detections = sv.Detections.from_ultralytics(results)
+    detections = filter_detections(detections)
     parsed_detections = parse_detection(detections)
     output_dir = "output"
     # Check if the output directory exists, clear all the files inside
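The added filter_detections helper narrows an sv.Detections result to boxes whose class name matches target_class_name (default "mark") by rewriting xyxy, confidence, class_id, and data['class_name'] in place, and inference now applies it before parse_detection. A minimal sketch of the same filtering, assuming supervision's boolean-mask indexing on Detections (not used in this commit) and a hypothetical helper name keep_class:

import numpy as np
import supervision as sv

def keep_class(detections: sv.Detections, target_class_name: str = "mark") -> sv.Detections:
    # Boolean mask over the per-box class names stored in detections.data
    mask = np.asarray(detections.data["class_name"]) == target_class_name
    # Indexing a Detections object with a boolean mask returns a filtered copy
    return detections[mask]

Unlike the committed helper, which mutates the detections object it receives, this sketch returns a filtered copy, so the unfiltered result would remain available to the caller.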