muhammadsalmanalfaridzi committed on
Commit
bf1fd9f
·
verified ·
1 Parent(s): 1ba1aa6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +256 -96
app.py CHANGED
@@ -1,104 +1,264 @@
1
  import gradio as gr
 
 
 
 
 
 
2
  import numpy as np
3
- from vision_agent.tools import *
4
- from vision_agent.tools import countgd_object_detection
5
- from pillow_heif import register_heif_opener
6
- from typing import Dict
7
-
8
- # Register HEIF opener
9
- register_heif_opener()
10
-
11
- import vision_agent as va
12
-
13
- def analyze_mixed_boxes(image) -> Dict:
14
- """
15
- Analyzes an image containing mixed types of beverages, specifically water bottles and beverage cans.
16
- 1) Loads the image from the provided path.
17
- 2) Uses the 'countgd_object_detection' tool with the prompt 'water bottle, beverage can' to detect items.
18
- 3) Splits detections into a top shelf and bottom shelf by comparing detection center to the image's vertical midpoint.
19
- 4) Calculates how many water bottles and beverage cans are on each shelf and overall, along with average confidence scores.
20
- 5) Overlays bounding boxes on the image to visualize detections, then saves the annotated image.
21
- 6) Returns a dictionary summarizing the distribution of water bottles and beverage cans.
22
-
23
- Parameters:
24
- image (PIL.Image): The uploaded image.
25
-
26
- Returns:
27
- dict: Summary of the analysis with keys:
28
- - total_items (int): total number of detected items
29
- - total_water_bottles (int): total count of detected water bottles
30
- - total_beverage_cans (int): total count of detected beverage cans
31
- - top_shelf (dict): counts of bottles and cans on top shelf
32
- - bottom_shelf (dict): counts of bottles and cans on bottom shelf
33
- - confidence (dict): average confidence scores for bottles and cans
34
- """
35
- # Convert the uploaded image to a numpy array
36
- image = np.array(image)
37
- height, width = image.shape[:2]
38
-
39
- # Detect water bottles and beverage cans
40
- detections = countgd_object_detection("water bottle, beverage can", image)
41
-
42
- # Separate detections into top shelf and bottom shelf
43
- mid_height = height / 2
44
- top_shelf_dets = []
45
- bottom_shelf_dets = []
46
- for det in detections:
47
- cy = ((det["bbox"][1] + det["bbox"][3]) / 2) * height
48
- if cy < mid_height:
49
- top_shelf_dets.append(det)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  else:
51
- bottom_shelf_dets.append(det)
52
 
53
- # Count items by label and calculate average confidence
54
- water_bottles = [det for det in detections if det["label"] == "water bottle"]
55
- beverage_cans = [det for det in detections if det["label"] == "beverage can"]
56
 
57
- avg_bottle_conf = (sum(det["score"] for det in water_bottles) / len(water_bottles)
58
- if water_bottles else 0)
59
- avg_can_conf = (sum(det["score"] for det in beverage_cans) / len(beverage_cans)
60
- if beverage_cans else 0)
 
 
61
 
62
- top_water_bottles = [det for det in top_shelf_dets if det["label"] == "water bottle"]
63
- top_beverage_cans = [det for det in top_shelf_dets if det["label"] == "beverage can"]
64
- bottom_water_bottles = [det for det in bottom_shelf_dets if det["label"] == "water bottle"]
65
- bottom_beverage_cans = [det for det in bottom_shelf_dets if det["label"] == "beverage can"]
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
- # Overlay bounding boxes and save the annotated image
68
- annotated_image = overlay_bounding_boxes(image, detections)
69
-
70
- # Convert annotated image back to PIL format for Gradio output
71
- annotated_image_pil = Image.fromarray(annotated_image)
72
-
73
- # Return the result
74
- result = {
75
- "total_items": len(detections),
76
- "total_water_bottles": len(water_bottles),
77
- "total_beverage_cans": len(beverage_cans),
78
- "top_shelf": {
79
- "water_bottles": len(top_water_bottles),
80
- "beverage_cans": len(top_beverage_cans),
81
- },
82
- "bottom_shelf": {
83
- "water_bottles": len(bottom_water_bottles),
84
- "beverage_cans": len(bottom_beverage_cans),
85
- },
86
- "confidence": {
87
- "water_bottles": round(avg_bottle_conf, 2),
88
- "beverage_cans": round(avg_can_conf, 2),
89
- },
90
- "annotated_image": annotated_image_pil # return annotated image for display
91
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
- return result
94
-
95
- # Gradio Interface
96
- iface = gr.Interface(
97
- fn=analyze_mixed_boxes,
98
- inputs=gr.Image(type="pil"), # allows image upload
99
- outputs=[gr.JSON(), gr.Image(type="pil")], # display result and annotated image
100
- title="Beverage Detection Analysis",
101
- description="Upload an image containing water bottles and beverage cans, and the tool will analyze the distribution on shelves and display an annotated image.",
102
- )
103
-
104
- iface.launch()
 
 
 
 
1
  import gradio as gr
2
+ from dotenv import load_dotenv
3
+ from roboflow import Roboflow
4
+ import tempfile
5
+ import os
6
+ import requests
7
+ import cv2
8
  import numpy as np
9
+ from dds_cloudapi_sdk import Config, Client
10
+ from dds_cloudapi_sdk.tasks.dinox import DinoxTask
11
+ from dds_cloudapi_sdk.tasks.types import DetectionTarget
12
+ from dds_cloudapi_sdk import TextPrompt
13
+ import subprocess
14
+
15
# ========== Configuration ==========
load_dotenv()

# Roboflow (Nestlé product detector) settings, read from the environment.
rf_api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
# NOTE(review): int(None) raises TypeError if ROBOFLOW_MODEL_VERSION is
# unset — confirm the deployment always provides this variable.
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))

# DINO-X (open-vocabulary competitor detector) settings.
DINOX_API_KEY = os.getenv("DINO_X_API_KEY")
DINOX_PROMPT = "beverage . bottle . cans . mixed box"  # customize per competitor products, e.g. food . drink

# Model initialisation (performs network calls at import time).
rf = Roboflow(api_key=rf_api_key)
project = rf.workspace(workspace).project(project_name)
yolo_model = project.version(model_version).model

dinox_config = Config(DINOX_API_KEY)
dinox_client = Client(dinox_config)
35
+
36
+ # ========== Fungsi Deteksi Kombinasi ==========
37
def detect_combined(image):
    """Detect Nestlé products (YOLO) and competitor items (DINO-X) in an image.

    Parameters:
        image (PIL.Image): the uploaded image.

    Returns:
        tuple[str | None, str]: path of the annotated image (None on failure)
        and a human-readable counting summary (or an error message).
    """
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        # JPEG cannot store an alpha channel; normalise the mode first so
        # RGBA/P uploads do not make PIL raise.
        image.convert("RGB").save(temp_file, format="JPEG")
        temp_path = temp_file.name

    try:
        # ========== [1] YOLO: per-class Nestlé product detection ==========
        yolo_pred = yolo_model.predict(temp_path, confidence=50, overlap=80).json()

        # Count detections per Nestlé class.
        nestle_class_count = {}
        nestle_boxes = []
        for pred in yolo_pred['predictions']:
            class_name = pred['class']
            nestle_class_count[class_name] = nestle_class_count.get(class_name, 0) + 1
            # Roboflow boxes are (center_x, center_y, width, height) in pixels.
            nestle_boxes.append((pred['x'], pred['y'], pred['width'], pred['height']))

        total_nestle = sum(nestle_class_count.values())

        # ========== [2] DINO-X: competitor detection ==========
        image_url = dinox_client.upload_file(temp_path)
        task = DinoxTask(
            image_url=image_url,
            prompts=[TextPrompt(text=DINOX_PROMPT)],
            bbox_threshold=0.4,
            targets=[DetectionTarget.BBox]
        )
        dinox_client.run_task(task)
        dinox_pred = task.result.objects

        # Keep only DINO-X hits that do NOT overlap a YOLO detection, so
        # Nestlé products are not double counted as competitors.
        competitor_class_count = {}
        competitor_boxes = []
        for obj in dinox_pred:
            dinox_box = obj.bbox
            if not is_overlap(dinox_box, nestle_boxes):
                class_name = obj.category.strip().lower()  # normalise class name
                competitor_class_count[class_name] = competitor_class_count.get(class_name, 0) + 1
                competitor_boxes.append({
                    "class": class_name,
                    "box": dinox_box,
                    "confidence": obj.score
                })

        total_competitor = sum(competitor_class_count.values())

        # ========== [3] Text summary ==========
        result_text = "Product Nestle\n\n"
        for class_name, count in nestle_class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal Products Nestle: {total_nestle}\n\n"

        if competitor_class_count:
            # Only the total is reported, not a per-class breakdown.
            result_text += f"Total Unclassified Products: {total_competitor}\n"
        else:
            result_text += "No Unclassified Products detected\n"

        # ========== [4] Visualisation ==========
        img = cv2.imread(temp_path)

        # Nestlé products in green.
        for pred in yolo_pred['predictions']:
            x, y, w, h = pred['x'], pred['y'], pred['width'], pred['height']
            cv2.rectangle(img, (int(x - w / 2), int(y - h / 2)),
                          (int(x + w / 2), int(y + h / 2)), (0, 255, 0), 2)
            cv2.putText(img, pred['class'], (int(x - w / 2), int(y - h / 2 - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)

        # Competitors in red, relabelled as 'unclassified'.
        # Hoisted out of the loop: the rename list is loop-invariant.
        unclassified_classes = ["beverage", "cans", "bottle", "mixed box"]
        for comp in competitor_boxes:
            x1, y1, x2, y2 = comp['box']
            display_name = ("unclassified"
                            if any(cls in comp['class'].lower() for cls in unclassified_classes)
                            else comp['class'])
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
            cv2.putText(img, f"{display_name} {comp['confidence']:.2f}",
                        (int(x1), int(y1 - 10)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)

        output_path = "/tmp/combined_output.jpg"
        cv2.imwrite(output_path, img)

        return output_path, result_text

    except Exception as e:
        # BUG FIX: the original returned temp_path as the output image, but
        # the finally block deletes temp_path before Gradio can read it.
        # Return None so the UI shows the error message with no image.
        return None, f"Error: {str(e)}"
    finally:
        os.remove(temp_path)
129
+
130
def is_overlap(box1, boxes2, threshold=0.3):
    """Return True if box1 overlaps any box in boxes2 by more than threshold.

    Parameters:
        box1 (tuple): (x_min, y_min, x_max, y_max) in absolute pixels
            (DINO-X corner format).
        boxes2 (iterable): boxes as (center_x, center_y, width, height)
            (Roboflow/YOLO center format).
        threshold (float): overlap is declared when
            overlap_area / area(box1) exceeds this ratio.

    Returns:
        bool: True on the first box exceeding the threshold, else False.
    """
    x1_min, y1_min, x1_max, y1_max = box1
    # Hoisted out of the loop: box1's area is loop-invariant.
    area_box1 = (x1_max - x1_min) * (y1_max - y1_min)
    # BUG FIX: a degenerate (zero-area) box1 used to raise ZeroDivisionError;
    # it cannot meaningfully overlap anything, so report no overlap.
    if area_box1 <= 0:
        return False
    for b2 in boxes2:
        x2, y2, w2, h2 = b2
        # Convert center/size format to corner format.
        x2_min = x2 - w2 / 2
        x2_max = x2 + w2 / 2
        y2_min = y2 - h2 / 2
        y2_max = y2 + h2 / 2

        # Overlap extents; negative means the boxes are disjoint on that axis.
        dx = min(x1_max, x2_max) - max(x1_min, x2_min)
        dy = min(y1_max, y2_max) - max(y1_min, y2_min)
        if dx >= 0 and dy >= 0 and (dx * dy) / area_box1 > threshold:
            return True
    return False
149
+
150
+ # ========== Fungsi untuk Deteksi Video ==========
151
+
152
def convert_video_to_mp4(input_path, output_path):
    """Re-encode a video to H.264/AAC MP4 using ffmpeg.

    Parameters:
        input_path (str): source video path.
        output_path (str): destination .mp4 path.

    Returns:
        tuple[str | None, str | None]: (output_path, None) on success,
        (None, error_message) on failure.
    """
    try:
        subprocess.run(
            ['ffmpeg', '-i', input_path, '-vcodec', 'libx264', '-acodec', 'aac', output_path],
            check=True,
        )
        # BUG FIX: the original returned a bare string here while the error
        # branch returned a 2-tuple; the caller unpacks two values, so the
        # success path used to crash. Always return a (path, error) pair.
        return output_path, None
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        # FileNotFoundError covers ffmpeg itself being absent from PATH.
        return None, f"Error converting video: {e}"
158
+
159
def detect_objects_in_video(video_path):
    """Run YOLO on every frame, overlay boxes and counts, and write an MP4.

    Parameters:
        video_path (str): path of the uploaded video.

    Returns:
        str | None: path of the annotated output video, or None on failure.
    """
    import shutil  # local import: only needed for temp-dir cleanup

    temp_output_path = "/tmp/output_video.mp4"
    # BUG FIX: the original converted the input into temp_output_path and
    # then also wrote the annotated result to that same file while still
    # reading frames from it. Use a separate path for the converted input.
    converted_path = "/tmp/converted_input.mp4"
    temp_frames_dir = tempfile.mkdtemp()
    video = None
    output_video = None

    try:
        # Convert to MP4 first if the container is something else.
        if not video_path.endswith(".mp4"):
            video_path, err = convert_video_to_mp4(video_path, converted_path)
            if not video_path:
                print(f"Video conversion error: {err}")
                return None

        video = cv2.VideoCapture(video_path)
        # NOTE(review): CAP_PROP_FPS can report 0 for some containers, which
        # would produce an unplayable output — confirm with real inputs.
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(temp_output_path, fourcc, frame_rate, frame_size)

        frame_count = 0
        while True:
            ret, frame = video.read()
            if not ret:
                break

            # The Roboflow client predicts from a file path, so persist the frame.
            frame_path = os.path.join(temp_frames_dir, f"frame_{frame_count}.jpg")
            cv2.imwrite(frame_path, frame)

            predictions = yolo_model.predict(frame_path, confidence=50, overlap=80).json()

            # Count detections per class and draw each bounding box in green.
            object_counts = {}
            for prediction in predictions['predictions']:
                class_name = prediction['class']
                x, y, w, h = prediction['x'], prediction['y'], prediction['width'], prediction['height']
                object_counts[class_name] = object_counts.get(class_name, 0) + 1
                cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)),
                              (int(x + w / 2), int(y + h / 2)), (0, 255, 0), 2)
                cv2.putText(frame, class_name, (int(x - w / 2), int(y - h / 2 - 10)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Overlay the per-frame counts (one line per class, then the total).
            count_lines = [f"{cls}: {cnt}" for cls, cnt in object_counts.items()]
            count_lines += ["", f"Total Product: {sum(object_counts.values())}"]
            y_offset = 20
            for line in count_lines:
                cv2.putText(frame, line, (10, y_offset),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
                y_offset += 30  # move down for the next line

            output_video.write(frame)
            frame_count += 1

        return temp_output_path

    except Exception as e:
        # BUG FIX: the original returned (None, message) here although the
        # Gradio callback has a single Video output; log and return None.
        print(f"An error occurred: {e}")
        return None
    finally:
        # BUG FIX: capture/writer were never released on the exception path,
        # and the per-frame JPEG directory used to be leaked.
        if video is not None:
            video.release()
        if output_video is not None:
            output_video.release()
        shutil.rmtree(temp_frames_dir, ignore_errors=True)
245
+
246
# ========== Gradio Interface ==========
with gr.Blocks(theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate")) as iface:
    gr.Markdown("""<div style="text-align: center;"><h1>NESTLE - STOCK COUNTING</h1></div>""")

    with gr.Row():
        # Left column: still-image detection (combined YOLO + DINO-X).
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            detect_image_button = gr.Button("Detect Image")
            output_image = gr.Image(label="Detect Object")
            output_text = gr.Textbox(label="Counting Object")
            detect_image_button.click(
                fn=detect_combined,
                inputs=input_image,
                outputs=[output_image, output_text],
            )

        # Right column: per-frame video detection (YOLO only).
        with gr.Column():
            input_video = gr.Video(label="Input Video")
            detect_video_button = gr.Button("Detect Video")
            output_video = gr.Video(label="Output Video")
            detect_video_button.click(
                fn=detect_objects_in_video,
                inputs=input_video,
                outputs=[output_video],
            )

iface.launch()