viraj commited on
Commit
ad595c8
·
1 Parent(s): a31fd6a

Initial Commit

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +199 -0
  3. requirements.txt +5 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ env/
app.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from inference_sdk import InferenceHTTPClient
2
+ from PIL import Image, ImageDraw
3
+ import gradio as gr
4
+ import tempfile
5
+ import json
6
+ from gradio import Blocks
7
+
8
+
def greet(name):
    """Return a cheerful greeting for *name*."""
    return f"Hello {name}!!"
11
+
# Initialize Roboflow Clients
# NOTE(review): this API key is hard-coded and committed to a public repo —
# it is effectively leaked. Rotate it and read it from an environment
# variable (e.g. os.environ["ROBOFLOW_API_KEY"]) instead.
ROOM_CLIENT = InferenceHTTPClient(api_url="https://outline.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")
DOOR_WINDOW_CLIENT = InferenceHTTPClient(api_url="https://detect.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")
# Choice lists surfaced in the Gradio dropdowns defined further down.
color_options = ["Red", "Green", "Blue", "Yellow"]
layer_options = ["Room Detection", "Doors and Windows Detection"]
17
+
def apply_zoom(image, zoom_factor):
    """Return *image* scaled by *zoom_factor*.

    Parameters:
        image: a PIL.Image.Image (anything exposing ``.size`` and ``.resize``).
        zoom_factor: positive scale multiplier.

    Returns:
        The resized image, as returned by ``image.resize``.

    The scaled dimensions are truncated to ints and clamped to at least
    1 pixel per side, so a very small zoom factor can no longer produce an
    invalid zero-sized image (the original ``int(width * zoom_factor)``
    could yield 0).
    """
    width, height = image.size
    new_width = max(1, int(width * zoom_factor))
    new_height = max(1, int(height * zoom_factor))
    return image.resize((new_width, new_height))
23
+
def detect_and_draw(image_path, model_id, client, filter_classes=None, color_choice=None):
    """Run *model_id* on the image at *image_path* through *client*, draw the
    detections onto a copy of the image, and tally detections per class.

    Parameters:
        image_path: path to the image file to infer on.
        model_id: Roboflow model identifier, e.g. "room-segmentation-frntt/1".
        client: an InferenceHTTPClient to call .infer() on.
        filter_classes: optional list of lowercase class names to keep;
            all other predictions are skipped.
        color_choice: outline/label colour passed to PIL (None → PIL default).

    Returns:
        (annotated PIL image, {class_name: count}) tuple.

    Side effect: the raw inference response is dumped to 'result.json' so a
    run can be inspected afterwards.
    """
    result = client.infer(image_path, model_id=model_id)

    with open('result.json', "w") as dump:
        json.dump(result, dump, indent=4)

    annotated = Image.open(image_path).copy()
    pen = ImageDraw.Draw(annotated)

    counts = {}
    for det in result.get('predictions', []):
        pred_class = det.get('class', '').lower()

        # Honour the optional class whitelist.
        if filter_classes and pred_class not in filter_classes:
            continue

        counts[pred_class] = counts.get(pred_class, 0) + 1

        # Roboflow reports centre coordinates; convert to a top-left corner box.
        left = int(det['x'] - det['width'] / 2)
        top = int(det['y'] - det['height'] / 2)
        box_w = int(det['width'])
        box_h = int(det['height'])

        pen.rectangle([left, top, left + box_w, top + box_h], outline=color_choice, width=2)
        pen.text((left, top - 10), f"{pred_class}", fill=color_choice)

    return annotated, counts
60
+
def process_floor_plan(image, zoom_factor, color_choice, selected_layers):
    """Zoom the uploaded floor plan, run the selected detection layers, and
    return ([annotated images], JSON string of combined per-class counts).

    Parameters:
        image: uploaded PIL image from the Gradio Image component.
        zoom_factor: scale multiplier applied before inference.
        color_choice: colour used to draw boxes/labels.
        selected_layers: list of layer names chosen in the UI.

    On any failure, returns ([], JSON {"error": ...}) instead of raising.

    Fixes over the original revision:
    - each model is invoked at most once (the first draft ran BOTH models
      unconditionally and then ran the selected ones AGAIN, discarding the
      first results — doubling the paid inference calls);
    - the temporary image file is always deleted (delete=False previously
      leaked one temp file per request).
    """
    import os

    temp_file_path = None
    try:
        # Persist the uploaded PIL image so the inference SDK can read a path.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
            image.save(temp_file.name)
            temp_file_path = temp_file.name

        zoomed_image = apply_zoom(Image.open(temp_file_path), zoom_factor)
        zoomed_image.save(temp_file_path)

        outputs = []
        combined_counts = {}

        # Run only the layers the user asked for — one inference call each.
        if "Room Detection" in selected_layers:
            room_overlay, room_counts = detect_and_draw(
                temp_file_path, "room-segmentation-frntt/1", ROOM_CLIENT,
                filter_classes=["room"], color_choice=color_choice
            )
            outputs.append(("Room Detection", room_overlay))
            combined_counts.update(room_counts)

        if "Doors and Windows Detection" in selected_layers:
            dw_overlay, dw_counts = detect_and_draw(
                temp_file_path, "door-detection-model/2", DOOR_WINDOW_CLIENT,
                filter_classes=["door", "window"], color_choice=color_choice
            )
            outputs.append(("Doors and Windows Detection", dw_overlay))
            combined_counts.update(dw_counts)

        return [img[1] for img in outputs], json.dumps(combined_counts, indent=4)

    except Exception as e:
        print(f"Error processing floor plan: {e}")
        return [], json.dumps({"error": str(e)}, indent=4)
    finally:
        # delete=False above means we own deletion; best-effort cleanup.
        if temp_file_path:
            try:
                os.remove(temp_file_path)
            except OSError:
                pass
102
+
# Click-to-measure scratch state: accumulates (x, y) points.
measurement_points = []


def add_measurement_point(x, y):
    """Record a measurement point; once exactly two exist, print their
    Euclidean distance in pixels."""
    global measurement_points
    measurement_points.append((x, y))
    if len(measurement_points) == 2:
        (x0, y0), (x1, y1) = measurement_points
        distance = ((x1 - x0) ** 2 + (y1 - y0) ** 2) ** 0.5
        print(f"Distance between points: {distance:.2f} pixels")


def clear_measurements():
    """Forget any recorded measurement points."""
    global measurement_points
    measurement_points = []
115
+
# NOTE(review): everything in this section appears to be dead code —
# `actions` and `action_index` are never read (ActionManager below builds
# its own list), and these two Buttons are created outside any gr.Blocks
# layout and are shadowed by the buttons defined inside the
# `with gr.Blocks()` block further down. Consider deleting this section.
actions = ["Upload", "Detect Rooms", "Detect Doors/Windows"]
action_index = 0
undo_button = gr.Button("Undo")
redo_button = gr.Button("Redo")
120
+
class ActionManager:
    """Cycle through a fixed list of actions while keeping a textual log.

    `perform_action` steps the cursor forward through `actions` (wrapping at
    the end) and `undo_action` steps it backward (wrapping at the start);
    both append a human-readable entry to `action_log`.
    """

    def __init__(self, actions):
        self.actions = actions
        self.current_index = 0
        self.action_log = []

    def _step(self, delta, verb):
        # Shared move-and-log helper; modulo arithmetic wraps at both ends.
        self.current_index = (self.current_index + delta) % len(self.actions)
        action = self.actions[self.current_index]
        self.action_log.append(f"{verb}: {action}")
        return action

    def perform_action(self):
        """Advance to the next action, log it, and return it."""
        return self._step(1, "Performed")

    def undo_action(self):
        """Step back to the previous action, log it, and return it."""
        return self._step(-1, "Undone")

    def get_action_log(self):
        """Return the action log, one entry per line."""
        return "\n".join(self.action_log)
144
+
action_manager = ActionManager(actions=["Upload", "Detect Rooms", "Detect Doors/Windows"])

with gr.Blocks() as demo:
    gr.Markdown("# Advanced Floor Plan Detection")
    gr.Markdown("Upload a floor plan to detect rooms, doors, and windows. Choose detection layers and highlight them with your preferred color.")

    # Input controls.
    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Floor Plan")
        zoom_factor = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Zoom Factor")
        color_choice = gr.Dropdown(choices=["Red", "Green", "Blue", "Yellow"], label="Detection Color")
        selected_layers = gr.Dropdown(choices=["Room Detection", "Doors and Windows Detection"], multiselect=True, label="Select Layers")

    # Detection results.
    with gr.Row():
        gallery_output = gr.Gallery(label="Detected Layers")
        detection_counts_output = gr.Text(label="Detection Counts (JSON)")

    # Undo/redo cycles through the ActionManager's fixed action list.
    with gr.Row():
        undo_button = gr.Button("Undo")
        redo_button = gr.Button("Redo")
        action_output = gr.Textbox(label="Current Action", value=action_manager.actions[action_manager.current_index], interactive=False)

    def handle_action(action_type):
        """Route the undo/redo buttons to the shared ActionManager."""
        if action_type == "undo":
            return action_manager.undo_action()
        elif action_type == "redo":
            return action_manager.perform_action()

    undo_button.click(
        lambda: handle_action("undo"),
        inputs=[],
        outputs=action_output
    )
    redo_button.click(
        lambda: handle_action("redo"),
        inputs=[],
        outputs=action_output
    )

    process_button = gr.Button("Process Floor Plan")
    process_button.click(
        process_floor_plan,
        inputs=[image_input, zoom_factor, color_choice, selected_layers],
        outputs=[gallery_output, detection_counts_output]
    )

    # NOTE(review): the components below have no click handler wired up yet —
    # presumably a planned "suggestions" feature; confirm before shipping.
    with gr.Row():
        upload = gr.Image(type="pil", label="Upload Floor Plan")
        detect_button = gr.Button("Detect & Suggest Improvements")

    with gr.Row():
        detection_output = gr.Gallery(label="Room Detection Results")
        suggestion_output = gr.Textbox(label="Improvement Suggestions", lines=5)

# BUG FIX: the original rebound `demo` to a throwaway
# `gr.Interface(fn=greet, ...)` immediately before launching, so the entire
# Blocks UI above was never served — only the hello-world greeter appeared.
# Launch the Blocks app itself.
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio
2
+ pillow
3
+ inference-sdk
4
+ fastapi
5
+ uvicorn