Shakir60 committed
Commit aabfb0a · verified · 1 Parent(s): 1679b1d

Update app.py

Files changed (1)
  1. app.py +323 -380
app.py CHANGED
@@ -6,416 +6,359 @@ from sentence_transformers import SentenceTransformer
6
  from PIL import Image
7
  import torch
8
  import numpy as np
9
- from typing import List, Dict
10
  import faiss
11
  import json
12
 
13
- # Initialize sentence transformer for embeddings
14
- @st.cache_resource
15
- def init_embedding_model():
16
- return SentenceTransformer('all-MiniLM-L6-v2')
17
 
18
- # Initialize Groq client
19
- @st.cache_resource
20
- def init_groq_client():
21
- return Groq(api_key=os.environ.get("GROQ_API_KEY"))
22
- class RAGSystem:
23
- def __init__(self):
24
- self.embedding_model = init_embedding_model()
25
- self.knowledge_base = self.load_knowledge_base()
26
- self.vector_store = self.create_vector_store()
27
-
28
- def load_knowledge_base(self) -> List[Dict]:
29
- """Load and preprocess knowledge base into a list of documents"""
30
- # Your existing knowledge base dictionary
31
- kb = {
32
- "spalling": [
33
- {
34
- "severity": "Critical",
35
- "description": "Severe concrete spalling with exposed reinforcement and section loss",
36
- "repair_method": [
37
- "Install temporary support",
38
- "Remove deteriorated concrete",
39
- "Clean and treat reinforcement",
40
- "Apply corrosion inhibitor",
41
- "Apply bonding agent",
42
- "High-strength repair mortar",
43
- "Surface treatment and waterproofing"
44
- ],
45
- "estimated_cost": "Very High ($15,000+)",
46
- "timeframe": "3-4 weeks",
47
- "location": "Primary structural elements",
48
- "required_expertise": "Structural Engineer + Specialist Contractor",
49
- "immediate_action": "Evacuate area, install temporary support, prevent access",
50
- "prevention": "Regular inspections, waterproofing, chloride protection",
51
- "testing_required": ["Core testing", "Reinforcement scanning", "Chloride testing"],
52
- "common_causes": [
53
- "Reinforcement corrosion",
54
- "Freeze-thaw cycles",
55
- "Poor concrete cover",
56
- "Chemical attack"
57
- ],
58
- "safety_considerations": [
59
- "Risk of structural failure",
60
- "Falling concrete hazard",
61
- "Worker safety during repairs"
62
- ]
63
- },
64
- {
65
- "severity": "Moderate",
66
- "description": "Surface spalling without exposed reinforcement",
67
- "repair_method": [
68
- "Remove loose concrete",
69
- "Surface preparation",
70
- "Apply repair mortar",
71
- "Surface treatment"
72
- ],
73
- "estimated_cost": "Medium ($5,000-$10,000)",
74
- "timeframe": "1-2 weeks",
75
- "location": "Non-structural elements",
76
- "required_expertise": "Concrete Repair Specialist",
77
- "immediate_action": "Remove loose material, protect from water ingress",
78
- "prevention": "Surface sealers, proper drainage",
79
- "testing_required": ["Surface adhesion testing", "Moisture testing"],
80
- "common_causes": [
81
- "Surface carbonation",
82
- "Impact damage",
83
- "Poor curing"
84
- ],
85
- "safety_considerations": [
86
- "Minor falling debris risk",
87
- "Dust control during repairs"
88
- ]
89
- }
90
- ],
91
- "reinforcement_corrosion": [
92
- {
93
- "severity": "Critical",
94
- "description": "Severe corrosion with >30% section loss",
95
- "repair_method": [
96
- "Structural support installation",
97
- "Concrete removal around reinforcement",
98
- "Reinforcement replacement",
99
- "Corrosion protection application",
100
- "High-strength concrete repair",
101
- "Cathodic protection installation"
102
- ],
103
- "estimated_cost": "Critical ($20,000+)",
104
- "timeframe": "4-6 weeks",
105
- "location": "Load-bearing elements",
106
- "required_expertise": "Senior Structural Engineer",
107
- "immediate_action": "Immediate evacuation, emergency shoring",
108
- "prevention": "Waterproofing, cathodic protection",
109
- "testing_required": [
110
- "Half-cell potential survey",
111
- "Concrete resistivity testing",
112
- "Chloride analysis",
113
- "Carbonation testing"
114
- ],
115
- "common_causes": [
116
- "Chloride contamination",
117
- "Carbonation",
118
- "Stray electrical currents",
119
- "Poor concrete quality"
120
- ],
121
- "safety_considerations": [
122
- "Structural collapse risk",
123
- "Electrical hazards during testing",
124
- "Confined space entry"
125
- ]
126
- }
127
- ],
128
- "structural_cracks": [
129
- {
130
- "severity": "High",
131
- "description": "Active structural cracks >5mm width",
132
- "repair_method": [
133
- "Structural analysis",
134
- "Crack monitoring",
135
- "Epoxy injection",
136
- "Carbon fiber reinforcement",
137
- "Load path modification"
138
- ],
139
- "estimated_cost": "High ($10,000-$20,000)",
140
- "timeframe": "2-4 weeks",
141
- "location": "Primary structural elements",
142
- "required_expertise": "Structural Engineer",
143
- "immediate_action": "Install crack monitors, restrict loading",
144
- "prevention": "Proper design, joint maintenance",
145
- "testing_required": [
146
- "Crack movement monitoring",
147
- "Load testing",
148
- "Concrete strength testing"
149
- ],
150
- "common_causes": [
151
- "Overloading",
152
- "Foundation settlement",
153
- "Thermal movements",
154
- "Design deficiencies"
155
- ],
156
- "safety_considerations": [
157
- "Structural stability",
158
- "Water infiltration",
159
- "Working at height"
160
- ]
161
- }
162
- ],
163
- "water_damage": [
164
- {
165
- "severity": "Medium",
166
- "description": "Active water infiltration with deterioration",
167
- "repair_method": [
168
- "Water source identification",
169
- "Drainage improvement",
170
- "Waterproofing membrane installation",
171
- "Joint sealing",
172
- "Surface treatment"
173
- ],
174
- "estimated_cost": "Medium ($5,000-$15,000)",
175
- "timeframe": "1-3 weeks",
176
- "location": "Various locations",
177
- "required_expertise": "Waterproofing Specialist",
178
- "immediate_action": "Water diversion, dehumidification",
179
- "prevention": "Regular maintenance, proper drainage",
180
- "testing_required": [
181
- "Moisture mapping",
182
- "Drainage assessment",
183
- "Permeability testing"
184
- ],
185
- "common_causes": [
186
- "Failed waterproofing",
187
- "Poor drainage",
188
- "Joint failure",
189
- "Condensation"
190
- ],
191
- "safety_considerations": [
192
- "Slip hazards",
193
- "Electrical safety",
194
- "Mold growth"
195
- ]
196
- }
197
- ],
198
- "surface_deterioration": [
199
- {
200
- "severity": "Low",
201
- "description": "Surface scaling and deterioration",
202
- "repair_method": [
203
- "Surface cleaning",
204
- "Repair material application",
205
- "Surface treatment",
206
- "Protective coating"
207
- ],
208
- "estimated_cost": "Low ($2,000-$5,000)",
209
- "timeframe": "3-5 days",
210
- "location": "Exposed surfaces",
211
- "required_expertise": "Concrete Repair Technician",
212
- "immediate_action": "Clean and protect surface",
213
- "prevention": "Regular maintenance, surface protection",
214
- "testing_required": [
215
- "Surface strength testing",
216
- "Coating adhesion tests"
217
- ],
218
- "common_causes": [
219
- "Freeze-thaw damage",
220
- "Chemical exposure",
221
- "Poor finishing",
222
- "Abrasion"
223
- ],
224
- "safety_considerations": [
225
- "Dust control",
226
- "Chemical handling",
227
- "PPE requirements"
228
- ]
229
- }
230
- ],
231
- "alkali_silica_reaction": [
232
- {
233
- "severity": "High",
234
- "description": "Concrete expansion and map cracking due to ASR",
235
- "repair_method": [
236
- "Expansion monitoring",
237
- "Moisture control",
238
- "Crack sealing",
239
- "Surface treatment",
240
- "Structural strengthening"
241
- ],
242
- "estimated_cost": "High ($15,000-$25,000)",
243
- "timeframe": "3-5 weeks",
244
- "location": "Concrete elements",
245
- "required_expertise": "Materials Engineer + Structural Engineer",
246
- "immediate_action": "Monitor expansion, control moisture",
247
- "prevention": "Proper aggregate selection, pozzolans",
248
- "testing_required": [
249
- "Petrographic analysis",
250
- "Expansion testing",
251
- "Humidity monitoring"
252
- ],
253
- "common_causes": [
254
- "Reactive aggregates",
255
- "High alkali cement",
256
- "Moisture presence",
257
- "Temperature cycles"
258
- ],
259
- "safety_considerations": [
260
- "Progressive deterioration",
261
- "Structural integrity",
262
- "Long-term monitoring"
263
- ]
264
- }
265
- ]
266
  }
267
-
268
- # Convert nested knowledge base into flat documents
269
- documents = []
270
- for category, items in kb.items():
271
- for item in items:
272
- # Create a text representation of the document
273
- doc_text = f"Category: {category}\n"
274
- for key, value in item.items():
275
- if isinstance(value, list):
276
- doc_text += f"{key}: {', '.join(value)}\n"
277
- else:
278
- doc_text += f"{key}: {value}\n"
279
- documents.append({
280
- "text": doc_text,
281
- "metadata": {"category": category}
282
- })
283
-
284
- return documents
285
 
286
- def create_vector_store(self):
287
- """Create FAISS vector store from knowledge base"""
288
- # Generate embeddings for all documents
289
- texts = [doc["text"] for doc in self.knowledge_base]
290
- embeddings = self.embedding_model.encode(texts)
291
-
292
- # Initialize FAISS index
293
- dimension = embeddings.shape[1]
294
- index = faiss.IndexFlatL2(dimension)
295
- index.add(np.array(embeddings).astype('float32'))
296
-
297
- return index
298
-
299
- def create_vector_store(self):
300
- """Create FAISS vector store from knowledge base"""
301
- # Generate embeddings for all documents
302
- texts = [doc["text"] for doc in self.knowledge_base]
303
- embeddings = self.embedding_model.encode(texts)
 
304
 
305
- # Initialize FAISS index
306
- dimension = embeddings.shape[1]
307
- index = faiss.IndexFlatL2(dimension)
308
- index.add(np.array(embeddings).astype('float32'))
 
309
 
310
- return index
311
 
312
- def get_relevant_context(self, query: str, k: int = 3) -> str:
313
- """Retrieve relevant context based on query"""
314
  # Generate query embedding
315
  query_embedding = self.embedding_model.encode([query])
316
 
317
  # Search for similar documents
318
  D, I = self.vector_store.search(np.array(query_embedding).astype('float32'), k)
319
 
320
- # Combine relevant documents into context
321
- context = "\n\n".join([self.knowledge_base[i]["text"] for i in I[0]])
322
- return context
323
-
324
- def get_groq_response(query: str, context: str) -> str:
325
- """Get response from Groq LLM"""
326
- client = init_groq_client()
327
- try:
328
- prompt = f"""Based on the following context about construction defects, please answer the question.
329
- Context:
330
- {context}
331
- Question: {query}
332
- Please provide a detailed and specific answer based on the given context."""
333
-
334
- response = client.chat.completions.create(
335
- messages=[
336
- {
337
- "role": "system",
338
- "content": "You are a construction defect analysis expert. Provide detailed, accurate answers based on the given context."
339
- },
340
- {
341
- "role": "user",
342
- "content": prompt
343
- }
344
- ],
345
- model="llama-3.3-70b-versatile",
346
- )
347
- return response.choices[0].message.content
348
- except Exception as e:
349
- return f"Error: {str(e)}"
350
 
351
- def main():
352
  st.set_page_config(
353
- page_title="Construction Defect RAG Analyzer",
354
  page_icon="🏗️",
355
  layout="wide"
356
  )
357
 
358
- st.title("🏗️ Construction Defect RAG Analyzer")
359
 
360
- # Initialize RAG system
361
  if 'rag_system' not in st.session_state:
362
- st.session_state.rag_system = RAGSystem()
363
 
364
- # File upload for image analysis
365
- uploaded_file = st.file_uploader(
366
- "Upload a construction image",
367
- type=['jpg', 'jpeg', 'png']
368
- )
369
 
370
- # Query input
371
- user_query = st.text_input("Ask a question about construction defects:")
372
 
373
- if user_query:
374
- with st.spinner("Processing query..."):
375
- # Get relevant context using RAG
376
- context = st.session_state.rag_system.get_relevant_context(user_query)
377
 
378
- # Debug view of retrieved context
379
- if st.checkbox("Show retrieved context"):
380
- st.subheader("Retrieved Context")
381
- st.text(context)
382
 
383
- # Get response from Groq
384
- st.subheader("AI Assistant Response")
385
- response = get_groq_response(user_query, context)
386
- st.write(response)
387
-
388
- if uploaded_file:
389
- image = Image.open(uploaded_file)
390
- st.image(image, caption="Uploaded Image")
391
-
392
- # Combine image analysis with RAG
393
- col1, col2 = st.columns(2)
394
-
395
- with col1:
396
- st.subheader("Image Analysis")
397
- # Image analysis placeholder
398
- st.info("Image analysis results would appear here")
399
-
400
- with col2:
401
- st.subheader("AI Assistant Response")
402
- if user_query: # Only show response if there's a query
403
- # Get relevant context from knowledge base
404
  context = st.session_state.rag_system.get_relevant_context(user_query)
405
 
406
- # Get response from Groq
407
- with st.spinner("Getting AI response..."):
408
- response = get_groq_response(user_query, context)
409
- st.write(response)
410
-
411
- # Display knowledge base sections
412
- if st.checkbox("Show Knowledge Base"):
413
- st.subheader("Available Knowledge Base")
414
- kb_data = st.session_state.rag_system.knowledge_base
415
- for doc in kb_data:
416
- category = doc["metadata"]["category"]
417
- with st.expander(category.title()):
418
- st.text(doc["text"])
419
 
420
  if __name__ == "__main__":
421
  main()
 
6
  from PIL import Image
7
  import torch
8
  import numpy as np
9
+ from typing import List, Dict, Tuple, Optional, Any
10
  import faiss
11
  import json
12
+ import torchvision.transforms.functional as TF
13
+ from torchvision import transforms
14
+ import cv2
15
+ import pandas as pd
16
+ from datetime import datetime
17
+ import logging
18
 
19
+ # Setup logging
20
+ logging.basicConfig(level=logging.INFO)
21
+ logger = logging.getLogger(__name__)
 
22
 
23
+ class ConfigManager:
24
+ """Manages configuration settings for the application"""
25
+ DEFAULT_CONFIG = {
26
+ "model_settings": {
27
+ "vit_model": "google/vit-base-patch16-224",
28
+ "sentence_transformer": "all-MiniLM-L6-v2",
29
+ "groq_model": "llama-3.3-70b-versatile"
30
+ },
31
+ "analysis_settings": {
32
+ "confidence_threshold": 0.5,
33
+ "max_defects": 3,
34
+ "heatmap_intensity": 0.7
35
+ },
36
+ "rag_settings": {
37
+ "num_relevant_docs": 3,
38
+ "similarity_threshold": 0.75
39
  }
40
+ }
41
 
42
+ @staticmethod
43
+ def load_config():
44
+ """Load configuration with fallback to defaults"""
45
+ try:
46
+ if os.path.exists('config.json'):
47
+ with open('config.json', 'r') as f:
48
+ config = json.load(f)
49
+ return {**ConfigManager.DEFAULT_CONFIG, **config}
50
+ except Exception as e:
51
+ logger.warning(f"Error loading config: {e}")
52
+ return ConfigManager.DEFAULT_CONFIG
53
+
54
+ config = ConfigManager.load_config()
55
+
56
+ class ImageAnalyzer:
57
+ def __init__(self):
58
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
59
+ self.config = config["model_settings"]
60
+ self.analysis_config = config["analysis_settings"]
61
+ self.defect_classes = [
62
+ "spalling", "reinforcement_corrosion", "structural_cracks",
63
+ "water_damage", "surface_deterioration", "alkali_silica_reaction",
64
+ "concrete_delamination", "honeycomb", "scaling",
65
+ "efflorescence", "joint_deterioration", "carbonation"
66
+ ]
67
+ self.initialize_models()
68
+ self.history = []
69
+
70
+ @st.cache_resource
71
+ def initialize_models(self):
72
+ """Initialize all required models"""
73
+ try:
74
+ # Initialize ViT model
75
+ self.model = ViTForImageClassification.from_pretrained(
76
+ self.config["vit_model"],
77
+ num_labels=len(self.defect_classes),
78
+ ignore_mismatched_sizes=True
79
+ ).to(self.device)
80
+
81
+ # Initialize image processor
82
+ self.processor = ViTImageProcessor.from_pretrained(self.config["vit_model"])
83
+
84
+ # Initialize transformations pipeline
85
+ self.transforms = self._setup_transforms()
86
+
87
+ return True
88
+ except Exception as e:
89
+ logger.error(f"Model initialization error: {e}")
90
+ return False
91
+
92
+ def _setup_transforms(self):
93
+ """Setup image transformation pipeline"""
94
+ return transforms.Compose([
95
+ transforms.Resize((224, 224)),
96
+ transforms.ToTensor(),
97
+ transforms.Normalize(mean=[0.485, 0.456, 0.406],
98
+ std=[0.229, 0.224, 0.225]),
99
+ transforms.RandomAdjustSharpness(2),
100
+ transforms.ColorJitter(brightness=0.2, contrast=0.2)
101
+ ])
102
+
103
+ def preprocess_image(self, image: Image.Image) -> Dict[str, Any]:
104
+ """Enhanced image preprocessing with multiple analyses"""
105
+ try:
106
+ # Convert to RGB if necessary
107
+ if image.mode != 'RGB':
108
+ image = image.convert('RGB')
109
+
110
+ # Basic image statistics
111
+ img_array = np.array(image)
112
+ stats = {
113
+ "mean_brightness": np.mean(img_array),
114
+ "std_brightness": np.std(img_array),
115
+ "size": image.size,
116
+ "aspect_ratio": image.size[0] / image.size[1]
117
+ }
118
+
119
+ # Edge detection for crack analysis
120
+ gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
121
+ edges = cv2.Canny(gray, 100, 200)
122
+ stats["edge_density"] = np.mean(edges > 0)
123
+
124
+ # Color analysis for rust detection
125
+ hsv = cv2.cvtColor(img_array, cv2.COLOR_RGB2HSV)
126
+ rust_mask = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([30, 255, 255]))
127
+ stats["rust_percentage"] = np.mean(rust_mask > 0)
128
+
129
+ # Transform for model
130
+ model_input = self.transforms(image).unsqueeze(0).to(self.device)
131
+
132
+ return {
133
+ "model_input": model_input,
134
+ "stats": stats,
135
+ "edges": edges,
136
+ "rust_mask": rust_mask
137
+ }
138
+ except Exception as e:
139
+ logger.error(f"Preprocessing error: {e}")
140
+ return None
141
+
142
+ def detect_defects(self, image: Image.Image) -> Dict[str, Any]:
143
+ """Enhanced defect detection with multiple analysis methods"""
144
+ try:
145
+ # Preprocess image
146
+ proc_data = self.preprocess_image(image)
147
+ if proc_data is None:
148
+ return None
149
+
150
+ # Model prediction
151
+ with torch.no_grad():
152
+ outputs = self.model(proc_data["model_input"])
153
+
154
+ # Get probabilities
155
+ probabilities = torch.nn.functional.softmax(outputs.logits, dim=1)
156
+
157
+ # Convert to dictionary
158
+ defect_probs = {
159
+ self.defect_classes[i]: float(probabilities[0][i])
160
+ for i in range(len(self.defect_classes))
161
+ }
162
+
163
+ # Generate attention heatmap
164
+ attention_weights = outputs.attentions[-1].mean(dim=1)[0] if hasattr(outputs, 'attentions') else None
165
+ heatmap = self.generate_heatmap(attention_weights, image.size) if attention_weights is not None else None
166
+
167
+ # Additional analysis based on image statistics
168
+ additional_analysis = self.analyze_image_statistics(proc_data["stats"])
169
+
170
+ # Combine all results
171
+ result = {
172
+ "defect_probabilities": defect_probs,
173
+ "heatmap": heatmap,
174
+ "image_statistics": proc_data["stats"],
175
+ "additional_analysis": additional_analysis,
176
+ "edge_detection": proc_data["edges"],
177
+ "rust_detection": proc_data["rust_mask"],
178
+ "timestamp": datetime.now().isoformat()
179
+ }
180
+
181
+ # Save to history
182
+ self.history.append(result)
183
+
184
+ return result
185
+ except Exception as e:
186
+ logger.error(f"Defect detection error: {e}")
187
+ return None
188
+
189
+ def analyze_image_statistics(self, stats: Dict) -> Dict[str, Any]:
190
+ """Analyze image statistics for additional insights"""
191
+ analysis = {}
192
 
193
+ # Brightness analysis
194
+ if stats["mean_brightness"] < 50:
195
+ analysis["lighting_condition"] = "Poor lighting - may affect accuracy"
196
+ elif stats["mean_brightness"] > 200:
197
+ analysis["lighting_condition"] = "Overexposed - may affect accuracy"
198
 
199
+ # Edge density analysis
200
+ if stats["edge_density"] > 0.1:
201
+ analysis["crack_likelihood"] = "High crack probability based on edge detection"
202
 
203
+ # Rust analysis
204
+ if stats["rust_percentage"] > 0.05:
205
+ analysis["corrosion_indicator"] = "Possible corrosion detected"
206
+
207
+ return analysis
208
+
209
+ def generate_heatmap(self, attention_weights: torch.Tensor, image_size: Tuple[int, int]) -> np.ndarray:
210
+ """Generate enhanced attention heatmap"""
211
+ try:
212
+ if attention_weights is None:
213
+ return None
214
+
215
+ # Process attention weights
216
+ heatmap = attention_weights.cpu().numpy()
217
+ heatmap = cv2.resize(heatmap, image_size)
218
+
219
+ # Enhanced normalization
220
+ heatmap = np.maximum(heatmap, 0)
221
+ heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + 1e-8)
222
+
223
+ # Apply gamma correction
224
+ gamma = self.analysis_config["heatmap_intensity"]
225
+ heatmap = np.power(heatmap, gamma)
226
+
227
+ # Apply colormap
228
+ heatmap = (heatmap * 255).astype(np.uint8)
229
+ heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
230
+
231
+ return heatmap
232
+ except Exception as e:
233
+ logger.error(f"Heatmap generation error: {e}")
234
+ return None
235
+
236
+ class EnhancedRAGSystem(RAGSystem):
237
+ """Enhanced RAG system with additional features"""
238
+ def __init__(self):
239
+ super().__init__()
240
+ self.config = config["rag_settings"]
241
+ self.query_history = []
242
+
243
+ def get_relevant_context(self, query: str, k: int = None) -> str:
244
+ """Enhanced context retrieval with debugging info"""
245
+ if k is None:
246
+ k = self.config["num_relevant_docs"]
247
+
248
+ # Log query
249
+ self.query_history.append({
250
+ "timestamp": datetime.now().isoformat(),
251
+ "query": query
252
+ })
253
+
254
  # Generate query embedding
255
  query_embedding = self.embedding_model.encode([query])
256
 
257
  # Search for similar documents
258
  D, I = self.vector_store.search(np.array(query_embedding).astype('float32'), k)
259
 
260
+ # Filter by similarity threshold
261
+ relevant_docs = [
262
+ self.knowledge_base[i]["text"]
263
+ for i, dist in zip(I[0], D[0])
264
+ if dist < self.config["similarity_threshold"]
265
+ ]
266
+
267
+ return "\n\n".join(relevant_docs)
 
268
 
269
+ def main():
270
  st.set_page_config(
271
+ page_title="Enhanced Construction Defect Analyzer",
272
  page_icon="🏗️",
273
  layout="wide"
274
  )
275
 
276
+ st.title("🏗️ Advanced Construction Defect Analysis System")
277
 
278
+ # Initialize systems
279
  if 'rag_system' not in st.session_state:
280
+ st.session_state.rag_system = EnhancedRAGSystem()
281
+ if 'image_analyzer' not in st.session_state:
282
+ st.session_state.image_analyzer = ImageAnalyzer()
283
 
284
+ # Sidebar for settings and history
285
+ with st.sidebar:
286
+ st.header("Settings & History")
287
+ show_debug = st.checkbox("Show Debug Information")
288
+ confidence_threshold = st.slider(
289
+ "Confidence Threshold",
290
+ min_value=0.0,
291
+ max_value=1.0,
292
+ value=config["analysis_settings"]["confidence_threshold"]
293
+ )
294
+
295
+ if st.button("View Analysis History"):
296
+ st.write("Recent Analyses:", st.session_state.image_analyzer.history[-5:])
297
 
298
+ # Main interface
299
+ col1, col2 = st.columns([2, 3])
300
+
301
+ with col1:
302
+ uploaded_file = st.file_uploader(
303
+ "Upload a construction image",
304
+ type=['jpg', 'jpeg', 'png']
305
+ )
306
+
307
+ user_query = st.text_input(
308
+ "Ask a question about construction defects:",
309
+ help="Enter your question about specific defects or general construction issues"
310
+ )
311
 
312
+ with col2:
313
+ if uploaded_file:
314
+ image = Image.open(uploaded_file)
 
315
 
316
+ # Create tabs for different views
317
+ tabs = st.tabs(["Original", "Analysis", "Details"])
318
 
319
+ with tabs[0]:
320
+ st.image(image, caption="Uploaded Image")
321
+
322
+ with tabs[1]:
323
+ with st.spinner("Analyzing image..."):
324
+ results = st.session_state.image_analyzer.detect_defects(image)
325
+
326
+ if results:
327
+ # Show defect probabilities
328
+ defect_probs = results["defect_probabilities"]
329
+ significant_defects = {
330
+ k: v for k, v in defect_probs.items()
331
+ if v > confidence_threshold
332
+ }
333
+
334
+ if significant_defects:
335
+ st.subheader("Detected Defects")
336
+ fig = plt.figure(figsize=(10, 6))
337
+ plt.barh(list(significant_defects.keys()),
338
+ list(significant_defects.values()))
339
+ st.pyplot(fig)
340
+
341
+ # Show heatmap
342
+ if results["heatmap"] is not None:
343
+ st.image(results["heatmap"], caption="Defect Attention Map")
344
+
345
+ with tabs[2]:
346
+ if results:
347
+ st.json(results["additional_analysis"])
348
+ if show_debug:
349
+ st.json(results["image_statistics"])
350
+
351
+ if user_query:
352
+ with st.spinner("Processing query..."):
353
  context = st.session_state.rag_system.get_relevant_context(user_query)
354
+ response = get_groq_response(user_query, context)
355
+
356
+ st.subheader("AI Assistant Response")
357
+ st.write(response)
358
 
359
+ if show_debug:
360
+ st.subheader("Retrieved Context")
361
+ st.text(context)
362
 
363
  if __name__ == "__main__":
364
  main()
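
Note on configuration: ConfigManager.load_config merges an optional config.json over DEFAULT_CONFIG with a shallow dict merge ({**ConfigManager.DEFAULT_CONFIG, **config}), so an override file replaces any top-level section it names wholesale. A minimal sketch of producing such an override follows, assuming the config.json file name and the keys from DEFAULT_CONFIG above; the changed value is illustrative only and not part of this commit.

    import json

    # Override only analysis_settings; because the merge is shallow, restate every
    # key of the section, not just the one being changed.
    override = {
        "analysis_settings": {
            "confidence_threshold": 0.6,  # illustrative value (default is 0.5)
            "max_defects": 3,
            "heatmap_intensity": 0.7
        }
    }

    # Written next to app.py; load_config only reads it if the file exists,
    # otherwise DEFAULT_CONFIG is used as-is.
    with open("config.json", "w") as f:
        json.dump(override, f, indent=2)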