Shakir60 committed
Commit e0fdb55 · verified · 1 Parent(s): 8a69496

Create app.py

Files changed (1)
  app.py +150 -0
app.py ADDED
@@ -0,0 +1,150 @@
+ # app.py
+ import streamlit as st
+ from transformers import ViTForImageClassification, ViTImageProcessor
+ from PIL import Image
+ import torch
+ from sentence_transformers import SentenceTransformer
+ import faiss
+ import pandas as pd
+ import os
+ from pathlib import Path
+ import json
+
+ DAMAGE_TYPES = {
+     0: {'name': 'spalling', 'risk': 'High'},
+     1: {'name': 'reinforcement_corrosion', 'risk': 'Critical'},
+     2: {'name': 'structural_crack', 'risk': 'High'},
+     3: {'name': 'dampness', 'risk': 'Medium'},
+     4: {'name': 'no_damage', 'risk': 'Low'}
+ }
+
+ @st.cache_resource
+ def load_models():
+     # Note: with num_labels=5 and ignore_mismatched_sizes=True, the classification
+     # head is newly initialized; the model needs fine-tuning on damage images
+     # before its predictions are meaningful.
+     vision_model = ViTForImageClassification.from_pretrained(
+         "google/vit-base-patch16-224",
+         num_labels=len(DAMAGE_TYPES),
+         ignore_mismatched_sizes=True
+     )
+     processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
+     embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
+     return vision_model, processor, embedding_model
+
+ class DamageKnowledgeBase:
+     def __init__(self, embedding_model):
+         self.embedding_model = embedding_model
+         self.load_knowledge_base()
+
+     def load_knowledge_base(self):
+         # Load dataset metadata and embeddings
+         knowledge_path = Path("data/knowledge_base.json")
+         if knowledge_path.exists():
+             with open(knowledge_path, 'r') as f:
+                 self.kb_data = json.load(f)
+
+             # Flat list of cases in the same order the saved embeddings were built
+             self.cases = [case for cases in self.kb_data.values() for case in cases]
+
+             # Initialize FAISS index
+             embeddings = torch.load("data/embeddings.pt")
+             self.index = faiss.IndexFlatL2(embeddings.shape[1])
+             self.index.add(embeddings.numpy())
+         else:
+             self.initialize_knowledge_base()
+
+     def initialize_knowledge_base(self):
+         # Sample knowledge base structure
+         self.kb_data = {
+             'spalling': [
+                 {
+                     'description': 'Severe concrete spalling on column surface',
+                     'severity': 'High',
+                     'repair_method': 'Remove damaged concrete, clean reinforcement, apply repair mortar',
+                     'estimated_cost': 'High',
+                     'timeframe': '2-3 weeks',
+                     'similar_cases': ['case_123', 'case_456']
+                 }
+             ],
+             # Add more damage types...
+         }
+
+         # Create one embedding text per case, keeping a flat list of cases in the same order
+         texts = []
+         self.cases = []
+         for damage_type, cases in self.kb_data.items():
+             for case in cases:
+                 texts.append(f"{damage_type} {case['description']} {case['repair_method']}")
+                 self.cases.append(case)
+
+         embeddings = self.embedding_model.encode(texts)
+         self.index = faiss.IndexFlatL2(embeddings.shape[1])
+         self.index.add(embeddings)
+
+         # Save for future use
+         os.makedirs("data", exist_ok=True)
+         with open("data/knowledge_base.json", 'w') as f:
+             json.dump(self.kb_data, f)
+         torch.save(torch.tensor(embeddings), "data/embeddings.pt")
+
+     def query(self, damage_type, confidence):
+         # Embed the query and return the nearest cases from the FAISS index
+         query = f"damage type: {damage_type}"
+         query_embedding = self.embedding_model.encode([query])
+         D, I = self.index.search(query_embedding, k=min(3, len(self.cases)))
+
+         similar_cases = [self.cases[idx] for idx in I[0] if idx >= 0]
+         return similar_cases
+
+ def analyze_damage(image, model, processor):
+     image = image.convert('RGB')
+     inputs = processor(images=image, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     probs = torch.nn.functional.softmax(outputs.logits, dim=1)[0]
+     return probs
+
+ def main():
+     st.title("Advanced Structural Damage Assessment Tool")
+
+     vision_model, processor, embedding_model = load_models()
+     kb = DamageKnowledgeBase(embedding_model)
+
+     uploaded_file = st.file_uploader("Upload structural image", type=['jpg', 'jpeg', 'png'])
+
+     if uploaded_file:
+         image = Image.open(uploaded_file)
+         st.image(image, caption="Uploaded Structure", use_column_width=True)
+
+         with st.spinner("Analyzing..."):
+             predictions = analyze_damage(image, vision_model, processor)
+
+         col1, col2 = st.columns(2)
+
+         with col1:
+             st.subheader("Damage Assessment")
+             detected_damages = []
+             for idx, prob in enumerate(predictions):
+                 confidence = float(prob) * 100
+                 if confidence > 15:
+                     damage_type = DAMAGE_TYPES[idx]['name']
+                     detected_damages.append((damage_type, confidence))
+
+                     st.write(f"**{damage_type.replace('_', ' ').title()}**")
+                     st.progress(confidence / 100)
+                     st.write(f"Confidence: {confidence:.1f}%")
+                     st.write(f"Risk Level: {DAMAGE_TYPES[idx]['risk']}")
+
+         with col2:
+             st.subheader("Similar Cases & Recommendations")
+             for damage_type, confidence in detected_damages:
+                 similar_cases = kb.query(damage_type, confidence)
+
+                 st.write(f"**{damage_type.replace('_', ' ').title()}:**")
+                 for case in similar_cases:
+                     with st.expander(f"Similar Case - {case['severity']} Severity"):
+                         st.write(f"Description: {case['description']}")
+                         st.write(f"Repair Method: {case['repair_method']}")
+                         st.write(f"Estimated Cost: {case['estimated_cost']}")
+                         st.write(f"Timeframe: {case['timeframe']}")
+
+ if __name__ == "__main__":
+     main()
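
For quick local testing of the retrieval path outside the Streamlit UI, a minimal sketch along the following lines should work. It is not part of this commit: the script name smoke_test.py and the image sample.jpg are hypothetical, and it assumes app.py sits in the current directory (calling the st.cache_resource-decorated load_models outside a Streamlit run may print a caching warning but still returns the models).

    # smoke_test.py -- hypothetical helper, not part of this commit
    from PIL import Image
    from app import load_models, analyze_damage, DamageKnowledgeBase, DAMAGE_TYPES

    vision_model, processor, embedding_model = load_models()
    kb = DamageKnowledgeBase(embedding_model)

    # Classify a local test image (sample.jpg is an assumed placeholder)
    probs = analyze_damage(Image.open("sample.jpg"), vision_model, processor)
    top_idx = int(probs.argmax())
    top_name = DAMAGE_TYPES[top_idx]['name']
    print("Top prediction:", top_name, f"{float(probs[top_idx]) * 100:.1f}%")

    # Retrieve similar cases from the knowledge base
    for case in kb.query(top_name, float(probs[top_idx]) * 100):
        print(case['severity'], '-', case['repair_method'])

The app itself would be launched with streamlit run app.py, with streamlit, torch, transformers, sentence-transformers, faiss-cpu, pandas, and pillow installed.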