Update app.py
app.py
CHANGED
@@ -1,103 +1,147 @@
-import paho.mqtt.client as mqtt
-
-import
-from transformers import RagRetriever, RagTokenizer, RagSequenceForGeneration, pipeline
import os
-import sqlite3
-from sklearn.ensemble import IsolationForest
-import speech_recognition as sr

-#
-
-
-
-
-
-
-# Anomaly Detection Model
-anomaly_model = IsolationForest(contamination=0.1)
-data = []
-
-# Initialize Models
-retriever = RagRetriever.from_pretrained("facebook/rag-sequence-base")
-tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-base")
-model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-base")
-nlp = pipeline("conversational")
-
-# IoT Device Control
-def control_device(command):
-    client = mqtt.Client()
-    client.connect("broker.hivemq.com", 1883, 60)
-    if "light" in command and "on" in command:
-        client.publish("home/light", "ON")
-        return "Light turned on."
-    elif "light" in command and "off" in command:
-        client.publish("home/light", "OFF")
-        return "Light turned off."
-    else:
-        return "Command not recognized."
-
-# Process Command
-def process_command(command):
-    if "light" in command:
-        return control_device(command)
-    else:
-        inputs = tokenizer(command, return_tensors="pt")
-        retrieved_docs = retriever(command, return_tensors="pt")
-        outputs = model.generate(input_ids=inputs['input_ids'], context_input_ids=retrieved_docs['context_input_ids'])
-        return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-# Log History
-def log_history(command, response):
-    cursor.execute("INSERT INTO history (command, response) VALUES (?, ?)", (command, response))
-    conn.commit()
-
-# Anomaly Detection
-def detect_anomalies(command):
-    global data
-    data.append(len(command))
-    if len(data) > 10:
-        anomaly_model.fit([[x] for x in data])
-        if anomaly_model.predict([[len(command)]])[0] == -1:
-            return True
-    return False

-#
-)
+import streamlit as st
+import torch
+from transformers import ViTForImageClassification, ViTImageProcessor, pipeline
+from PIL import Image
+import pandas as pd
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.llms import HuggingFaceHub
+from langchain_community.vectorstores import FAISS
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.docstore.document import Document
+from langchain.prompts import PromptTemplate
+from langchain.chains import RetrievalQA
import os

+# Page config
+st.set_page_config(
+    page_title="Building Damage Analysis",
+    page_icon="🏗️",
+    layout="wide"
+)

+# Initialize models
+@st.cache_resource
+def load_models():
+    # Vision model
+    damage_model = ViTForImageClassification.from_pretrained("microsoft/vit-base-patch16-224")
+    processor = ViTImageProcessor.from_pretrained("microsoft/vit-base-patch16-224")
+
+    # Text model
+    llm = HuggingFaceHub(
+        repo_id="google/flan-t5-large",
+        model_kwargs={"temperature": 0.7, "max_length": 512}
+    )
+
+    embeddings = HuggingFaceEmbeddings(
+        model_name='sentence-transformers/all-MiniLM-L6-v2'
+    )
+
+    return damage_model, processor, embeddings, llm

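+# NOTE: HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable,
+# which must be set for the flan-t5-large calls to work. Also note that this ViT
+# checkpoint is a generic ImageNet-1k classifier, not a damage-detection model,
+# so its scores only become meaningful after fine-tuning on labeled damage images.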
+# Sample data - in production, you'd load this from a proper dataset
+SAMPLE_DATA = [
+    {
+        "repair_description": "Major wall crack requiring structural repair. Steel plate reinforcement needed.",
+        "repair_cost": 5000,
+        "damage_type": "Wall Crack"
+    },
+    {
+        "repair_description": "Concrete beam damage with exposed rebar. Requires immediate attention.",
+        "repair_cost": 7500,
+        "damage_type": "Beam Damage"
+    },
+    {
+        "repair_description": "Foundation settling causing structural issues. Need underpinning.",
+        "repair_cost": 15000,
+        "damage_type": "Foundation Issue"
+    }
+]

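+# One way to load real records instead (hypothetical sketch using the pandas
+# import above; assumes a CSV whose columns match the keys in SAMPLE_DATA):
+#   df = pd.read_csv("repair_records.csv")
+#   SAMPLE_DATA = df.to_dict(orient="records")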
+def setup_rag(embeddings, llm):
+    # Create documents from sample data
+    documents = [
+        Document(
+            page_content=f"{item['repair_description']} Cost: ${item['repair_cost']}",
+            metadata={'cost': item['repair_cost'], 'damage_type': item['damage_type']}
+        )
+        for item in SAMPLE_DATA
+    ]
+
+    # Create vector store
+    vectorstore = FAISS.from_documents(documents, embeddings)
+
+    # Create prompt template
+    template = """
+    Analyze building damage and provide repair recommendations based on this context:
+    {context}
+
+    For damage type: {question}
+
+    Provide:
+    1. Damage assessment
+    2. Repair steps
+    3. Safety considerations
+    4. Estimated cost range
+    """
+
+    prompt = PromptTemplate(template=template, input_variables=["context", "question"])
+
+    # Create QA chain
+    qa_chain = RetrievalQA.from_chain_type(
+        llm=llm,
+        chain_type="stuff",
+        retriever=vectorstore.as_retriever(search_kwargs={'k': 2}),
+        chain_type_kwargs={"prompt": prompt}
+    )
+
+    return qa_chain

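+# With chain_type="stuff", the k=2 retrieved documents are concatenated verbatim
+# into the {context} slot of the prompt before the LLM is called.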
+def process_image(image, model, processor):
+    inputs = processor(images=image, return_tensors="pt")
+    outputs = model(**inputs)
+    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
+    return predictions[0].tolist()

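+# The returned list has one probability per class in the checkpoint's config
+# (1000 ImageNet classes here), so the zip() in main() silently pairs only its
+# first five entries with the damage labels.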
+def main():
+    st.title("🏗️ Building Damage Detection & Analysis")
+    st.markdown("""
+    Upload a photo of building damage for AI analysis and repair recommendations.
+    """)
+
+    # Load models on first run
+    if 'models_loaded' not in st.session_state:
+        with st.spinner('Loading AI models...'):
+            damage_model, processor, embeddings, llm = load_models()
+            qa_chain = setup_rag(embeddings, llm)
+            st.session_state['models_loaded'] = True
+            st.session_state['models'] = (damage_model, processor, qa_chain)
+
+    damage_model, processor, qa_chain = st.session_state['models']
+
+    # File upload
+    uploaded_file = st.file_uploader("Upload building damage photo", type=["jpg", "jpeg", "png"])
+
+    if uploaded_file:
+        # Display image
+        image = Image.open(uploaded_file)
+        st.image(image, caption="Uploaded Image", use_column_width=True)
+
+        with st.spinner('Analyzing damage...'):
+            # Process image
+            predictions = process_image(image, damage_model, processor)
+            damage_types = ["Wall Crack", "Beam Damage", "Foundation Issue",
+                            "Roof Damage", "Structural Damage"]
+
+        # Show results
+        st.subheader("Detected Damage Types")
+        for damage_type, prob in zip(damage_types, predictions):
+            if prob > 0.2:
+                st.metric(damage_type, f"{prob:.1%}")
+
+                with st.spinner(f'Generating analysis for {damage_type}...'):
+                    analysis = qa_chain.invoke(damage_type)
+                st.markdown(f"### Analysis for {damage_type}")
+                st.markdown(analysis['result'])

+if __name__ == "__main__":
+    main()
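
To try the new app end to end (assuming a Hugging Face API token is available): export HUGGINGFACEHUB_API_TOKEN, then run `streamlit run app.py`.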