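"""Streamlit app: building damage detection with a ViT classifier plus a
LangChain RAG pipeline that generates repair recommendations.

Presumed setup (not stated in the original file): run with
`streamlit run app.py` after installing streamlit, torch, transformers,
pillow, pandas, faiss-cpu, sentence-transformers, and langchain-community,
and export HUGGINGFACEHUB_API_TOKEN so the hosted flan-t5-large model can
be queried.
"""
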
import streamlit as st
import torch
from transformers import ViTForImageClassification, ViTImageProcessor
from PIL import Image
import pandas as pd
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceHub
from langchain_community.vectorstores import FAISS
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import os

# Page config
st.set_page_config(
    page_title="Building Damage Analysis",
    page_icon="πŸ—οΈ",
    layout="wide"
)

# Damage classes the app reports on; the vision head below is sized to match
DAMAGE_TYPES = ["Wall Crack", "Beam Damage", "Foundation Issue",
                "Roof Damage", "Structural Damage"]

# Initialize models
@st.cache_resource
def load_models():
    # Vision model. The base checkpoint is trained on ImageNet, not building
    # damage, so the classification head is resized to the five damage classes.
    # The resized head is randomly initialized; in production you would load a
    # checkpoint fine-tuned on labeled damage photos instead.
    damage_model = ViTForImageClassification.from_pretrained(
        "microsoft/vit-base-patch16-224",
        num_labels=len(DAMAGE_TYPES),
        ignore_mismatched_sizes=True,
    )
    processor = ViTImageProcessor.from_pretrained("microsoft/vit-base-patch16-224")
    
    # Text model, served through the Hugging Face Hub inference API
    # (requires the HUGGINGFACEHUB_API_TOKEN environment variable)
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-large",
        model_kwargs={"temperature": 0.7, "max_length": 512}
    )
    
    embeddings = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )
    
    return damage_model, processor, embeddings, llm

# Sample data - in production you'd load this from a proper dataset
# (see the load_repair_data sketch below)
SAMPLE_DATA = [
    {
        "repair_description": "Major wall crack requiring structural repair. Steel plate reinforcement needed.",
        "repair_cost": 5000,
        "damage_type": "Wall Crack"
    },
    {
        "repair_description": "Concrete beam damage with exposed rebar. Requires immediate attention.",
        "repair_cost": 7500,
        "damage_type": "Beam Damage"
    },
    {
        "repair_description": "Foundation settling causing structural issues. Need underpinning.",
        "repair_cost": 15000,
        "damage_type": "Foundation Issue"
    }
]
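
# A minimal sketch of how real records could replace SAMPLE_DATA, assuming a
# CSV with the same three columns. The file name is hypothetical; point it at
# whatever dataset you actually have.
def load_repair_data(csv_path="repair_records.csv"):
    """Read repair records from a CSV into the SAMPLE_DATA dict format."""
    df = pd.read_csv(csv_path)
    return df[["repair_description", "repair_cost", "damage_type"]].to_dict("records")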

def setup_rag(embeddings, llm):
    # Create documents from sample data
    documents = [
        Document(
            page_content=f"{item['repair_description']} Cost: ${item['repair_cost']}",
            metadata={'cost': item['repair_cost'], 'damage_type': item['damage_type']}
        )
        for item in SAMPLE_DATA
    ]
    
    # Create vector store
    vectorstore = FAISS.from_documents(documents, embeddings)
    
    # Create prompt template
    template = """
    Analyze building damage and provide repair recommendations based on this context:
    {context}
    
    For damage type: {question}
    
    Provide:
    1. Damage assessment
    2. Repair steps
    3. Safety considerations
    4. Estimated cost range
    """
    
    prompt = PromptTemplate(template=template, input_variables=["context", "question"])
    
    # Create QA chain
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(search_kwargs={'k': 2}),
        chain_type_kwargs={"prompt": prompt}
    )
    
    return qa_chain
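
# Hypothetical standalone usage (outside Streamlit): the chain expects a
# "query" key and returns a dict whose "result" field holds the analysis.
#   _, _, embeddings, llm = load_models()
#   qa_chain = setup_rag(embeddings, llm)
#   print(qa_chain.invoke({"query": "Wall Crack"})["result"])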

def process_image(image, model, processor):
    """Run the ViT classifier and return one probability per damage class."""
    # Convert to RGB so PNGs with an alpha channel don't break the processor
    inputs = processor(images=image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    return predictions[0].tolist()
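
# Hypothetical quick test outside the UI, e.g. with a local sample image:
#   model, processor, _, _ = load_models()
#   probs = process_image(Image.open("crack.jpg"), model, processor)
#   print(max(zip(DAMAGE_TYPES, probs), key=lambda p: p[1]))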

def main():
    st.title("πŸ—οΈ Building Damage Detection & Analysis")
    st.markdown("""
    Upload a photo of building damage for AI analysis and repair recommendations.
    """)
    
    # HuggingFaceHub calls the hosted inference API and needs an API token
    if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
        st.warning("Set the HUGGINGFACEHUB_API_TOKEN environment variable "
                   "so the language model can be queried.")
    
    # Load models on first run (load_models itself is also cached by Streamlit)
    if 'models_loaded' not in st.session_state:
        with st.spinner('Loading AI models...'):
            damage_model, processor, embeddings, llm = load_models()
            qa_chain = setup_rag(embeddings, llm)
            st.session_state['models_loaded'] = True
            st.session_state['models'] = (damage_model, processor, qa_chain)
    
    damage_model, processor, qa_chain = st.session_state['models']
    
    # File upload
    uploaded_file = st.file_uploader("Upload building damage photo", type=["jpg", "jpeg", "png"])
    
    if uploaded_file:
        # Display image
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_container_width=True)
        
        with st.spinner('Analyzing damage...'):
            # Process image
            predictions = process_image(image, damage_model, processor)
            
            # Show results for any class above a 20% confidence threshold
            st.subheader("Detected Damage Types")
            for damage_type, prob in zip(DAMAGE_TYPES, predictions):
                if prob > 0.2:
                    st.metric(damage_type, f"{prob:.1%}")
                    
                    with st.spinner(f'Generating analysis for {damage_type}...'):
                        analysis = qa_chain.invoke({"query": damage_type})
                        st.markdown(f"### Analysis for {damage_type}")
                        st.markdown(analysis['result'])

if __name__ == "__main__":
    main()