import streamlit as st
import torch
from transformers import ViTForImageClassification, ViTImageProcessor
from PIL import Image
import numpy as np
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
import io
import json
# Custom class labels
damage_types = [
    "spalling",
    "reinforcement_corrosion",
    "flexural_crack",
    "structural_crack",
    "dampness",
    "impact_failure",
]

# Load pre-trained backbone and processor.
# Note: the in21k checkpoint does not ship with a head trained for these labels,
# so num_labels/id2label size the classifier to the damage classes above; in
# practice a checkpoint fine-tuned on these classes is expected here.
model_name = "google/vit-base-patch16-224-in21k"
processor = ViTImageProcessor.from_pretrained(model_name)
model = ViTForImageClassification.from_pretrained(
    model_name,
    num_labels=len(damage_types),
    id2label={i: label for i, label in enumerate(damage_types)},
    label2id={label: i for i, label in enumerate(damage_types)},
)
model.eval()
# Load the FAISS vector store of remedial-measure documents
# (assumes a previously built index saved under ./knowledge_base)
embeddings = HuggingFaceEmbeddings()
knowledge_base = FAISS.load_local("knowledge_base", embeddings)
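
# The helper below is a minimal sketch of how the "knowledge_base" index could
# be built in the first place, using the TextLoader / CharacterTextSplitter
# imports above. It is an assumption rather than part of the original app, and
# the source file name "remedial_measures.txt" is hypothetical.
def build_knowledge_base(source_path="remedial_measures.txt"):
    # Load the raw text, split it into chunks, embed, and persist the index
    documents = TextLoader(source_path).load()
    chunks = CharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(documents)
    store = FAISS.from_documents(chunks, embeddings)
    store.save_local("knowledge_base")
    return store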
def process_image(image):
    # Preprocess image into model inputs
    inputs = processor(images=image, return_tensors="pt")

    # Get model predictions (no gradients needed at inference time)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # Rank all damage classes by predicted probability
    top_probs, top_indices = torch.topk(probs, len(damage_types))
    return {
        damage_types[int(idx)]: float(prob)
        for idx, prob in zip(top_indices[0], top_probs[0])
    }
def get_recommendations(damage_type):
    # Query the vector store for remedial-measure passages
    docs = knowledge_base.similarity_search(
        f"Remedial measures for {damage_type} in building structures",
        k=3,
    )
    return [doc.page_content for doc in docs]
# Streamlit UI
st.title("Structural Damage Assessment Tool")
# File upload
uploaded_file = st.file_uploader("Upload structural image", type=["jpg", "jpeg", "png"])
if uploaded_file:
    # Display image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Process image
    with st.spinner("Analyzing image..."):
        predictions = process_image(image)

    # Display results
    st.subheader("Damage Assessment")
    for damage_type, probability in predictions.items():
        st.progress(probability)
        st.write(f"{damage_type.replace('_', ' ').title()}: {probability:.2%}")

        # Show recommendations for likely damage types
        if probability > 0.5:
            st.subheader(f"Recommendations for {damage_type.replace('_', ' ').title()}")
            recommendations = get_recommendations(damage_type)
            for i, rec in enumerate(recommendations, 1):
                st.write(f"{i}. {rec}")

    # Generate downloadable JSON report
    st.download_button(
        "Download Report",
        json.dumps(predictions, indent=2),
        "assessment_report.json",
        "application/json",
    )
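
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py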