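"""Streamlit app for construction defect analysis.

Combines a ViT-based image classifier (per-class defect probabilities), a small
FAISS/SentenceTransformer retrieval index over a built-in defect knowledge base,
and a Groq-hosted LLM for question answering. Requires the GROQ_API_KEY
environment variable to be set before launching.
"""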
import streamlit as st
import os
from groq import Groq
from transformers import ViTForImageClassification, ViTImageProcessor
from sentence_transformers import SentenceTransformer
from PIL import Image
import torch
import numpy as np
from typing import List, Dict, Optional
import faiss
import logging
from datetime import datetime
import matplotlib.pyplot as plt

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class RAGSystem:
    def __init__(self):
        self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
        self.knowledge_base = self.load_knowledge_base()
        self.vector_store = self.create_vector_store()
        self.query_history = []

    def load_knowledge_base(self) -> List[Dict]:
        """Load and preprocess knowledge base"""
        kb = {
            "spalling": [
                {
                    "severity": "Critical",
                    "description": "Severe concrete spalling with exposed reinforcement",
                    "repair_method": "Remove deteriorated concrete, clean reinforcement",
                    "estimated_cost": "Very High ($15,000+)",
                    "immediate_action": "Evacuate area, install support"
                }
            ],
            "structural_cracks": [
                {
                    "severity": "High",
                    "description": "Active structural cracks >5mm width",
                    "repair_method": "Structural analysis, epoxy injection",
                    "estimated_cost": "High ($10,000-$20,000)",
                    "immediate_action": "Install crack monitors"
                }
            ]
        }

        documents = []
        for category, items in kb.items():
            for item in items:
                doc_text = f"Category: {category}\n"
                for key, value in item.items():
                    doc_text += f"{key}: {value}\n"
                documents.append({"text": doc_text, "metadata": {"category": category}})

        return documents

    def create_vector_store(self):
        """Create FAISS vector store"""
        texts = [doc["text"] for doc in self.knowledge_base]
        embeddings = self.embedding_model.encode(texts)
        dimension = embeddings.shape[1]
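        # IndexFlatL2 performs exact (brute-force) L2 search, which is fine for a
        # knowledge base of this size; no training step is needed before add().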
        index = faiss.IndexFlatL2(dimension)
        index.add(np.array(embeddings).astype('float32'))
        return index

    def get_relevant_context(self, query: str, k: int = 3) -> str:
        """Retrieve relevant context based on query"""
        try:
            query_embedding = self.embedding_model.encode([query])
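            # search() returns distances (D) and row indices (I) of the k nearest
            # documents, in the order they were added to the index.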
            D, I = self.vector_store.search(np.array(query_embedding).astype('float32'), k)
            context = "\n\n".join([self.knowledge_base[i]["text"] for i in I[0]])

            self.query_history.append({
                "timestamp": datetime.now().isoformat(),
                "query": query
            })

            return context
        except Exception as e:
            logger.error(f"Error retrieving context: {e}")
            return ""


class ImageAnalyzer:
    def __init__(self):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.defect_classes = ["spalling", "structural_cracks", "surface_deterioration"]
        self.model = self._initialize_model()
        self.processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
        self.history = []

    def _initialize_model(self):
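        # Replacing the 1000-class ImageNet head with a 3-label head
        # (ignore_mismatched_sizes=True) leaves the new classifier randomly
        # initialized, so the defect probabilities are only meaningful after
        # fine-tuning on labeled defect images or loading a fine-tuned checkpoint
        # in place of the base ViT weights.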
        model = ViTForImageClassification.from_pretrained(
            "google/vit-base-patch16-224",
            num_labels=len(self.defect_classes),
            ignore_mismatched_sizes=True
        )
        return model.to(self.device)
    def analyze_image(self, image: Image.Image) -> Optional[Dict]:
"""Analyze image for defects""" |
|
try: |
|
|
|
inputs = self.processor(images=image, return_tensors="pt").to(self.device) |
|
|
|
|
|
with torch.no_grad(): |
|
outputs = self.model(**inputs) |
|
|
|
|
|
probabilities = torch.nn.functional.softmax(outputs.logits, dim=1) |
|
defect_probs = { |
|
self.defect_classes[i]: float(probabilities[0][i]) |
|
for i in range(len(self.defect_classes)) |
|
} |
|
|
|
|
|
img_array = np.array(image) |
|
stats = { |
|
"mean_brightness": float(np.mean(img_array)), |
|
"image_size": image.size |
|
} |
|
|
|
result = { |
|
"defect_probabilities": defect_probs, |
|
"image_statistics": stats, |
|
"timestamp": datetime.now().isoformat() |
|
} |
|
|
|
self.history.append(result) |
|
return result |
|
|
|
except Exception as e: |
|
logger.error(f"Image analysis error: {e}") |
|
return None |
|
|
|
def get_groq_response(query: str, context: str) -> str: |
|
"""Get response from Groq LLM""" |
|
try: |
|
client = Groq(api_key=os.getenv("GROQ_API_KEY")) |
|
|
|
prompt = f"""Based on the following context about construction defects, answer the question. |
|
Context: {context} |
|
Question: {query} |
|
Provide a detailed answer based on the context.""" |
|
|
|
response = client.chat.completions.create( |
|
messages=[ |
|
{ |
|
"role": "system", |
|
"content": "You are a construction defect analysis expert." |
|
}, |
|
{ |
|
"role": "user", |
|
"content": prompt |
|
} |
|
], |
|
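            # Groq's hosted model catalogue changes over time; if "llama2-70b-4096"
            # has been retired, swap in whichever chat model your Groq account
            # currently lists.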
model="llama2-70b-4096", |
|
temperature=0.7, |
|
) |
|
return response.choices[0].message.content |
|
except Exception as e: |
|
logger.error(f"Groq API error: {e}") |
|
return f"Error: Unable to get response from AI model. Please try again later." |
|
|
|
def main(): |
|
st.set_page_config( |
|
page_title="Construction Defect Analyzer", |
|
page_icon="🏗️", |
|
layout="wide" |
|
) |
|
|
|
st.title("🏗️ Construction Defect Analyzer") |
|
|
|
|
|
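    # st.session_state keeps these heavy objects alive across Streamlit reruns,
    # so the models are constructed only once per browser session.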
    if 'rag_system' not in st.session_state:
        st.session_state.rag_system = RAGSystem()
    if 'image_analyzer' not in st.session_state:
        st.session_state.image_analyzer = ImageAnalyzer()

    col1, col2 = st.columns([1, 1])

    with col1:
        uploaded_file = st.file_uploader(
            "Upload a construction image",
            type=['jpg', 'jpeg', 'png']
        )

        if uploaded_file:
            image = Image.open(uploaded_file)
            st.image(image, caption="Uploaded Image", use_column_width=True)

            with st.spinner("Analyzing image..."):
                results = st.session_state.image_analyzer.analyze_image(image)

            if results:
                st.subheader("Detected Defects")

                defect_probs = results["defect_probabilities"]
                fig, ax = plt.subplots()
                defects = list(defect_probs.keys())
                probs = list(defect_probs.values())
                ax.barh(defects, probs)
                ax.set_xlim(0, 1)
                ax.set_xlabel("Probability")
                st.pyplot(fig)

                if st.checkbox("Show Image Details"):
                    st.json(results["image_statistics"])

    with col2:
        st.subheader("Ask About Defects")
        user_query = st.text_input(
            "Enter your question about construction defects:",
            help="Example: What are the repair methods for severe spalling?"
        )

        if user_query:
            with st.spinner("Processing query..."):
                context = st.session_state.rag_system.get_relevant_context(user_query)
                response = get_groq_response(user_query, context)

            st.write("AI Response:")
            st.write(response)

            if st.checkbox("Show Retrieved Context"):
                st.write("Context Used:")
                st.text(context)

    with st.sidebar:
        st.header("Analysis History")
        if st.button("Show Recent Analyses"):
            if st.session_state.image_analyzer.history:
                for analysis in st.session_state.image_analyzer.history[-5:]:
                    st.write(f"Analysis from: {analysis['timestamp']}")
                    st.json(analysis["defect_probabilities"])
            else:
                st.write("No analyses yet")


if __name__ == "__main__":
    main()
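# To run locally (the filename app.py is illustrative):
#   export GROQ_API_KEY="<your-groq-api-key>"
#   streamlit run app.py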