File size: 4,866 Bytes
66f9aa7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9e0b2cb
66f9aa7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
# app.py

import streamlit as st
import random
import pandas as pd
import time
import speech_recognition as sr
from openai import OpenAI
from PyPDF2 import PdfReader
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings


# Set DeepSeek API Key
# SECURITY NOTE(review): API key is hardcoded and committed to source — move it
# to an environment variable or st.secrets and rotate this credential.
# Also note the base URL points at api.aimlapi.com, not DeepSeek's own endpoint —
# presumably an OpenAI-compatible proxy; confirm with the provider docs.
api_key = "0f33d8a8b7714460bc4b8335b66d217a"
base_url = "https://api.aimlapi.com/v1"

# Initialize OpenAI client
# NOTE(review): `api` is never used below — the chatbot goes through LangChain's
# ChatOpenAI instead. Kept as-is; likely dead code.
api = OpenAI(api_key=api_key, base_url=base_url)

# Generate 300 Dummy Messages with Severity Levels
# Synthetic feed standing in for a real social-media ingest; severity is
# assigned uniformly at random per message.
severities = ["High", "Medium", "Low"]
messages = [
    {"message": f"Disaster Alert {i}", "severity": random.choice(severities)}
    for i in range(300)
]
df = pd.DataFrame(messages)

# Streamlit UI
# set_page_config must be the first Streamlit call in the script; keep it here.
st.set_page_config(page_title="BDRS", layout="wide")

st.title("🌍 BeaconAi Disaster Response System")
st.write("Real-time disaster response with DeepSeek AI-powered chatbot & voice recognition.")

# Live-updating Disaster Message Dashboard
st.subheader("πŸ“Š Social Media Monitoring")
# Reserved slot for the severity bar chart rendered by update_chart().
chart_placeholder = st.empty()

# Function to Randomly Pick Messages
def get_random_messages(n: int = 5) -> pd.DataFrame:
    """Return a random sample of *n* rows from the global message DataFrame.

    The sample size was previously hard-coded to 5; it is now a parameter with
    the same default, so existing callers are unaffected.
    """
    return df.sample(n)


# Placeholder for chart
chart_placeholder = st.empty()

# Define severity categories
severities = ["Low", "Medium", "High"]

# Function to update chart
def update_chart():
    """Draw a bar chart of severity counts for a fresh random message sample.

    Pulls a new 5-row sample, tallies severities in the fixed Low/Medium/High
    order (zero-filling missing categories), and renders into the shared
    chart placeholder.
    """
    sample = get_random_messages()
    counts = sample["severity"].value_counts().reindex(severities, fill_value=0)

    # Tabulate for Streamlit's bar_chart API.
    frame = pd.DataFrame({"Severity": severities, "Count": list(counts)})

    # Replace the placeholder's contents with the fresh chart.
    chart_placeholder.bar_chart(frame, x="Severity", y="Count", use_container_width=True)

# Manual refresh: clicking the button calls st.rerun, which re-executes the
# whole script and thus redraws the chart with a new sample. (The previous
# comment claimed "auto-refresh every second", but there is no timer here —
# refreshing is user-driven.)
st.button("Refresh Data", on_click=st.rerun)

update_chart()



# PDF Processing for Chatbot Context (Pre-Provided PDF)
pdf_path = "Natural%20Disaster%20Safety%20Manual.pdf"  # Ensure this file is in the same directory as app.py

pdf_reader = PdfReader(pdf_path)
raw_text = ""
for page in pdf_reader.pages:
    raw_text += page.extract_text() + "\n"

# Convert to Embeddings for Retrieval
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
texts = text_splitter.split_text(raw_text)
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_db = FAISS.from_texts(texts, embeddings)
retriever = vector_db.as_retriever()

# LangGraph-Powered Q&A System
chat_model = ChatOpenAI(model="deepseek-chat", api_key=api_key, base_url=base_url)
qa = RetrievalQA.from_chain_type(llm=chat_model, chain_type="stuff", retriever=retriever)

# Chatbot UI — free-text question box answered by the retrieval QA chain.
st.subheader("πŸ€– AI-Powered Disaster Chatbot")
user_query = st.text_input("Ask the chatbot:")

# Empty string is falsy, so nothing runs until the user submits a question.
if user_query:
    answer = qa.run(user_query)
    st.write("**Chatbot Response:**", answer)


# Voice Recognition for Non-English Users
st.subheader("πŸŽ™οΈ Voice Recognition (Speech-to-Text)")

if st.button("Start Recording"):
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        st.write("Listening...")
        audio = recognizer.listen(source)

    try:
        recognized_text = recognizer.recognize_google(audio)
        st.write("**Recognized Text:**", recognized_text)
    except sr.UnknownValueError:
        st.write("Sorry, could not understand.")
    except sr.RequestError:
        st.write("Could not request results. Check your internet connection.")

# Disaster Guide Dropdown — static preparedness tips plus a video link per type.
st.subheader("πŸŒͺ️ Disaster Preparedness Guide")

disaster_options = {
    "Wildfire": {
        "steps": [
            "Evacuate if ordered.",
            "Keep emergency supplies ready.",
            "Close all doors and windows.",
        ],
        "video": "https://www.youtube.com/watch?v=OCjl6tp8dnw",
    },
    "Earthquake": {
        "steps": [
            "Drop, Cover, and Hold On.",
            "Stay indoors until shaking stops.",
            "Move away from windows.",
        ],
        "video": "https://www.youtube.com/watch?v=BLEPakj1YTY",
    },
    "Flood": {
        "steps": [
            "Move to higher ground.",
            "Avoid walking or driving through floodwaters.",
            "Stay tuned to emergency alerts.",
        ],
        "video": "https://www.youtube.com/watch?v=43M5mZuzHF8",
    },
}

selected_disaster = st.selectbox("Select a disaster type:", list(disaster_options.keys()))

# A selectbox always has a selection, so this renders on every run.
if selected_disaster:
    guide = disaster_options[selected_disaster]

    st.write("### πŸ›  Steps to Follow:")
    for step in guide["steps"]:
        st.write(f"- {step}")

    st.write("πŸ“Ί [Watch Video Guide]({})".format(guide["video"]))

st.write("πŸš€ Stay prepared and stay safe!")