Haseeb-001 committed
Commit 976b089 · verified · 1 Parent(s): 093ae7b

Update app.py

Files changed (1)
  1. app.py +97 -95
app.py CHANGED
@@ -1,104 +1,106 @@
- from fastapi import FastAPI, Request, HTTPException
- from fastapi.responses import JSONResponse
- from starlette.middleware.cors import CORSMiddleware
- import rasa.core.interpreter
- from rasa.core.agent import Agent
- import asyncio
- import json
  import os
- from dotenv import load_dotenv
+ import json
+ import rasa
+ import whisper
+ from fastapi import FastAPI, UploadFile, File, HTTPException
+ from transformers import pipeline
+ from geopy.geocoders import Nominatim
+ from pymongo import MongoClient
  import streamlit as st
- import requests
-
- load_dotenv()

+ # Initialize FastAPI
  app = FastAPI()

- # CORS setup to allow Streamlit frontend to communicate with FastAPI backend
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],  # Allows all origins (for development, restrict in production)
-     allow_credentials=True,
-     allow_methods=["*"],
-     allow_headers=["*"],
- )
-
- # Rasa model directory and paths - adjust if needed, or use environment variables
- RASA_MODEL_PATH = os.getenv("RASA_MODEL_PATH", "./models")
- DOMAIN_PATH = os.getenv("DOMAIN_PATH", "./domain.yml")
- NLU_MODEL_PATH = os.getenv("NLU_MODEL_PATH", None)  # NLU model path is optional
-
- # Load Rasa agent
- async def load_rasa_agent():
-     interpreter = None
-     if NLU_MODEL_PATH:
-         interpreter = await rasa.core.interpreter.create_interpreter(NLU_MODEL_PATH)
-
-     agent = Agent.load(RASA_MODEL_PATH,
-                        interpreter=interpreter,
-                        domain=DOMAIN_PATH)
-     return agent
-
- loop = asyncio.get_event_loop()
- rasa_agent = loop.run_until_complete(load_rasa_agent())
-
- # FastAPI endpoint for chat
- @app.post("/chat")
- async def chat_endpoint(message_data: dict):
-     """Rasa chatbot endpoint that takes message in JSON and returns bot response."""
-     user_message = message_data.get('message')
-
-     if not user_message:
-         raise HTTPException(status_code=400, detail="Message text not found")
-
-     response = await rasa_agent.handle_text(user_message)
-
-     bot_responses = []
-     for message in response:
-         if "text" in message:
-             bot_responses.append(message["text"])
-
-     return JSONResponse({"responses": bot_responses})
-
-
- @app.get("/health")
- async def health_check():
-     """Health check endpoint for monitoring."""
-     return JSONResponse({"status": "ok"})
+ # Load Whisper for Speech-to-Text
+ try:
+     whisper_model = whisper.load_model("base")
+ except Exception as e:
+     raise RuntimeError(f"Error loading Whisper model: {e}")
+
+ def speech_to_text(audio_path):
+     try:
+         result = whisper_model.transcribe(audio_path)
+         return result["text"]
+     except Exception as e:
+         return f"Error in speech-to-text conversion: {e}"
+
+ # Load Medical LLM (PubMedBERT)
+ try:
+     medical_llm = pipeline("question-answering", model="nlp4health/pubmedbert")
+ except Exception as e:
+     raise RuntimeError(f"Error loading Medical LLM: {e}")
+
+ def analyze_symptoms(question, context):
+     try:
+         return medical_llm(question=question, context=context)["answer"]
+     except Exception as e:
+         return f"Error analyzing symptoms: {e}"
+
+ # Connect to MongoDB (Hospital Database)
+ try:
+     client = MongoClient("mongodb://localhost:27017/")
+     db = client["hospital_db"]
+     hospitals = db["hospitals"]
+ except Exception as e:
+     raise RuntimeError(f"Error connecting to MongoDB: {e}")
+
+ def find_nearby_hospitals(location):
+     try:
+         geolocator = Nominatim(user_agent="geoapi")
+         loc = geolocator.geocode(location)
+         query = {"location": {"$near": {"$geometry": {"type": "Point", "coordinates": [loc.longitude, loc.latitude]}, "$maxDistance": 5000}}}
+         return list(hospitals.find(query))
+     except Exception as e:
+         return f"Error finding hospitals: {e}"
+
+ # Store conversation in JSON
+ conversations = {}
+
+ def store_conversation(user_id, message, response):
+     try:
+         if user_id not in conversations:
+             conversations[user_id] = []
+         conversations[user_id].append({"user": message, "bot": response})
+         with open("conversations.json", "w") as f:
+             json.dump(conversations, f, indent=4)
+     except Exception as e:
+         return f"Error storing conversation: {e}"

  # Streamlit UI
- def main():
-     st.title("HealthVoice Bot")
-     st.write("Talk to the bot about your symptoms.")
-
-     user_input = st.text_input("Your message:", "")
-
-     if user_input:
-         api_url = "http://localhost:8000/chat"  # FastAPI endpoint URL
-         message_payload = {"message": user_input}
-
-         try:
-             response = requests.post(api_url, json=message_payload)
-             response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
-             bot_response_json = response.json()
-             bot_messages = bot_response_json.get("responses", [])
-             for bot_message in bot_messages:
-                 st.text_area("Bot Response:", value=bot_message, height=len(bot_message.split('\n')) * 25)  # Adjust height dynamically
-
-         except requests.exceptions.RequestException as e:
-             st.error(f"Error communicating with the chatbot backend: {e}")
-
+ def streamlit_interface():
+     st.title("Medical Triage Bot")
+     uploaded_file = st.file_uploader("Upload Voice Message", type=["wav", "mp3", "m4a"])
+     if uploaded_file is not None:
+         audio_path = "temp_audio.wav"
+         with open(audio_path, "wb") as f:
+             f.write(uploaded_file.read())
+         text_input = speech_to_text(audio_path)
+         response = analyze_symptoms(text_input, "Common medical symptoms database")
+         store_conversation("guest", text_input, response)
+         st.write(f"**User:** {text_input}")
+         st.write(f"**Bot:** {response}")
+
+ # FastAPI Endpoints
+ @app.post("/voice_input/")
+ async def process_audio(file: UploadFile = File(...), user_id: str = "guest"):
+     try:
+         audio_path = f"temp_{user_id}.wav"
+         with open(audio_path, "wb") as buffer:
+             buffer.write(await file.read())
+
+         text_input = speech_to_text(audio_path)
+         response = analyze_symptoms(text_input, "Common medical symptoms database")
+         store_conversation(user_id, text_input, response)
+         return {"user": text_input, "bot": response}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error processing audio: {e}")
+
+ @app.get("/get_hospitals/")
+ def get_hospitals(location: str):
+     try:
+         return find_nearby_hospitals(location)
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=f"Error fetching hospitals: {e}")

  if __name__ == "__main__":
-     import uvicorn
-     import threading
-
-     # Start FastAPI app in a separate thread
-     def run_fastapi():
-         uvicorn.run(app, host="0.0.0.0", port=8000)
-
-     fastapi_thread = threading.Thread(target=run_fastapi)
-     fastapi_thread.daemon = True  # Daemon threads are abruptly stopped at exit
-     fastapi_thread.start()
-
-     main()  # Run Streamlit app in the main thread
+     streamlit_interface()
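
A few notes and hedged sketches on the new code follow; none of them are part of the commit itself. First, speech-to-text: whisper.load_model("base") and transcribe() come from the openai-whisper package, which relies on ffmpeg being available to decode most audio formats. A minimal stand-alone version of the call made in speech_to_text, with "sample.wav" as a placeholder file:

    import whisper

    model = whisper.load_model("base")       # same model size app.py loads at import time
    result = model.transcribe("sample.wav")  # returns a dict; the transcript is in result["text"]
    print(result["text"])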
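Second, analyze_symptoms wraps an extractive question-answering pipeline, so the returned answer is always a span copied from the context argument; with the fixed context string "Common medical symptoms database", the bot can only ever answer with words from that string. A hedged illustration of the pipeline's behaviour, using a stand-in checkpoint because the "nlp4health/pubmedbert" id in the diff may not resolve to a question-answering model:

    from transformers import pipeline

    # Stand-in QA checkpoint for illustration only, not the model named in app.py.
    qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
    result = qa(
        question="What can relieve a tension headache?",
        context="Rest, hydration and over-the-counter analgesics such as ibuprofen "
                "often relieve tension headaches.",
    )
    print(result["answer"])  # a span taken verbatim from the context above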
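Third, the $near query in find_nearby_hospitals assumes each document in hospital_db.hospitals stores a GeoJSON Point under "location" and that the collection has a 2dsphere index; without the index the query errors out. A hypothetical seed script under those assumptions (the hospital record is made up):

    from pymongo import MongoClient, GEOSPHERE

    client = MongoClient("mongodb://localhost:27017/")
    hospitals = client["hospital_db"]["hospitals"]

    hospitals.create_index([("location", GEOSPHERE)])  # 2dsphere index required by $near
    hospitals.insert_one({
        "name": "Example General Hospital",  # placeholder data
        "location": {"type": "Point", "coordinates": [74.3587, 31.5204]},  # [longitude, latitude]
    })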
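Fourth, a sketch of how a client might call the two new endpoints, assuming the API is served on localhost:8000 and "sample.wav" exists; user_id and location arrive as plain query parameters because of how the handlers declare them:

    import requests

    with open("sample.wav", "rb") as f:
        resp = requests.post(
            "http://localhost:8000/voice_input/",
            files={"file": ("sample.wav", f, "audio/wav")},
            params={"user_id": "guest"},
        )
    print(resp.json())  # {"user": "<transcript>", "bot": "<answer>"}

    resp = requests.get("http://localhost:8000/get_hospitals/", params={"location": "Lahore"})
    print(resp.status_code, resp.text)

Note that find_nearby_hospitals returns raw MongoDB documents whose _id fields are ObjectId values, which FastAPI's default JSON encoder cannot serialize, so /get_hospitals/ will likely need those fields stripped or converted before it responds cleanly.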
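Finally, __main__ now starts only the Streamlit UI, so the FastAPI routes are not served when the script runs directly. One possible launcher (an assumption, mirroring the threading approach the previous version used) serves uvicorn in the background before starting the UI; alternatively the two halves can simply run in separate processes with "uvicorn app:app --port 8000" and "streamlit run app.py":

    import threading

    import uvicorn
    from app import app, streamlit_interface  # objects defined in the new app.py

    def run_api():
        # Serve /voice_input/ and /get_hospitals/ alongside the Streamlit UI.
        uvicorn.run(app, host="0.0.0.0", port=8000)

    if __name__ == "__main__":
        threading.Thread(target=run_api, daemon=True).start()
        streamlit_interface()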