Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,109 +1,93 @@
|
|
1 |
import os
|
2 |
import json
|
3 |
-
import
|
4 |
-
import
|
5 |
-
|
6 |
-
from transformers import pipeline
|
7 |
-
from geopy.geocoders import Nominatim
|
8 |
-
from pymongo import MongoClient
|
9 |
-
import streamlit as st
|
10 |
-
import os
|
11 |
-
os.system("pip install --no-cache-dir -r requirements.txt")
|
12 |
-
|
13 |
|
14 |
-
# Initialize
|
15 |
-
|
|
|
16 |
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
raise RuntimeError(f"Error loading Whisper model: {e}")
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
result = whisper_model.transcribe(audio_path)
|
26 |
-
return result["text"]
|
27 |
-
except Exception as e:
|
28 |
-
return f"Error in speech-to-text conversion: {e}"
|
29 |
-
|
30 |
-
# Load Medical LLM (PubMedBERT)
|
31 |
-
try:
|
32 |
-
medical_llm = pipeline("question-answering", model="nlp4health/pubmedbert")
|
33 |
-
except Exception as e:
|
34 |
-
raise RuntimeError(f"Error loading Medical LLM: {e}")
|
35 |
|
36 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
try:
|
38 |
-
|
39 |
-
|
40 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
-
#
|
43 |
try:
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
except Exception as e:
|
48 |
-
|
49 |
-
|
50 |
-
def find_nearby_hospitals(location):
|
51 |
-
try:
|
52 |
-
geolocator = Nominatim(user_agent="geoapi")
|
53 |
-
loc = geolocator.geocode(location)
|
54 |
-
query = {"location": {"$near": {"$geometry": {"type": "Point", "coordinates": [loc.longitude, loc.latitude]}, "$maxDistance": 5000}}}
|
55 |
-
return list(hospitals.find(query))
|
56 |
-
except Exception as e:
|
57 |
-
return f"Error finding hospitals: {e}"
|
58 |
|
59 |
# Store conversation in JSON
|
60 |
-
|
61 |
|
62 |
-
def
|
63 |
-
|
64 |
-
|
65 |
-
conversations[user_id] = []
|
66 |
-
conversations[user_id].append({"user": message, "bot": response})
|
67 |
-
with open("conversations.json", "w") as f:
|
68 |
-
json.dump(conversations, f, indent=4)
|
69 |
-
except Exception as e:
|
70 |
-
return f"Error storing conversation: {e}"
|
71 |
|
72 |
-
#
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
if uploaded_file is not None:
|
77 |
-
audio_path = "temp_audio.wav"
|
78 |
-
with open(audio_path, "wb") as f:
|
79 |
-
f.write(uploaded_file.read())
|
80 |
-
text_input = speech_to_text(audio_path)
|
81 |
-
response = analyze_symptoms(text_input, "Common medical symptoms database")
|
82 |
-
store_conversation("guest", text_input, response)
|
83 |
-
st.write(f"**User:** {text_input}")
|
84 |
-
st.write(f"**Bot:** {response}")
|
85 |
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
try:
|
90 |
-
audio_path = f"temp_{user_id}.wav"
|
91 |
-
with open(audio_path, "wb") as buffer:
|
92 |
-
buffer.write(await file.read())
|
93 |
-
|
94 |
-
text_input = speech_to_text(audio_path)
|
95 |
-
response = analyze_symptoms(text_input, "Common medical symptoms database")
|
96 |
-
store_conversation(user_id, text_input, response)
|
97 |
-
return {"user": text_input, "bot": response}
|
98 |
-
except Exception as e:
|
99 |
-
raise HTTPException(status_code=500, detail=f"Error processing audio: {e}")
|
100 |
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
107 |
|
108 |
-
|
109 |
-
|
|
|
1 |
import os
|
2 |
import json
|
3 |
+
import time
|
4 |
+
import speech_recognition as sr
|
5 |
+
import pyttsx3
|
6 |
+
from transformers import pipeline, AutoTokenizer, AutoModel
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
+
# Text-to-speech setup: one shared engine drives every spoken prompt.
SPEECH_RATE = 150  # words per minute — slowed down so prompts stay intelligible
tts_engine = pyttsx3.init()
tts_engine.setProperty("rate", SPEECH_RATE)


def speak(text):
    """Read *text* aloud, blocking until the engine finishes speaking."""
    tts_engine.say(text)
    tts_engine.runAndWait()
|
|
|
16 |
|
17 |
+
# Initialize Speech Recognition (STT)
recognizer = sr.Recognizer()


def listen():
    """Capture the user's voice input from the microphone and return it as text.

    Re-prompts (via a loop, not recursion) while the audio is unintelligible;
    returns "" when the recognition service itself cannot be reached.
    """
    while True:
        with sr.Microphone() as source:
            print("🎤 Listening...")
            recognizer.adjust_for_ambient_noise(source)
            audio = recognizer.listen(source)
        try:
            user_input = recognizer.recognize_google(audio)
            print(f"User: {user_input}")
            return user_input
        except sr.UnknownValueError:
            # Unintelligible audio: ask again and retry. The original recursed
            # into listen() here, which grows the call stack without bound on
            # a noisy line; looping is behaviorally equivalent and safe.
            speak("Sorry, I didn't catch that. Can you repeat?")
        except sr.RequestError:
            # Recognition backend unreachable — give up rather than spin.
            speak("There was an error with the speech recognition service.")
            return ""
|
37 |
|
38 |
+
# Load Medical Model (PubMedBERT feature extractor).
# Hoisted to a constant so tokenizer and model are guaranteed to agree.
MEDICAL_MODEL_NAME = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
try:
    tokenizer = AutoTokenizer.from_pretrained(MEDICAL_MODEL_NAME)
    model = AutoModel.from_pretrained(MEDICAL_MODEL_NAME)
    # device=-1 pins inference to CPU.
    medical_pipeline = pipeline('feature-extraction', model=model, tokenizer=tokenizer, device=-1)
except Exception as e:
    # Don't crash the assistant if the model can't be loaded; record the
    # reason (previously swallowed) and fall back to canned guidance —
    # downstream code checks `medical_pipeline` for None.
    print(f"Medical model failed to load: {e}")
    speak("Error loading medical AI model. Using fallback response.")
    medical_pipeline = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
46 |
|
47 |
# Store conversation in JSON
# In-memory record of the session; flushed to disk by save_conversation().
conversation_data = {"user_info": {}, "medical_history": [], "chat": []}


def save_conversation():
    """Persist the in-memory conversation record to conversation.json."""
    # utf-8 + ensure_ascii=False keep emoji and non-ASCII answers readable
    # in the file regardless of the platform's default encoding.
    with open("conversation.json", "w", encoding="utf-8") as f:
        json.dump(conversation_data, f, indent=4, ensure_ascii=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
|
54 |
+
# --- Intake: collect basic user details before the main loop ---
speak("Hello! I am your medical assistant. May I know your name?")
user_name = listen()
conversation_data["user_info"]["name"] = user_name

speak(f"Nice to meet you, {user_name}. How old are you?")
age = listen()
conversation_data["user_info"]["age"] = age

speak("What is your primary health concern today?")
health_issue = listen()
conversation_data["medical_history"].append(health_issue)

# --- Real-time conversation loop: one spoken exchange per iteration ---
while True:
    speak("Tell me more about your symptoms or concerns.")
    utterance = listen()

    # Either farewell word ends the session.
    if any(word in utterance.lower() for word in ("stop", "bye")):
        speak("Thank you for sharing your details. Take care!")
        break

    conversation_data["chat"].append({"user": utterance})

    # Run the query through the medical model when it loaded; the extracted
    # features are not consumed, so only success/failure of the call matters.
    if medical_pipeline is not None:
        try:
            medical_pipeline(utterance)
            response = "Based on medical analysis, I recommend consulting a doctor for further evaluation."
        except Exception:
            response = "I couldn't analyze your query, but I suggest seeking medical advice."
    else:
        response = "I can assist with basic health guidance. Please consult a medical professional for proper diagnosis."

    conversation_data["chat"].append({"bot": response})
    speak(response)
    save_conversation()  # flush after every exchange so a crash loses nothing

# Final flush once the loop exits.
save_conversation()
|