Upload new.py
Browse files
new.py
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
"""new.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/10l5M_bqlqlmx8isz58qrPCp9YwDOKjiC
"""

# NOTE(review): the original `!pip install ...` notebook magics are shell
# commands, not Python -- importing this module as a .py file raised
# SyntaxError on the first one.  They are preserved as comments; install the
# dependencies from a shell instead:
#   pip install streamlit openai-whisper pydub mapbox requests geopy folium
# !pip install Streamlit
# !pip install whisper
# !pip install pydub
# !pip install mapbox
# !pip install openai-whisper pydub streamlit requests geopy
# !pip install streamlit-webrtc

# (The stray `from re import S` autocomplete artifact was removed; `S` is
# never used in this file.)

# Colab secret store -- only available when running inside Google Colab.
from google.colab import userdata

# Hugging Face API token registered under the Colab secret name 'secret'.
# The original fetched this value and discarded it; it is kept in a named
# constant so it can actually be used.
HF_API_TOKEN = userdata.get('secret')
|
import base64
import tempfile
from io import BytesIO

import folium
import requests
import streamlit as st
import whisper
from geopy.geocoders import Nominatim
from mapbox import Geocoder
from pydub import AudioSegment

# NOTE(review): duplicate `import requests` / `import folium` lines from the
# original were collapsed into the single grouped imports above.  `Nominatim`
# is imported but unused in this file -- kept in case another cell used it;
# confirm before removing.

# Hugging Face inference endpoint for the BioBERT QA model.
API_URL = "https://api-inference.huggingface.co/models/dmis-lab/biobert-base-cased-v1.1"

# BUG FIX: the original header was {"Authorization": f"secret"} -- an f-string
# with no placeholder, so the literal text "secret" was sent and every request
# was unauthenticated.  The HF inference API expects "Bearer <token>"; the
# token comes from the Colab secret store (imported earlier in this file).
headers = {"Authorization": f"Bearer {userdata.get('secret')}"}

# Initialize the Whisper speech-to-text model (base checkpoint; downloaded on
# first use, so this line does network I/O at import time).
whisper_model = whisper.load_model("base")

# SECURITY(review): this Mapbox *secret* key is hard-coded and has been
# committed to source control.  It should be revoked and supplied via an
# environment variable or secret store; left in place here only to preserve
# runtime behavior.
MAPBOX_API_KEY = "sk.eyJ1IjoibWFyaW9jYWxlYiIsImEiOiJjbHdkZWV3b2wwdnlxMmtwbnhqaHdnNzA3In0.iOhyIXlUFztYtc1iZhSMng"
geocoder = Geocoder(access_token=MAPBOX_API_KEY)
46 |
+
# Function to query the Hugging Face model
def query(payload):
    """POST *payload* to the Hugging Face inference API and decode the reply.

    Parameters
    ----------
    payload : dict
        JSON request body, e.g. ``{"inputs": {"question": ..., "context": ...}}``.

    Returns
    -------
    dict | list | None
        The decoded JSON response on HTTP 200; otherwise ``None``, with the
        error surfaced in the Streamlit UI via ``st.error``.
    """
    # BUG FIX: the original request had no timeout, so a stalled inference
    # endpoint would hang the Streamlit app forever.  Network failures are
    # also reported instead of propagating as an unhandled exception.
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    except requests.RequestException as exc:
        st.error(f"Error: request to model failed ({exc})")
        return None
    if response.status_code == 200:
        return response.json()
    st.error(f"Error: Unable to fetch response from model (status code: {response.status_code})")
    st.error(response.text)
    return None
|
55 |
+
|
56 |
+
# Function to find nearby clinics/pharmacies using Mapbox
def find_nearby_clinics(address):
    """Forward-geocode *address* with Mapbox and return its center point.

    Returns the first matching feature's ``center`` ([longitude, latitude])
    or ``None`` after reporting the problem in the Streamlit UI -- either the
    geocoding request failed or nothing matched the address.
    """
    geocode_response = geocoder.forward(address)

    # Guard clause: bail out early on any non-OK HTTP status.
    if geocode_response.status_code != 200:
        st.error("Error: Unable to fetch location data")
        return None

    matches = geocode_response.json()['features']
    if not matches:
        st.error("No locations found")
        return None

    # Mapbox reports coordinates as [longitude, latitude].
    return matches[0]['center']
|
70 |
+
|
71 |
+
# Function to transcribe audio to text using Whisper
def transcribe_audio(audio_bytes):
    """Transcribe WAV audio to text with the module-level Whisper model.

    Parameters
    ----------
    audio_bytes : bytes | io.BytesIO
        WAV audio, either raw bytes or an in-memory file object.

    Returns
    -------
    str
        The transcribed text from Whisper.
    """
    import os

    # BUG FIX: main() passes a BytesIO, which the original wrapped in
    # BytesIO() *again* -- a TypeError at runtime.  Accept either raw bytes
    # or an already-file-like object.
    source = audio_bytes if hasattr(audio_bytes, "read") else BytesIO(audio_bytes)

    # delete=False is required because Whisper reopens the file by path;
    # BUG FIX: the original never removed it, leaking one temp file per call.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
        temp_path = temp_audio_file.name
    try:
        audio = AudioSegment.from_file(source, format="wav")
        audio.export(temp_path, format="wav")
        result = whisper_model.transcribe(temp_path)
        return result["text"]
    finally:
        os.remove(temp_path)
|
78 |
+
|
79 |
+
# Main function to create the Streamlit app
def main():
    """Render the Healthcare Companion Streamlit app.

    Flow: capture audio in the browser (JavaScript snippet), transcribe it
    with Whisper, send the transcription to the BioBERT QA endpoint for
    advice, and optionally geocode a user-supplied address and show it on a
    folium map.
    """
    st.title("Healthcare Companion")
    st.write("Welcome to Healthcare Companion! Your personal healthcare guide.")

    st.header("Speak Your Symptoms")
    st.write("Press the button and speak your symptoms clearly.")

    # JavaScript code to capture 5 seconds of microphone audio and submit it,
    # base64-encoded, through the form below.
    # NOTE(review): this snippet is never injected into the page -- the
    # streamlit_js_eval call is commented out -- so recording does not work;
    # the form currently only accepts base64 audio entered manually.  Confirm
    # whether re-enabling the injection is intended.
    js_code = """
    async function recordAudio() {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const mediaRecorder = new MediaRecorder(stream);
        let audioChunks = [];

        mediaRecorder.ondataavailable = event => {
            audioChunks.push(event.data);
        };

        mediaRecorder.onstop = async () => {
            const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
            const audioBuffer = await audioBlob.arrayBuffer();
            const audioBase64 = arrayBufferToBase64(audioBuffer);
            document.getElementById('audio_data').value = audioBase64;
            document.getElementById('audio_form').submit();
        };

        mediaRecorder.start();
        setTimeout(() => mediaRecorder.stop(), 5000); // Record for 5 seconds

        function arrayBufferToBase64(buffer) {
            let binary = '';
            const bytes = new Uint8Array(buffer);
            const len = bytes.byteLength;
            for (let i = 0; i < len; i++) {
                binary += String.fromCharCode(bytes[i]);
            }
            return window.btoa(binary);
        }
    }

    recordAudio();
    """

    # Placeholder for audio data
    #st_js_code = streamlit_js_eval(js_code, key="record_audio")

    # BUG FIX: the original tested `'symptoms' in locals()` to detect whether
    # transcription had run -- fragile and opaque.  Initialize explicitly.
    symptoms = None

    # Form to receive audio data from JavaScript
    with st.form("audio_form", clear_on_submit=True):
        audio_data = st.text_input("audio_data", type="default")
        submit_button = st.form_submit_button("Submit")

        if submit_button and audio_data:
            audio_bytes = BytesIO(base64.b64decode(audio_data))
            symptoms = transcribe_audio(audio_bytes)
            st.write(f"**Transcribed symptoms:** {symptoms}")

    if symptoms:
        st.header("Symptom Checker")
        st.write("Enter your symptoms below for advice.")

        context = """
        This is a healthcare question and answer platform. The following text contains typical symptoms, treatments, and medical conditions commonly asked about in healthcare settings.
        For example, symptoms of COVID-19 include fever, dry cough, and tiredness. Treatment options for hypertension include lifestyle changes and medications. The platform is designed to assist with general medical inquiries.
        """
        payload = {"inputs": {"question": symptoms, "context": context}}
        result = query(payload)
        if result:
            st.write("**Medical Advice:**")
            answer = result.get('answer', "Sorry, I don't have information on that.")
            st.write(answer)

    st.header("Locate Nearest Pharmacy/Clinic")
    st.write("Enter your address to find the nearest pharmacy or clinic.")

    address = st.text_input("Enter your address here:")
    if address:
        location = find_nearby_clinics(address)
        if location:
            # Mapbox centers are [lon, lat]; folium wants [lat, lon].
            map_ = folium.Map(location=[location[1], location[0]], zoom_start=15)
            folium.Marker([location[1], location[0]], popup="Your Location").add_to(map_)
            # BUG FIX: the original called st_folium(), which is never
            # imported anywhere in this file (NameError at runtime).  Render
            # the folium map through Streamlit's HTML component instead.
            import streamlit.components.v1 as components
            components.html(map_._repr_html_(), width=700, height=500)
            st.write(f"**Nearby Clinics/Pharmacies (Coordinates):** {location}")

    st.write("Providing reliable healthcare guidance and information. Please consult a healthcare professional for medical emergencies.")

if __name__ == "__main__":
    main()
|
167 |
+
|