|
|
|
"""new.ipynb |
|
|
|
Automatically generated by Colab. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/10l5M_bqlqlmx8isz58qrPCp9YwDOKjiC |
|
""" |
|
"""" |
|
from re import S |
|
!pip install Streamlit |
|
!pip install whisper |
|
!pip install pydub |
|
!pip install mapbox |
|
!pip install openai-whisper pydub streamlit requests geopy |
|
!pip install streamlit-webrtc |
|
"""" |
|
|
|
# Colab-only: read a stored secret from the notebook's user-data vault.
from google.colab import userdata

# NOTE(review): the returned value is discarded — presumably it was meant to
# supply the Hugging Face API token used in the Authorization header below;
# confirm the intent and wire the value in instead of dropping it.
userdata.get('secret')
|
|
|
import streamlit as st |
|
import requests |
|
from geopy.geocoders import Nominatim |
|
import whisper |
|
import tempfile |
|
from pydub import AudioSegment |
|
from io import BytesIO |
|
import base64 |
|
import folium |
|
import requests |
|
from mapbox import Geocoder |
|
import folium |
|
|
|
|
|
import os  # read API credentials from the environment instead of source

# Hugging Face inference endpoint for the BioBERT QA model.
API_URL = "https://api-inference.huggingface.co/models/dmis-lab/biobert-base-cased-v1.1"

# BUG FIX: the original header was the literal string "secret", which is not
# a valid Hugging Face credential. The Inference API expects
# "Authorization: Bearer <token>". The token is read from the environment so
# it is never committed to source control (falls back to the old placeholder
# for backward compatibility).
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "secret")
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

# Initialize Whisper speech-to-text model (weights download on first use).
whisper_model = whisper.load_model("base")

# SECURITY NOTE(review): a live-looking Mapbox *secret* key ("sk.…") was
# hard-coded here. It is kept only as a backward-compatible fallback —
# rotate this key and provide MAPBOX_API_KEY via the environment.
MAPBOX_API_KEY = os.environ.get(
    "MAPBOX_API_KEY",
    "sk.eyJ1IjoibWFyaW9jYWxlYiIsImEiOiJjbHdkZWV3b2wwdnlxMmtwbnhqaHdnNzA3In0.iOhyIXlUFztYtc1iZhSMng",
)

geocoder = Geocoder(access_token=MAPBOX_API_KEY)
|
|
|
# Function to query the Hugging Face model |
|
# Function to query the Hugging Face model
def query(payload):
    """POST *payload* to the Hugging Face inference API.

    Returns the parsed JSON response on HTTP 200; on any other status,
    reports the failure via Streamlit error widgets and returns None.
    """
    response = requests.post(API_URL, headers=headers, json=payload)
    # Guard clause: surface the failure in the UI and bail out early.
    if response.status_code != 200:
        st.error(f"Error: Unable to fetch response from model (status code: {response.status_code})")
        st.error(response.text)
        return None
    return response.json()
|
|
|
# Function to find nearby clinics/pharmacies using Mapbox |
|
# Function to find nearby clinics/pharmacies using Mapbox
def find_nearby_clinics(address):
    """Forward-geocode *address* with Mapbox.

    Returns the first matching feature's ``center`` ([lon, lat]) or None,
    reporting failures through Streamlit error widgets.
    """
    response = geocoder.forward(address)
    # Guard clause: any non-200 response means no usable location data.
    if response.status_code != 200:
        st.error("Error: Unable to fetch location data")
        return None
    features = response.json()['features']
    if not features:
        st.error("No locations found")
        return None
    return features[0]['center']
|
|
|
# Function to transcribe audio to text using Whisper |
|
# Function to transcribe audio to text using Whisper
def transcribe_audio(audio_bytes):
    """Transcribe WAV audio to text using the module-level Whisper model.

    Parameters
    ----------
    audio_bytes : bytes | bytearray | file-like
        Raw WAV bytes, or an already-open file-like object (e.g. BytesIO).
        BUG FIX: the original unconditionally wrapped the argument in
        ``BytesIO(...)``, which raises TypeError when the caller (``main``)
        passes a BytesIO instead of raw bytes.

    Returns
    -------
    str
        The transcribed text from Whisper's result dict.
    """
    import os  # local import: remove the temp file when done

    # Accept either raw bytes or a file-like object.
    if isinstance(audio_bytes, (bytes, bytearray)):
        source = BytesIO(audio_bytes)
    else:
        source = audio_bytes

    temp_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            temp_path = temp_audio_file.name
            audio = AudioSegment.from_file(source, format="wav")
            audio.export(temp_path, format="wav")
            result = whisper_model.transcribe(temp_path)
        return result["text"]
    finally:
        # BUG FIX: delete=False leaked one temp file per call; clean it up.
        if temp_path is not None:
            try:
                os.remove(temp_path)
            except OSError:
                pass
|
|
|
# Main function to create the Streamlit app |
|
# Main function to create the Streamlit app
def main():
    """Render the Healthcare Companion Streamlit app.

    Flow: capture audio (intended to come from the injected JS snippet) ->
    transcribe it with Whisper -> send the symptoms to a BioBERT QA model
    for advice -> geocode a user-entered address and show it on a map.
    """
    # Submodule of the already-imported streamlit package; used to embed the
    # folium map. BUG FIX: the original called st_folium(), a name that was
    # never imported anywhere in this file and raised NameError at runtime.
    import streamlit.components.v1 as components

    st.title("Healthcare Companion")
    st.write("Welcome to Healthcare Companion! Your personal healthcare guide.")

    st.header("Speak Your Symptoms")
    st.write("Press the button and speak your symptoms clearly.")

    # JavaScript code to capture ~5 seconds of microphone audio and post it
    # back as base64. NOTE(review): this snippet is currently never injected
    # into the page (the streamlit_js_eval call below is commented out), so
    # the form field must be filled manually — confirm intended wiring.
    js_code = """

    async function recordAudio() {

        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });

        const mediaRecorder = new MediaRecorder(stream);

        let audioChunks = [];



        mediaRecorder.ondataavailable = event => {

            audioChunks.push(event.data);

        };



        mediaRecorder.onstop = async () => {

            const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });

            const audioBuffer = await audioBlob.arrayBuffer();

            const audioBase64 = arrayBufferToBase64(audioBuffer);

            document.getElementById('audio_data').value = audioBase64;

            document.getElementById('audio_form').submit();

        };



        mediaRecorder.start();

        setTimeout(() => mediaRecorder.stop(), 5000); // Record for 5 seconds



        function arrayBufferToBase64(buffer) {

            let binary = '';

            const bytes = new Uint8Array(buffer);

            const len = bytes.byteLength;

            for (let i = 0; i < len; i++) {

                binary += String.fromCharCode(bytes[i]);

            }

            return window.btoa(binary);

        }

    }



    recordAudio();

    """

    # Placeholder for audio data
    #st_js_code = streamlit_js_eval(js_code, key="record_audio")

    # Form to receive audio data from JavaScript
    with st.form("audio_form", clear_on_submit=True):
        audio_data = st.text_input("audio_data", type="default")
        submit_button = st.form_submit_button("Submit")

    if submit_button and audio_data:
        # BUG FIX: pass the raw decoded bytes. The original wrapped them in
        # BytesIO here AND transcribe_audio wrapped the argument in BytesIO
        # again, which raises TypeError.
        audio_bytes = base64.b64decode(audio_data)
        symptoms = transcribe_audio(audio_bytes)
        st.write(f"**Transcribed symptoms:** {symptoms}")

    # Only reached on the rerun where the form was just submitted; on other
    # reruns `symptoms` is unbound, hence the locals() check.
    if 'symptoms' in locals() and symptoms:
        st.header("Symptom Checker")
        st.write("Enter your symptoms below for advice.")

        context = """

        This is a healthcare question and answer platform. The following text contains typical symptoms, treatments, and medical conditions commonly asked about in healthcare settings.

        For example, symptoms of COVID-19 include fever, dry cough, and tiredness. Treatment options for hypertension include lifestyle changes and medications. The platform is designed to assist with general medical inquiries.

        """
        payload = {"inputs": {"question": symptoms, "context": context}}
        result = query(payload)
        if result:
            st.write("**Medical Advice:**")
            answer = result.get('answer', "Sorry, I don't have information on that.")
            st.write(answer)

    st.header("Locate Nearest Pharmacy/Clinic")
    st.write("Enter your address to find the nearest pharmacy or clinic.")

    address = st.text_input("Enter your address here:")
    if address:
        location = find_nearby_clinics(address)
        if location:
            # Mapbox returns [lon, lat]; folium expects [lat, lon].
            map_ = folium.Map(location=[location[1], location[0]], zoom_start=15)
            folium.Marker([location[1], location[0]], popup="Your Location").add_to(map_)
            # BUG FIX: st_folium was undefined — embed the map's rendered
            # HTML via streamlit's components API instead.
            components.html(map_._repr_html_(), width=700, height=500)
            st.write(f"**Nearby Clinics/Pharmacies (Coordinates):** {location}")

    st.write("Providing reliable healthcare guidance and information. Please consult a healthcare professional for medical emergencies.")
|
|
|
if __name__ == "__main__": |
|
main() |
|
|
|
|