import base64
import os
import tempfile
from io import BytesIO

import requests
import streamlit as st
import whisper
from geopy.geocoders import Nominatim
from pydub import AudioSegment
from streamlit_js_eval import streamlit_js_eval
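
# Assumed dependencies (usual pip package names; adjust to the Space's requirements file):
#   pip install streamlit requests geopy openai-whisper pydub streamlit-js-eval
# Illustrative local run (hypothetical file name): streamlit run app.py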

# Hugging Face Inference API endpoint and API token.
# The environment variable name HF_API_KEY is an assumption; store the real token as a
# Space secret rather than hard-coding it in the source.
API_URL = "https://api-inference.huggingface.co/models/dmis-lab/biobert-base-cased-v1.1"
API_KEY = os.environ.get("HF_API_KEY", "")
headers = {"Authorization": f"Bearer {API_KEY}"}

# Initialize Whisper model
whisper_model = whisper.load_model("base")

# Function to query the Hugging Face model
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        st.error(f"Error: Unable to fetch response from model (status code: {response.status_code})")
        st.error(response.text)
        return None
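
# Illustrative shape of a successful question-answering response from the Inference API
# (values below are made up):
#   {"answer": "fever, dry cough, and tiredness", "score": 0.87, "start": 212, "end": 247}
# so downstream code can read result["answer"] once query() returns a non-None dict.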

# Geocode an address with Nominatim (geopy). This returns (latitude, longitude) only;
# it does not yet look up actual clinics or pharmacies around that point.
def find_nearby_clinics(address):
    geolocator = Nominatim(user_agent="healthcare_companion")
    location = geolocator.geocode(address)
    if location:
        return (location.latitude, location.longitude)
    else:
        st.error("Error: Address not found")
        return None
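
# Illustrative usage (hypothetical address): find_nearby_clinics("221B Baker Street, London")
# returns a (latitude, longitude) tuple on success, or None if Nominatim cannot resolve it.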

# Function to transcribe audio to text using Whisper
def transcribe_audio(audio_bytes):
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
        # Browsers typically record webm/ogg even if the blob is labelled audio/wav,
        # so let pydub/ffmpeg detect the container instead of forcing format="wav"
        audio = AudioSegment.from_file(BytesIO(audio_bytes))
        audio.export(temp_audio_file.name, format="wav")
        result = whisper_model.transcribe(temp_audio_file.name)
        return result["text"]
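
# Note: Whisper loads the temporary file through ffmpeg, so ffmpeg must be available
# on the system (on Hugging Face Spaces it is typically added via packages.txt).
# Illustrative usage (hypothetical file): transcribe_audio(open("sample.wav", "rb").read())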

# Main function to create the Streamlit app
def main():
    st.title("Healthcare Companion")
    st.write("This app provides healthcare guidance, prescription information, and locates nearby clinics or pharmacies.")

    # JavaScript code to capture audio
    js_code = """
    async function recordAudio() {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const mediaRecorder = new MediaRecorder(stream);
        let audioChunks = [];
        
        mediaRecorder.ondataavailable = event => {
            audioChunks.push(event.data);
        };
        
        mediaRecorder.onstop = async () => {
            const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
            const audioBuffer = await audioBlob.arrayBuffer();
            const audioBase64 = arrayBufferToBase64(audioBuffer);
            document.getElementById('audio_data').value = audioBase64;
            document.getElementById('audio_form').submit();
        };
        
        mediaRecorder.start();
        setTimeout(() => mediaRecorder.stop(), 5000);  // Record for 5 seconds
        
        function arrayBufferToBase64(buffer) {
            let binary = '';
            const bytes = new Uint8Array(buffer);
            const len = bytes.byteLength;
            for (let i = 0; i < len; i++) {
                binary += String.fromCharCode(bytes[i]);
            }
            return window.btoa(binary);
        }
    }
    
    recordAudio();
    """
    
    # Run the recording script in the browser. The script assumes elements with the ids
    # 'audio_data' and 'audio_form' exist on the page; its return value is not used here.
    streamlit_js_eval(js_code, key="record_audio")

    symptoms = None

    # Form that receives the base64-encoded audio posted by the JavaScript above
    with st.form("audio_form", clear_on_submit=True):
        # st.text_input has no "hidden" type, so a plain text field acts as the drop box
        audio_data = st.text_input("audio_data")
        submit_button = st.form_submit_button("Submit")

        if submit_button and audio_data:
            audio_bytes = base64.b64decode(audio_data)
            symptoms = transcribe_audio(audio_bytes)
            st.write(f"Transcribed symptoms: {symptoms}")

    if symptoms:
        context = """
        This is a healthcare question and answer platform. The following text contains typical symptoms, treatments, and medical conditions commonly asked about in healthcare settings.
        For example, symptoms of COVID-19 include fever, dry cough, and tiredness. Treatment options for hypertension include lifestyle changes and medications. The platform is designed to assist with general medical inquiries.
        """
        payload = {"inputs": {"question": symptoms, "context": context}}
        st.write(f"Debug: Payload sent to model: {payload}")  # Debugging: Check payload
        result = query(payload)
        st.write(f"Debug: Response from model: {result}")  # Debugging: Check response
        if result:
            st.write("**Medical Advice:**")
            # A successful QA response is a dict containing an 'answer' field
            answer = result.get("answer") if isinstance(result, dict) else None
            st.write(answer or "Sorry, I couldn't find an answer. Please consult a doctor.")

    # User input for address to find nearby clinics/pharmacies
    address = st.text_input("Enter your address to find nearby clinics/pharmacies:")
    if address:
        location = find_nearby_clinics(address)
        if location:
            st.write(f"**Nearby Clinics/Pharmacies (Coordinates):** {location}")

    # Additional features like prescription info can be added similarly

if __name__ == "__main__":
    main()