import streamlit as st
from st_audiorec import st_audiorec

# Session state persists across Streamlit's script reruns
if 'text' not in st.session_state:
    st.session_state['text'] = 'Listening...'
if 'run' not in st.session_state:
    st.session_state['run'] = False

# Audio parameters
st.sidebar.header('Audio Parameters')

FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', '3200'))
FORMAT = 'audio/wav'
CHANNELS = 1
RATE = int(st.sidebar.text_input('Rate (Hz)', '16000'))
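# With the defaults above, RATE // FRAMES_PER_BUFFER == 5, i.e. five buffers
# make up one second of audio; the chunking sketch at the bottom of the file
# uses that ratio as its window size.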

# Module-level flags. Streamlit reruns this script on every interaction,
# so these reset each run; st.session_state['run'] is what persists.
monitoring = False
audio_data = []

def start_monitoring():
    global monitoring
    st.session_state['run'] = True
    monitoring = True

def stop_monitoring():
    global monitoring
    st.session_state['run'] = False
    monitoring = False

st.title('🎙️ Real-Time Snore Detection App')

with st.expander('About this App'):
    st.markdown('''
    This Streamlit app from Hypermind Labs helps users detect
    how much they snore during their sleep.
    ''')

col1, col2, col3 = st.columns(3)  # three columns, one per button
col1.button('Start', on_click=start_monitoring)
col2.button('Stop', on_click=stop_monitoring)
record_button = col3.button('Monitor')  # placeholder; not wired up yet

# Streamlit reruns the script on every interaction, so render the recorder
# with `if` rather than a blocking `while` loop, which would re-create the
# widget each iteration and never yield control back to Streamlit.
if st.session_state['run']:
    wav_audio_data = st_audiorec()
    if wav_audio_data is not None:
        # WAV bytes already carry the sample rate.
        st.audio(wav_audio_data, format=FORMAT)

    # Sketch of an alternative PyAudio-based pipeline (no stream is actually
    # opened above, so this stays commented out):
    # audio_chunk = stream.read(FRAMES_PER_BUFFER)
    # audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))

    # Once one second of audio is buffered (RATE // FRAMES_PER_BUFFER
    # buffers), concatenate it, hand it to the model, then slide the window
    # forward by one buffer. Note this indexes audio_data, the list of
    # buffers, not the raw audio_chunk bytes.
    # if monitoring and len(audio_data) >= RATE // FRAMES_PER_BUFFER:
    #     audio_data_chunk = np.concatenate(audio_data[:RATE // FRAMES_PER_BUFFER])
    #     print("PROCESSING BY MODEL GOES HERE")
    #     # if model is not None:
    #     #     input_tensor = torch.tensor(audio_data_chunk)
    #     #     with torch.no_grad():
    #     #         output = model(input_tensor)
    #
    #     audio_data = audio_data[1:]
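
# --- Hedged sketch (not part of the original app): one way the model hook
# above could be wired once a trained model exists. The file name
# 'snore_model.pt' and the (1, num_samples) float32 input returning a scalar
# snore probability are assumptions, not a confirmed interface.
def detect_snoring(wav_bytes):
    """Decode the recorder's WAV bytes and score them with the model."""
    import io
    import wave
    import numpy as np
    import torch  # imports kept local so the app still runs without torch

    with wave.open(io.BytesIO(wav_bytes), 'rb') as wf:
        raw = wf.readframes(wf.getnframes())
    # Assumes 16-bit PCM mono; check wf.getsampwidth()/getnchannels() if not.
    samples = np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32768.0
    model = torch.jit.load('snore_model.pt')  # hypothetical model file
    with torch.no_grad():
        score = model(torch.from_numpy(samples).unsqueeze(0))
    return float(score)

# Hypothetical usage inside the `if st.session_state['run']:` block above:
#     if wav_audio_data is not None:
#         st.metric('Snore probability', f'{detect_snoring(wav_audio_data):.2f}')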