File size: 1,939 Bytes
d008cad
cda85cb
 
 
 
 
 
 
 
d008cad
cda85cb
 
 
 
d008cad
cda85cb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import streamlit as st
import websockets
import asyncio
import base64
import json
import pyaudio
import os
from pathlib import Path
import numpy as np

# Session state: initialised once per browser session. 'text' is the
# status string shown to the user; 'run' gates the monitoring loop and
# is toggled by the Start/Stop button callbacks below.
if 'text' not in st.session_state:
    st.session_state['text'] = 'Listening...'
    st.session_state['run'] = False

# Audio parameters, user-tunable from the Streamlit sidebar.
st.sidebar.header('Audio Parameters')

# NOTE(review): int(text_input(...)) raises ValueError if the user types
# a non-numeric value — consider st.sidebar.number_input for validation.
FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))  # samples per stream.read()
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
CHANNELS = 1              # mono capture
RATE = int(st.sidebar.text_input('Rate', 16000))  # sample rate in Hz
p = pyaudio.PyAudio()

# Open a microphone (input) stream with the parameters above.
# This runs at import time on every Streamlit rerun — presumably
# acceptable for this demo; verify the device is released on rerun.
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    frames_per_buffer=FRAMES_PER_BUFFER
)

# `monitoring` mirrors st.session_state['run'] as a plain module global;
# `audio_data` accumulates one numpy int16 array per captured chunk.
monitoring = False
audio_data = []

def start_monitoring():
    """Start-button callback: mark the app as actively capturing audio."""
    global monitoring
    monitoring = True
    st.session_state['run'] = True

def stop_monitoring():
    """Stop-button callback: clear the capture flag so the loop exits."""
    global monitoring
    monitoring = False
    st.session_state['run'] = False

st.title('🎙️ Real-Time Snore Detection App')

with st.expander('About this App'):
    st.markdown('''
    This streamlit app from Hypermind Labs Helps users detect
    how much they are snoring during their sleep.
    ''')

# Bug fix: st.columns(2) returns exactly two column objects, so
# unpacking into three names raised ValueError at runtime. Request
# three columns to match the three unpacked names.
col1, col2, col3 = st.columns(3)
col1.button('Start', on_click=start_monitoring)
col3.button('Stop', on_click=stop_monitoring)
# NOTE(review): this second button shares col3 with Stop and its value
# is never read below — confirm whether 'Monitor' is still needed.
record_button = col3.button('Monitor')

# Main capture loop: runs while the Start callback has set 'run'.
# Each iteration reads one buffer from the microphone and appends it
# (as an int16 numpy array) to the sliding window in `audio_data`.
while st.session_state.run:
    audio_chunk = stream.read(FRAMES_PER_BUFFER)
    audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))

    # Bug fix: the original tested len(audio_chunk) (the raw byte string
    # of the LAST read, whose length never changes) instead of
    # len(audio_data) (the number of buffered chunks). With the fix we
    # trigger once RATE // FRAMES_PER_BUFFER chunks (~one second of
    # audio) have accumulated.
    if monitoring and len(audio_data) >= RATE // FRAMES_PER_BUFFER:
        # Bug fix: concatenate the buffered numpy arrays, not a slice of
        # the raw bytes object (which would fail / produce garbage).
        audio_data_chunk = np.concatenate(audio_data[:RATE // FRAMES_PER_BUFFER])
        print("PROCESSING BY MODEL GOES HERE")
        # if model is not None:
        #     input_tensor = torch.tensor(audio_data_chunk)
        #     with torch.no_grad():
        #         output = model(input_tensor)

        # Bug fix: slide the window on audio_data (the original sliced
        # audio_chunk, so the buffer grew without bound). Drop the
        # oldest chunk so the window advances one buffer per iteration.
        audio_data = audio_data[1:]

# Release the microphone and PyAudio resources once monitoring stops.
stream.stop_stream()
stream.close()
p.terminate()