"""Real-time snore detection Streamlit app (Hypermind Labs).

Renders Start/Stop/Monitor controls and, while monitoring is active,
an in-browser audio recorder whose captured WAV clip is played back.
Model inference on the recorded audio is not implemented yet (see TODO).
"""

import streamlit as st
from st_audiorec import st_audiorec

# --- Session state ---------------------------------------------------------
# Streamlit re-runs this whole script on every interaction; st.session_state
# is the only storage that survives reruns. 'run' flags whether monitoring
# is active. Initialize once, keyed off the first-run sentinel 'text'.
if 'text' not in st.session_state:
    st.session_state['text'] = 'Listening...'
    st.session_state['run'] = False

# --- Audio parameters ------------------------------------------------------
st.sidebar.header('Audio Parameters')
# NOTE(review): int() on a free-form text_input raises ValueError on
# non-numeric entry — st.sidebar.number_input would be safer; kept as
# text_input to preserve the existing UI.
FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))
FORMAT = 'audio/wav'
CHANNELS = 1
RATE = int(st.sidebar.text_input('Rate', 16000))


def start_monitoring():
    """Button callback: mark monitoring as active for subsequent reruns."""
    st.session_state['run'] = True


def stop_monitoring():
    """Button callback: mark monitoring as inactive for subsequent reruns."""
    st.session_state['run'] = False


st.title('🎙️ Real-Time Snore Detection App')

with st.expander('About this App'):
    st.markdown('''
    This streamlit app from Hypermind Labs Helps users detect
    how much they are snoring during their sleep.
    ''')

# Start in the left column, Monitor in the middle, Stop on the right.
# (Previously both Stop and Monitor were stacked in col3 and col2 was empty.)
col1, col2, col3 = st.columns(3)
col1.button('Start', on_click=start_monitoring)
col2.button('Monitor')
col3.button('Stop', on_click=stop_monitoring)

# Render the recorder only while monitoring is active. An `if` replaces the
# original `while st.session_state.run:` loop: Streamlit re-runs the script
# on each interaction, so looping here re-created the st_audiorec widget
# with the same ID (DuplicateWidgetID error) instead of iterating usefully.
if st.session_state['run']:
    wav_audio_data = st_audiorec()
    if wav_audio_data is not None:
        # Use the user-configured RATE rather than a hard-coded 16000 so the
        # sidebar 'Rate' setting actually takes effect on playback.
        st.audio(wav_audio_data, format=FORMAT, sample_rate=RATE)
        # TODO(hypermind): feed wav_audio_data into the snore-detection
        # model here (chunking by FRAMES_PER_BUFFER, torch inference).