"""Real-time snore-detection Streamlit app.

Captures microphone audio with PyAudio while the user has monitoring
enabled, accumulates ~1 second of samples, and hands each window to a
(placeholder) model for classification.
"""
import streamlit as st
import websockets
import asyncio
import base64
import json
import pyaudio
import os
from pathlib import Path
import numpy as np

# Session state: 'run' drives the capture loop across Streamlit reruns.
if 'text' not in st.session_state:
    st.session_state['text'] = 'Listening...'
    st.session_state['run'] = False

# Audio parameters (user-tunable from the sidebar).
st.sidebar.header('Audio Parameters')
FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))
FORMAT = pyaudio.paInt16  # 16-bit signed samples, matching np.int16 below
CHANNELS = 1
RATE = int(st.sidebar.text_input('Rate', 16000))

p = pyaudio.PyAudio()

# Open an input (microphone) audio stream.
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    frames_per_buffer=FRAMES_PER_BUFFER
)

monitoring = False
audio_data = []  # list of np.int16 arrays, one per captured chunk


def start_monitoring():
    """Button callback: enable the capture loop."""
    global monitoring
    st.session_state['run'] = True
    monitoring = True


def stop_monitoring():
    """Button callback: disable the capture loop."""
    global monitoring
    st.session_state['run'] = False
    monitoring = False


st.title('🎙️ Real-Time Snore Detection App')

with st.expander('About this App'):
    st.markdown('''
    This streamlit app from Hypermind Labs Helps users detect
    how much they are snoring during their sleep.
    ''')

# BUG FIX: was st.columns(2) unpacked into 3 names -> ValueError at runtime.
col1, col2, col3 = st.columns(3)
col1.button('Start', on_click=start_monitoring)
col3.button('Stop', on_click=stop_monitoring)
record_button = col3.button('Monitor')

# Number of chunks that make up one model window (~1 s of audio at
# the default settings: 16000 / 3200 = 5 chunks).
CHUNKS_PER_WINDOW = RATE // FRAMES_PER_BUFFER

while st.session_state.run:
    # exception_on_overflow=False: drop samples instead of crashing when
    # the Streamlit loop falls behind the audio callback.
    audio_chunk = stream.read(FRAMES_PER_BUFFER, exception_on_overflow=False)
    audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))

    # BUG FIX: the window logic previously operated on the raw bytes
    # `audio_chunk` instead of the accumulated `audio_data` list.
    if monitoring and len(audio_data) >= CHUNKS_PER_WINDOW:
        audio_data_chunk = np.concatenate(audio_data[:CHUNKS_PER_WINDOW])
        print("PROCESSING BY MODEL GOES HERE")
        # if model is not None:
        #     input_tensor = torch.tensor(audio_data_chunk)
        #     with torch.no_grad():
        #         output = model(input_tensor)

        # Slide the window forward by one chunk (was mutating audio_chunk).
        audio_data = audio_data[1:]

# Release the microphone and PortAudio resources once monitoring stops.
stream.stop_stream()
stream.close()
p.terminate()