HyperMind-Labs committed on
Commit cda85cb · 1 Parent(s): d008cad

Updated code for testing

Files changed (1)
  1. app.py +73 -2
app.py CHANGED
@@ -1,5 +1,76 @@
  import streamlit as st
+ import websockets
+ import asyncio
+ import base64
+ import json
+ import pyaudio
+ import os
+ from pathlib import Path
+ import numpy as np

+ # Session state
+ if 'text' not in st.session_state:
+     st.session_state['text'] = 'Listening...'
+     st.session_state['run'] = False

- x = st.slider('Select a value')
- st.write(x, 'squared is', x*x)
+ # Audio parameters
+ st.sidebar.header('Audio Parameters')
+
+ FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))
+ FORMAT = pyaudio.paInt16
+ CHANNELS = 1
+ RATE = int(st.sidebar.text_input('Rate', 16000))
+ p = pyaudio.PyAudio()
+
+ # Open an audio stream
+ stream = p.open(
+     format=FORMAT,
+     channels=CHANNELS,
+     rate=RATE,
+     input=True,
+     frames_per_buffer=FRAMES_PER_BUFFER
+ )
+
+ monitoring = False
+ audio_data = []
+
+ def start_monitoring():
+     global monitoring
+     st.session_state['run'] = True
+     monitoring = True
+
+ def stop_monitoring():
+     global monitoring
+     st.session_state['run'] = False
+     monitoring = False
+
+ st.title('🎙️ Real-Time Snore Detection App')
+
+ with st.expander('About this App'):
+     st.markdown('''
+     This Streamlit app from Hypermind Labs helps users detect
+     how much they snore during sleep.
+     ''')
+
+ col1, col2, col3 = st.columns(3)
+ col1.button('Start', on_click=start_monitoring)
+ col3.button('Stop', on_click=stop_monitoring)
+ record_button = col2.button('Monitor')
+
+ while st.session_state.run:
+     audio_chunk = stream.read(FRAMES_PER_BUFFER)
+     audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))
+
+     if monitoring and len(audio_data) >= RATE // FRAMES_PER_BUFFER:
+         audio_data_chunk = np.concatenate(audio_data[:RATE // FRAMES_PER_BUFFER])
+         print("PROCESSING BY MODEL GOES HERE")
+         # if model is not None:
+         #     input_tensor = torch.tensor(audio_data_chunk)
+         #     with torch.no_grad():
+         #         output = model(input_tensor)
+
+         audio_data = audio_data[1:]
+
+ stream.stop_stream()
+ stream.close()
+ p.terminate()