AbdullaShafeeg committed on
Commit
94ec7f8
·
1 Parent(s): 83e3810

app update

Browse files
Files changed (1) hide show
  1. app.py +17 -32
app.py CHANGED
@@ -1,12 +1,5 @@
1
  import streamlit as st
2
- import websockets
3
- import asyncio
4
- import base64
5
- import json
6
- import pyaudio
7
- import os
8
- from pathlib import Path
9
- import numpy as np
10
 
11
  # Session state
12
  if 'text' not in st.session_state:
@@ -17,19 +10,11 @@ if 'text' not in st.session_state:
17
  st.sidebar.header('Audio Parameters')
18
 
19
  FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))
20
- FORMAT = pyaudio.paInt16
21
  CHANNELS = 1
22
  RATE = int(st.sidebar.text_input('Rate', 16000))
23
- p = pyaudio.PyAudio()
24
 
25
  # Open an audio stream
26
- stream = p.open(
27
- format=FORMAT,
28
- channels=CHANNELS,
29
- rate=RATE,
30
- input=True,
31
- frames_per_buffer=FRAMES_PER_BUFFER
32
- )
33
 
34
  monitoring = False
35
  audio_data = []
@@ -58,19 +43,19 @@ col3.button('Stop', on_click=stop_monitoring)
58
  record_button = col3.button('Monitor')
59
 
60
  while st.session_state.run:
61
- audio_chunk = stream.read(FRAMES_PER_BUFFER)
62
- audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))
63
-
64
- if monitoring and len(audio_chunk) >= RATE // FRAMES_PER_BUFFER:
65
- audio_data_chunk = np.concatenate(audio_chunk[:RATE//FRAMES_PER_BUFFER])
66
- print("PROCESSING BY MODEL GOES HERE")
67
- # if model is not None:
68
- # input_tensor = torch.tensor(audio_data_chunk)
69
- # with torch.no_grad():
70
- # output = model(input_tensor)
 
 
 
 
71
 
72
- audio_chunk = audio_chunk[1:]
73
-
74
- stream.stop_stream()
75
- stream.close()
76
- p.terminate()
 
1
  import streamlit as st
2
+ from st_audiorec import st_audiorec
 
 
 
 
 
 
 
3
 
4
  # Session state
5
  if 'text' not in st.session_state:
 
10
  st.sidebar.header('Audio Parameters')
11
 
12
  FRAMES_PER_BUFFER = int(st.sidebar.text_input('Frames per buffer', 3200))
13
+ FORMAT = 'audio/wav'
14
  CHANNELS = 1
15
  RATE = int(st.sidebar.text_input('Rate', 16000))
 
16
 
17
  # Open an audio stream
 
 
 
 
 
 
 
18
 
19
  monitoring = False
20
  audio_data = []
 
43
  record_button = col3.button('Monitor')
44
 
45
  while st.session_state.run:
46
+ wav_audio_data = st_audiorec()
47
+ if wav_audio_data is not None:
48
+ st.audio(wav_audio_data, format=FORMAT, sample_rate=16000)
49
+
50
+ # audio_chunk = stream.read(FRAMES_PER_BUFFER)
51
+ # audio_data.append(np.frombuffer(audio_chunk, dtype=np.int16))
52
+
53
+ # if monitoring and len(audio_chunk) >= RATE // FRAMES_PER_BUFFER:
54
+ # audio_data_chunk = np.concatenate(audio_chunk[:RATE//FRAMES_PER_BUFFER])
55
+ # print("PROCESSING BY MODEL GOES HERE")
56
+ # # if model is not None:
57
+ # # input_tensor = torch.tensor(audio_data_chunk)
58
+ # # with torch.no_grad():
59
+ # # output = model(input_tensor)
60
 
61
+ # audio_chunk = audio_chunk[1:]