Delete pages/3_audio_filter.py
pages/3_audio_filter.py  +0 -40

pages/3_audio_filter.py  DELETED
@@ -1,40 +0,0 @@
-import av
-import numpy as np
-import pydub
-import streamlit as st
-from streamlit_webrtc import WebRtcMode, webrtc_streamer
-
-from sample_utils.turn import get_ice_servers
-
-gain = st.slider("Gain", -10.0, +20.0, 1.0, 0.05)
-
-
-def process_audio(frame: av.AudioFrame) -> av.AudioFrame:
-    raw_samples = frame.to_ndarray()
-    sound = pydub.AudioSegment(
-        data=raw_samples.tobytes(),
-        sample_width=frame.format.bytes,
-        frame_rate=frame.sample_rate,
-        channels=len(frame.layout.channels),
-    )
-
-    sound = sound.apply_gain(gain)
-
-    # Ref: https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples  # noqa
-    channel_sounds = sound.split_to_mono()
-    channel_samples = [s.get_array_of_samples() for s in channel_sounds]
-    new_samples: np.ndarray = np.array(channel_samples).T
-    new_samples = new_samples.reshape(raw_samples.shape)
-
-    new_frame = av.AudioFrame.from_ndarray(new_samples, layout=frame.layout.name)
-    new_frame.sample_rate = frame.sample_rate
-    return new_frame
-
-
-webrtc_streamer(
-    key="audio-filter",
-    mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": get_ice_servers()},
-    audio_frame_callback=process_audio,
-    async_processing=True,
-)