# Snore-Detector / app.py
import streamlit as st
from st_audiorec import st_audiorec
import matplotlib.pyplot as plt
import sounddevice as sd
import numpy as np
import pandas as pd
import torch
import torchaudio
import wave
import io
from scipy.io import wavfile
import pydub
import time
# MODEL LOADING and INITIALISATION
model = torch.jit.load("snorenetv1_small.ptl")
model.eval()
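# Assumed model contract (inferred from the chunking loop further down, not from
# official documentation): the TorchScript model takes a (1, 16000) float32 tensor,
# i.e. one second of 16 kHz mono audio, and returns two scores per chunk, where
# index 0 is treated as non-snore ("other") and index 1 as snore.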
# SIDEBAR AND PAGE LAYOUT
st.sidebar.markdown(
"""
<div align="justify">
<h4>ABOUT</h4>
<p>Transform your sleep experience with the cutting-edge Snore Detector by Hypermind Labs!
Discover the power to monitor and understand your nighttime sounds like never before.
Take control of your sleep quality and uncover the secrets of your peaceful slumber with our innovative app.</p>
</div>
""",
unsafe_allow_html=True,
)
st.title('Real-Time Snore Detection App 😴')
upload_file = st.file_uploader("Upload wav file", type=["wav"])
if upload_file is not None:
    file_details = {
        "Filename": upload_file.name,
        "Filesize": f"{upload_file.size / 1024:.2f} KB",
        "File Type": upload_file.type,
    }
    st.write("File Details:", file_details)
# wav_audio_data = None
# if wav_audio_data is not None:
# data = np.frombuffer(wav_audio_data, dtype=np.int16)
# st.write(len(data))
# duration = len(data)//110000
# num_of_samples = len(data)
# sample_rate = num_of_samples // duration
# # data = np.array(wav_audio_data, dtype=float)
# max_abs_value = np.max(np.abs(data))
# np_array = (data/max_abs_value) * 32767
# scaled_data = np_array.astype(np.int16).tobytes()
# with io.BytesIO() as fp, wave.open(fp, mode="wb") as waveobj:
# waveobj.setnchannels(1)
# waveobj.setframerate(96000)
# waveobj.setsampwidth(2)
# waveobj.setcomptype("NONE", "NONE")
# waveobj.writeframes(scaled_data)
# wav_make = fp.getvalue()
# with open("output.wav", 'wb') as wav_file:
# wav_file.write(wav_make)
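    # Analysis pipeline: re-encode the upload with pydub, read it back with
    # scipy.io.wavfile, then score the waveform one second at a time. This assumes
    # the uploaded WAV is already 16 kHz mono; other sample rates are not resampled.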
    file_uploaded = False
    if not file_uploaded:
        st.write("Uploading File.....")
        audio = pydub.AudioSegment.from_wav(upload_file)
        file_uploaded = True
        time.sleep(2)
        st.write("File Uploaded!")

    if file_uploaded:
        st.write("Analysing...")
        audio.export(upload_file.name, format='wav')
        sr, waveform = wavfile.read(upload_file.name)
        # Score the recording one second (16000 samples) at a time
        snore = 0
        other = 0
        s = 0
        n = 16000
        endReached = False
        with torch.no_grad():
            while not endReached:
                input_tensor = torch.tensor(waveform[s:n]).unsqueeze(0).to(torch.float32)
                result = model(input_tensor)
                if np.abs(result[0][0]) > np.abs(result[0][1]):
                    other += 1
                else:
                    snore += 1
                s += 16000
                n += 16000
                if n > len(waveform):
                    endReached = True
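        # Note: a trailing chunk shorter than one second is skipped by the loop above
        # (unless the whole file is shorter than a second, in which case that single
        # short chunk is the only one scored).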
        # PERCENTAGE OF SNORING PLOT
        total = snore + other
        snore_percentage = (snore / total) * 100
        other_percentage = (other / total) * 100

        categories = ["Snore", "Other"]
        percentages = [snore_percentage, other_percentage]

        fig, ax = plt.subplots(figsize=(8, 4))
        ax.barh(categories, percentages, color=['#ff0033', '#00ffee'])
        ax.set_xlabel('Percentage')
        ax.set_title('Percentage of Snoring')
        ax.set_xlim(0, 100)
        for i, percentage in enumerate(percentages):
            ax.text(percentage, i, f' {percentage:.2f}%', va='center')

        st.write("DONE")
        st.pyplot(fig)