# Snore-detection Streamlit app (Hypermind Labs).
import streamlit as st
from st_audiorec import st_audiorec
import matplotlib.pyplot as plt
import sounddevice as sd
import numpy as np
import pandas as pd
import torch
# import torchaudio
import wave
import io
from scipy.io import wavfile
import pydub
import time
import os
import atexit
import librosa
# MODEL LOADING and INITIALISATION
def load_model():
    """Load the TorchScript snore classifier from disk and set it to eval mode."""
    snore_net = torch.jit.load("snorenetv1_small.ptl")
    snore_net.eval()
    return snore_net


model = load_model()
# Audio parameters
def process_data(waveform_chunks):
    """Run the model over each audio chunk and tally the two predicted classes.

    Each chunk is converted to a float32 tensor with a leading batch
    dimension before inference.  Returns a ``(snore, other)`` tuple of
    chunk counts.
    """
    snore_count = 0
    other_count = 0
    for chunk in waveform_chunks:
        batch = torch.tensor(chunk).unsqueeze(0).to(torch.float32)
        scores = model(batch)
        # NOTE(review): compares absolute scores of the two outputs —
        # presumably index 1 is the snore class; confirm against the model.
        if np.abs(scores[0][0]) > np.abs(scores[0][1]):
            other_count += 1
        else:
            snore_count += 1
    return snore_count, other_count
# Sidebar: static "about" blurb, rendered as raw HTML.
_ABOUT_HTML = """
<div align="justify">
<h4>ABOUT</h4>
<p>Transform your sleep experience with the cutting-edge Snore Detector by Hypermind Labs!
Discover the power to monitor and understand your nighttime sounds like never before.
Take control of your sleep quality and uncover the secrets of your peaceful slumber with our innovative app.</p>
</div>
"""
st.sidebar.markdown(_ABOUT_HTML, unsafe_allow_html=True)
# MAIN PAGE: upload a WAV sample and report what fraction of its
# one-second chunks the model classifies as snoring.
st.title('Real-Time Snore Detection App 😴')

uploaded_file = st.file_uploader("Upload Sample", type=["wav"])
if uploaded_file is not None:
    st.write("Analysing...")  # fixed typo ("Analsysing")
    audio_bytes = uploaded_file.getvalue()

    # Parse the WAV container properly: np.frombuffer on the raw upload
    # would also decode the RIFF/WAV header bytes as if they were samples.
    _sample_rate, audio_array = wavfile.read(io.BytesIO(audio_bytes))
    if audio_array.ndim > 1:
        # Stereo upload: analyse the first channel only.
        audio_array = audio_array[:, 0]

    chunk_size = 16000  # samples per chunk — presumably 1 s at 16 kHz; confirm vs. model
    num_chunks = len(audio_array) // chunk_size
    if num_chunks == 0:
        # Guard: np.array_split raises on 0 sections, and the percentage
        # math below would divide by zero for clips shorter than one chunk.
        st.error("Audio sample is too short to analyse "
                 f"(needs at least {chunk_size} samples).")
    else:
        waveform_chunks = np.array_split(
            audio_array[:num_chunks * chunk_size], num_chunks)
        snore, other = process_data(waveform_chunks)

        total = snore + other
        snore_percentage = (snore / total) * 100
        other_percentage = (other / total) * 100

        categories = ["Snore", "Other"]
        percentages = [snore_percentage, other_percentage]

        st.write(f'Snore Percentage: {snore_percentage:.2f}')

        # PERCENTAGE OF SNORING PLOT
        plt.figure(figsize=(8, 4))
        plt.barh(categories, percentages, color=['#ff0033', '#00ffee'])
        plt.xlabel('Percentage')
        plt.title('Percentage of Snoring')
        plt.xlim(0, 100)
        for i, percentage in enumerate(percentages):
            plt.text(percentage, i, f' {percentage:.2f}%', va='center')
        st.write("DONE")
        st.pyplot(plt)
# # PERCENTAGE OF SNORING PLOT