import streamlit as st
import numpy as np
import torch
import librosa
# import matplotlib.pyplot as plt  # only needed if the plotting block below is re-enabled
# Audio parameters: the model consumes one-second chunks at 16 kHz
CHUNK_SIZE = 16000
def process_data(waveform_chunks):
    """Classify each one-second chunk as snore vs. other and tally the counts."""
    # MODEL LOADING and INITIALISATION
    model = torch.jit.load("snorenetv1_small.ptl")
    model.eval()
    snore = 0
    other = 0
    with torch.no_grad():
        for chunk in waveform_chunks:
            # Add a batch dimension: (16000,) -> (1, 16000)
            input_tensor = torch.tensor(chunk).unsqueeze(0)
            result = model(input_tensor)
            # The class with the larger absolute score wins
            if np.abs(result[0][0]) > np.abs(result[0][1]):
                other += 1
            else:
                snore += 1
    return snore, other
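
# A minimal caching sketch (not part of the original app): Streamlit re-runs
# this whole script on every interaction, so process_data re-loads the
# TorchScript model from disk each time. With Streamlit >= 1.18,
# st.cache_resource could load it once per session instead:
# @st.cache_resource
# def load_model(path="snorenetv1_small.ptl"):
#     m = torch.jit.load(path)
#     m.eval()
#     return m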
st.sidebar.markdown(
    """
<div align="justify">
<h4>ABOUT</h4>
<p>Transform your sleep experience with the cutting-edge Snore Detector by Hypermind Labs!
Discover the power to monitor and understand your nighttime sounds like never before.
Take control of your sleep quality and uncover the secrets of your peaceful slumber with our innovative app.</p>
</div>
""",
    unsafe_allow_html=True,
)
st.title('Real-Time Snore Detection App 😴')
uploaded_file = st.file_uploader("Upload Sample", type=["wav"])
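
# Sketch of a live-recording path (not wired up here): the st_audiorec
# component returns WAV bytes recorded in the browser, which could feed the
# same analysis as an uploaded file. Assumes `from st_audiorec import
# st_audiorec` and `import io`.
# wav_audio_data = st_audiorec()
# if wav_audio_data is not None:
#     audio, sample_rate = librosa.load(io.BytesIO(wav_audio_data), sr=None)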
if uploaded_file is not None:
    st.write("Analysing...")
    # Decode the upload; sr=None keeps the file's native sample rate
    audio, sample_rate = librosa.load(uploaded_file, sr=None)
    waveform = audio
    # Split the waveform into whole one-second chunks, dropping any trailing
    # partial chunk
    num_chunks = len(waveform) // CHUNK_SIZE
    if num_chunks == 0:
        st.error("Recording is shorter than one second; please upload a longer clip.")
        st.stop()
    waveform_chunks = np.array_split(waveform[:num_chunks * CHUNK_SIZE], num_chunks)
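    # Example of the chunk arithmetic: a 10.5 s clip at 16 kHz has 168,000
    # samples, so num_chunks = 168000 // 16000 = 10, and the final 8,000
    # samples (the partial second) are discarded.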
st.write("Reached stage 3")
snore, other = process_data(waveform_chunks)
total = snore + other
snore_percentage = (snore / total) * 100
other_percentage = (other / total) * 100
categories = ["Snore", "Other"]
percentages = [snore_percentage, other_percentage]
st.write(f'Snore Percentage: {snore_percentage}')
    # PERCENTAGE OF SNORING PLOT (currently disabled)
    # plt.figure(figsize=(8, 4))
    # plt.barh(categories, percentages, color=['#ff0033', '#00ffee'])
    # plt.xlabel('Percentage')
    # plt.title('Percentage of Snoring')
    # plt.xlim(0, 100)
    # for i, percentage in enumerate(percentages):
    #     plt.text(percentage, i, f' {percentage:.2f}%', va='center')
    # st.pyplot(plt)
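    # A lighter-weight charting sketch (not part of the original app):
    # Streamlit's built-in bar chart could render the same percentages
    # without matplotlib, assuming pandas is imported as pd.
    # st.bar_chart(pd.DataFrame({"Percentage": percentages}, index=categories))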