import streamlit as st
import time as t
from transformers import pipeline
from pydub import AudioSegment, silence
#import speech_recognition as sr
#pipe = pipeline('sentiment-analysis')
#text = st.text_area('Enter your notes')
#if text:
# out = pipe(text)
# st.json(out)
st.markdown("<h1 style = text align:center;'> Group Therapy Notes </h1>",unsafe_allow_html = True)
st.markdown("---",unsafe_allow_html=True)
audio=st.file_uploader("Upload Your Audio File", type=['mp3','wav','m4a'])
if audio:
    # Load a pretrained wav2vec2 speech-recognition pipeline.
    pipe = pipeline(model="facebook/wav2vec2-base-960h")
    # The uploaded file is a Streamlit UploadedFile, so convert it to a WAV file
    # with pydub before handing a path to the pipeline.
    AudioSegment.from_file(audio).export("audio.wav", format="wav")
    # stride_length_s is a tuple of the left and right stride lengths.
    # With only one number, both sides get the same stride; by default
    # the stride on each side is 1/6th of chunk_length_s.
    output = pipe("audio.wav", chunk_length_s=10, stride_length_s=(4, 2))
    st.json(output)
    # Unfinished alternative: split the recording on silence with pydub and
    # transcribe each chunk separately.
    # audio_segment = AudioSegment.from_file(audio)
    # chunks = silence.split_on_silence(audio_segment, min_silence_len=500,
    #                                   silence_thresh=audio_segment.dBFS - 20, keep_silence=100)
    # for index, chunk in enumerate(chunks):
    #     chunk.export(str(index) + ".wav", format="wav")
    #     output = pipe(str(index) + ".wav", chunk_length_s=10, stride_length_s=(4, 2))
    #     st.json(output)