Spaces:
Runtime error
on1onmangoes committed on
Commit • 461a633
1 Parent(s): fc9e91e
Update app.py
app.py CHANGED
@@ -1,14 +1,15 @@
 import streamlit as st
+import time as t
 from transformers import pipeline
 from pydub import AudioSegment, silence
 #import speech_recognition as sr
 
-pipe = pipeline('sentiment-analysis')
-text = st.text_area('Enter your notes')
+#pipe = pipeline('sentiment-analysis')
+#text = st.text_area('Enter your notes')
 
-if text:
-    out = pipe(text)
-    st.json(out)
+#if text:
+#    out = pipe(text)
+#    st.json(out)
 
 st.markdown("<h1 style = text align:center;'> Group Therapy Notes </h1>",unsafe_allow_html = True)
 st.markdown("---",unsafe_allow_html=True)
@@ -20,19 +21,11 @@ if audio:
     # stride_length_s is a tuple of the left and right stride length.
     # With only 1 number, both sides get the same stride, by default
     # the stride_length on one side is 1/6th of the chunk_length_s
-
-    output = pipe(audio_segment, chunk_length_s=10, stride_length_s=(4, 2))
-
-
-    # st.json(output)
-
-    # asr = pipeline('automatic-speech-recognition')
-    # asr_out = asr (audio_segment)
-    # st.json(asr_out)
-
-
-    # chunks=silence.split_on_silence(audio_segment, min_silence_len=500, silence_thresh= audio_segment.dBFS-20,keep_silence=100)
-    # for index, chunk in enumerate (chunks):
+
     # chunk.export(str(index)+".wav", format="wav")
-
-
+    audio_segment= AudioSegment.from_file(audio)
+    chunks=silence.split_on_silence(audio_segment, min_silence_len=500, silence_thresh= audio_segment.dBFS-20,keep_silence=100)
+    for index, chunk in enumerate (chunks):
+        #output = pipe(audio_segment, chunk_length_s=10, stride_length_s=(4, 2))
+        print (chunk)
+
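For context, a minimal sketch of how the silence-split chunks produced by this commit could be fed to the automatic-speech-recognition pipeline hinted at in the removed comments. The uploader label, accepted file types, and per-chunk export path are assumptions for illustration, not part of the committed app.py:

import streamlit as st
from pydub import AudioSegment, silence
from transformers import pipeline

# Assumed sketch: transcribe each silence-delimited chunk with the default
# automatic-speech-recognition pipeline.
asr = pipeline('automatic-speech-recognition')

audio = st.file_uploader('Upload session audio', type=['wav', 'mp3'])  # assumed uploader
if audio:
    audio_segment = AudioSegment.from_file(audio)
    chunks = silence.split_on_silence(
        audio_segment,
        min_silence_len=500,                     # >= 500 ms of quiet ends a chunk
        silence_thresh=audio_segment.dBFS - 20,  # 20 dB below average loudness counts as silence
        keep_silence=100,                        # keep 100 ms of padding around each chunk
    )
    for index, chunk in enumerate(chunks):
        path = str(index) + ".wav"
        chunk.export(path, format="wav")         # the ASR pipeline accepts a filename
        st.json(asr(path))

The retained context comments describe the pipeline's built-in long-form chunking instead; that route skips pydub and passes chunking parameters directly, e.g. asr(path, chunk_length_s=10, stride_length_s=(4, 2)).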