audio caching bug
app.py CHANGED
@@ -69,7 +69,6 @@ def youtube_video_id(value):
 
 @st.cache_data
 def process_youtube_link(youtube_link):
-    st.write(f"Fetching audio from YouTube: {youtube_link}")
     try:
         yt = YouTube(youtube_link)
         audio_stream = yt.streams.filter(only_audio=True).first()
@@ -92,7 +91,7 @@ def process_youtube_link(youtube_link):
 def load_rttm_file(rttm_path):
     return load_rttm(rttm_path)['stream']
 
-
+
 def load_audio(uploaded_audio):
     return AudioSegment.from_file(uploaded_audio)
 
@@ -158,7 +157,7 @@ with st.sidebar:
     container_transcript_chat = st.container()
 
 # Source Selection
-option = st.radio("Select source:", [
+option = st.radio("Select source:", [ "Use YouTube link","Example"], index=1)
 
 # Upload audio file
 if option == "Upload an audio file":
@@ -182,16 +181,20 @@ if option == "Upload an audio file":
 # use youtube link
 elif option == "Use YouTube link":
 
-    with st.form('youtube-link'
+    with st.form('youtube-link'):
         youtube_link_raw = st.text_input("Enter the YouTube video URL:")
         youtube_link = f'https://youtu.be/{youtube_video_id(youtube_link_raw)}'
 
         if st.form_submit_button(): # reset variables on new link submit
+            process_youtube_link.clear()
             st.session_state.messages = initial_prompt
             st.session_state.rttm = None
             st.session_state.transcript_file = None
             st.session_state.prompt_request_counter = 0
 
+    with container_transcript_chat:
+        st.empty()
+
     # with st.expander('Optional Parameters'):
     # st.session_state.rttm = st.file_uploader("Upload .rttm if you already have one", type=["rttm"])
     # st.session_state.transcript_file = st.file_uploader("Upload transcipt json", type=["json"])
@@ -358,20 +361,20 @@ if "audio" in locals():
     transcript_string = '\n'.join([f"{s['speaker']} start={s['start']:.1f}s end={s['end']:.1f}s : {s['transcript']}" for s in transcript_json])
 
     @st.cache_data
-    def get_initial_response(
-        st.session_state.messages[1]['content'] = st.session_state.messages[1]['content'].format(transcript)
+    def get_initial_response(messages):
         initial_response = openai.ChatCompletion.create(
             model=st.session_state["openai_model"],
-            messages=
+            messages=messages
         )
         return initial_response['choices'][0]['message']['content']
 
     # Chat container
     with container_transcript_chat:
         # get a summary of transcript from ChatGpt
-        init = get_initial_response(transcript_string)
-        # pass transcript to initial prompt
         st.session_state.messages[1]['content'] = st.session_state.messages[1]['content'].format(transcript_string)
+        init = get_initial_response(st.session_state.messages)
+        # pass transcript to initial prompt
+
 
     # LLM Chat
     with st.expander('Summary of the Transcribed Audio File Generated by ChatGPT', expanded = True):
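
The fix relies on two documented behaviours of st.cache_data: a cached function's arguments form its cache key, and its per-function cache can be emptied with func.clear(). Below is a minimal sketch of that pattern, assuming Streamlit >= 1.18; fetch_audio is a hypothetical stand-in for process_youtube_link, not code from app.py.

import streamlit as st

@st.cache_data
def fetch_audio(url: str) -> str:
    # Stand-in for the expensive download step; results are memoised
    # per distinct url, so reruns with the same link reuse the cached value.
    return f"audio for {url}"

with st.form("youtube-link"):
    url = st.text_input("Enter the YouTube video URL:")
    if st.form_submit_button():
        # On a fresh submission, drop every cached fetch_audio result so the
        # new link is downloaded instead of reusing a stale cached entry.
        fetch_audio.clear()

if url:
    st.write(fetch_audio(url))

The change to get_initial_response follows the same logic: by taking messages as a parameter instead of reading st.session_state inside the cached function, the prompt becomes part of the cache key, so a new transcript yields a fresh summary rather than a stale cached one.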