Update app.py
app.py CHANGED
@@ -72,6 +72,8 @@ if 'should_rerun' not in st.session_state:
     st.session_state['should_rerun'] = False
 if 'old_val' not in st.session_state:
     st.session_state['old_val'] = None
+if 'audio_generated' not in st.session_state:
+    st.session_state['audio_generated'] = {}
 
 # 🎨 4. Custom CSS
 st.markdown("""
@@ -179,7 +181,7 @@ def create_file(prompt, response, file_type="md"):
     """Create file with intelligent naming"""
     filename = generate_filename(prompt.strip(), response.strip(), file_type)
     with open(filename, 'w', encoding='utf-8') as f:
-        f.write(prompt + "\n\n" + response)
+        f.write(prompt.replace('\n', ' ') + "\n\n" + response)
     return filename
 
 def get_download_link(file):
@@ -347,9 +349,10 @@ def parse_arxiv_refs(ref_text: str):
                 'summary': summary,
                 'year': year
             })
+            if len(results) >= 20:
+                break
     return results
 
-
 def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
                       titles_summary=True, full_audio=False, selected_voice="en-US-AriaNeural"):
     """Perform Arxiv search and generate audio summaries."""
@@ -361,12 +364,12 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
 
     # 🎯 2) Combine for final text output
-    result = f"### ๐ {q}\n\n{r2}\n\n{refs}"
+    result = f"### ๐ {q.replace('\n', ' ')}\n\n{r2}\n\n{refs}"
     st.markdown(result)
 
     # 🎯 3) Generate "all at once" audio if requested
     if full_audio:
-        complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
+        complete_text = f"Complete response for query: {q.replace('\n', ' ')}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
         audio_file_full = speak_with_edge_tts(complete_text, selected_voice)
         st.write("### ๐ Full Audio")
         play_and_download_audio(audio_file_full)
@@ -389,35 +392,32 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     # --------------------------------------
     parsed_refs = parse_arxiv_refs(refs)
 
-    # Sort by year descending (put None at bottom)
-    parsed_refs.sort(key=lambda x: x["year"] if x["year"] else 0, reverse=True)
-
     st.write("## Individual Papers (Most Recent First)")
     for idx, paper in enumerate(parsed_refs):
-
-        st.
[removed lines 399-415 are truncated in this view]
+        section_key = f"section_{idx}"
+        with st.expander(f"{idx+1}. {paper['title']} - {paper['year'] if paper['year'] else 'Unknown Year'}", expanded=False, key=section_key):
+            st.markdown(f"**Summary:** {paper['summary']}")
+            colA, colB = st.columns(2)
+            with colA:
+                if st.checkbox(f"Generate Audio for Title", key=f"gen_title_{idx}"):
+                    if f"title_audio_{idx}" not in st.session_state['audio_generated']:
+                        text_tts = clean_for_speech(paper['title'])
+                        audio_file_title = speak_with_edge_tts(text_tts, selected_voice)
+                        st.session_state['audio_generated'][f"title_audio_{idx}"] = audio_file_title
+                    play_and_download_audio(st.session_state['audio_generated'].get(f"title_audio_{idx}"))
+            with colB:
+                if st.checkbox(f"Generate Audio for Title + Summary", key=f"gen_summary_{idx}"):
+                    if f"summary_audio_{idx}" not in st.session_state['audio_generated']:
+                        text_tts = clean_for_speech(paper['title'] + ". " + paper['summary'])
+                        audio_file_title_summary = speak_with_edge_tts(text_tts, selected_voice)
+                        st.session_state['audio_generated'][f"summary_audio_{idx}"] = audio_file_title_summary
+                    play_and_download_audio(st.session_state['audio_generated'].get(f"summary_audio_{idx}"))
+            st.write("---")
 
     # Keep your original block for "Titles Only" if you want:
     if titles_summary:
         titles = []
-        for line in refs.split('\n'):
+        for line in refs.split('\n')[:20]:
            m = re.search(r"\[([^\]]+)\]", line)
            if m:
                titles.append(m.group(1))
@@ -432,7 +432,7 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
 
     # Always create a file with the result
-    create_file(q, result, "md")
+    create_file(q.replace('\n', ' '), result, "md")
 
     return result
 
@@ -588,7 +588,7 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
 # 🎯 11. Main Application
 async def get_available_voices():
     voices = await edge_tts.list_voices()
-    return [voice["ShortName"] for voice in voices]
+    return [voice["ShortName"] for voice in voices if voice["Locale"].startswith("en")]
 
 @st.cache_resource
 def fetch_voices():
@@ -661,7 +661,7 @@ def main():
 
     if tab_main == "๐ ArXiv":
         st.subheader("๐ Query ArXiv")
-        q = st.text_input("๐ Query:")
+        q = st.text_input("๐ Query:").replace('\n', ' ')
 
         st.markdown("### ๐ Options")
         vocal_summary = st.checkbox("๐ShortAudio", value=True)
@@ -679,7 +679,7 @@ def main():
             save_full_transcript(q, result)
 
         st.markdown("### Change Prompt & Re-Run")
-        q_new = st.text_input("๐ Modify Query:")
+        q_new = st.text_input("๐ Modify Query:").replace('\n', ' ')
         if q_new and st.button("๐ Re-Run with Modified Query"):
             result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
                                        titles_summary=titles_summary, full_audio=full_audio, selected_voice=selected_voice)
@@ -711,7 +711,7 @@ def main():
         if imgs:
             c = st.slider("Cols",1,5,3)
             cols = st.columns(c)
-            for i,f in enumerate(imgs):
+            for i,f in enumerate(imgs[:20]):
                 with cols[i%c]:
                     st.image(Image.open(f),use_container_width=True)
                     if st.button(f"๐ Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
@@ -720,7 +720,7 @@ def main():
         else:
            st.write("No images found.")
     with tabs[1]:
-        vids = glob.glob("*.mp4")
+        vids = glob.glob("*.mp4")[:20]
        if vids:
            for v in vids:
                with st.expander(f"🎥 {os.path.basename(v)}"):
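For readers skimming the diff, the core change is the audio cache keyed in st.session_state['audio_generated'], which keeps the app from re-synthesizing the same title or summary on every Streamlit rerun. Below is a minimal, self-contained sketch of that caching pattern; the stub_tts helper and the sample papers list are illustrative stand-ins, not the app's real speak_with_edge_tts, play_and_download_audio, or parse_arxiv_refs.

import streamlit as st

def stub_tts(text: str) -> str:
    """Stand-in for speak_with_edge_tts(); returns a fake audio file path."""
    return f"audio_{abs(hash(text))}.mp3"

# One dict survives across reruns; its keys record which audio was already generated.
if 'audio_generated' not in st.session_state:
    st.session_state['audio_generated'] = {}

papers = [{"title": "Paper A", "summary": "Summary A"},
          {"title": "Paper B", "summary": "Summary B"}]

for idx, paper in enumerate(papers):
    if st.checkbox(f"Generate audio for {paper['title']}", key=f"gen_{idx}"):
        cache_key = f"title_audio_{idx}"
        # Synthesize only on the first request; later reruns reuse the cached path.
        if cache_key not in st.session_state['audio_generated']:
            st.session_state['audio_generated'][cache_key] = stub_tts(paper['title'])
        st.write(st.session_state['audio_generated'][cache_key])

Because the dict lives in session state, interacting with any other widget triggers a rerun without paying the text-to-speech cost again.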