Spaces: Running on CPU Upgrade
Update app.py
app.py CHANGED
@@ -167,7 +167,8 @@ def generate_filename(prompt, response, file_type="md"):
Generate filename with meaningful terms and short dense clips from prompt & response.
The filename should be about 150 chars total, include high-info terms, and a clipped snippet.
"""
-
+ timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
+ prefix = f"{timestamp}_"
combined = (prompt + " " + response).strip()
info_terms = get_high_info_terms(combined, top_n=10)

@@ -193,6 +194,24 @@ def create_file(prompt, response, file_type="md"):
f.write(prompt + "\n\n" + response)
return filename

+ def append_to_transcript(entry: dict):
+ """Append a new entry at the top of transcript.md"""
+ transcript_file = "transcript.md"
+ new_entry = f"1. **Input:** {entry['input']}\n\n **Output:** {entry['output']}\n\n **Files:**\n"
+ for file in entry['files']:
+ emoji = FILE_EMOJIS.get(file.split('.')[-1], '')
+ new_entry += f" - {emoji} [{os.path.basename(file)}]({file})\n"
+ new_entry += "\n---\n\n"
+
+ if os.path.exists(transcript_file):
+ with open(transcript_file, 'r', encoding='utf-8') as f:
+ existing_content = f.read()
+ else:
+ existing_content = ""
+
+ with open(transcript_file, 'w', encoding='utf-8') as f:
+ f.write(new_entry + existing_content)
+
def get_download_link(file, file_type="zip"):
"""Generate download link for file"""
with open(file, "rb") as f:

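For reference, a minimal sketch of the Markdown block this new helper prepends to transcript.md, using made-up values and an assumed FILE_EMOJIS mapping (the real dict is defined elsewhere in app.py):

import os

FILE_EMOJIS = {"md": "📝", "mp3": "🎵"}  # assumed mapping, for illustration only
entry = {"input": "example query", "output": "example answer", "files": ["240101_example.md"]}

block = f"1. **Input:** {entry['input']}\n\n **Output:** {entry['output']}\n\n **Files:**\n"
for file in entry["files"]:
    emoji = FILE_EMOJIS.get(file.split(".")[-1], "")
    block += f" - {emoji} [{os.path.basename(file)}]({file})\n"
block += "\n---\n\n"
print(block)  # append_to_transcript writes this block above any existing transcript content
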
@@ -212,6 +231,7 @@ def get_download_link(file, file_type="zip"):
def clean_for_speech(text: str) -> str:
"""Clean text for speech synthesis"""
text = text.replace("\n", " ")
+ text = text.replace("\r", " ")
text = text.replace("</s>", " ")
text = text.replace("#", "")
text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)

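An illustrative call, assuming the remainder of clean_for_speech (not shown in this hunk) does no further rewriting; newlines, carriage returns, '#' characters, '</s>' markers and parenthesized URLs are all stripped before text-to-speech:

sample = "# Results\r\nSee (https://arxiv.org/abs/2401.00001) for details.</s>"
print(clean_for_speech(sample))  # roughly " Results  See  for details. "
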
@@ -239,7 +259,7 @@ async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=
rate_str = f"{rate:+d}%"
pitch_str = f"{pitch:+d}Hz"
communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
- out_fn = generate_filename(text, text,
+ out_fn = generate_filename(text, text, file_format=file_format)
await communicate.save(out_fn)
return out_fn

@@ -317,9 +337,16 @@ def process_video_with_gpt(video_path, prompt):

# 🤖 9. AI Model Integration

- def save_full_transcript(query, text):
+ def save_full_transcript(query, text, files):
"""Save full transcript of Arxiv results as a file."""
-
+ # Sanitize query by replacing carriage returns and line feeds
+ sanitized_query = query.replace('\r', ' ').replace('\n', ' ').strip()
+ entry = {
+ 'input': sanitized_query,
+ 'output': text.replace('\r', ' ').replace('\n', ' ').strip(),
+ 'files': files
+ }
+ append_to_transcript(entry)

def parse_arxiv_refs(ref_text: str):
"""

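A hypothetical call with placeholder file names, just to show the argument shapes the new signature expects:

save_full_transcript(
    query="state space models for long sequences",
    text="Short summary of the retrieved papers...",
    files=["240101_ssm_summary.md", "240101_ssm_summary.mp3"],  # placeholder paths
)
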
@@ -388,6 +415,7 @@ def create_paper_audio_files(papers, input_question):
"""
# Collect all content for combined summary
combined_titles = []
+ created_files = []

for paper in papers:
try:

@@ -398,6 +426,7 @@ def create_paper_audio_files(papers, input_question):
file_format = st.session_state['audio_format']
full_file = speak_with_edge_tts(full_text, voice=st.session_state['tts_voice'], file_format=file_format)
paper['full_audio'] = full_file
+ created_files.append(full_file)

# Display the audio immediately after generation
st.write(f"### {FILE_EMOJIS.get(file_format, '')} {os.path.basename(full_file)}")

@@ -417,6 +446,9 @@ def create_paper_audio_files(papers, input_question):
st.write(f"### {FILE_EMOJIS.get(file_format, '')} Combined Summary Audio")
play_and_download_audio(combined_file, file_type=file_format)
papers.append({'title': 'Combined Summary', 'full_audio': combined_file})
+ created_files.append(combined_file)
+
+ return created_files

def display_papers(papers):
"""

@@ -460,8 +492,9 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,

# Parse and process papers
papers = parse_arxiv_refs(refs)
+ created_files = []
if papers:
- create_paper_audio_files(papers, input_question=q)
+ created_files = create_paper_audio_files(papers, input_question=q)
display_papers(papers)
else:
st.warning("No papers found in the response.")

@@ -470,7 +503,9 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
st.write(f"**Total Elapsed:** {elapsed:.2f} s")

# Save full transcript
- create_file(q, result, "md")
+ create_md_output = create_file(q, result, "md")
+ created_files.append(create_md_output)
+ save_full_transcript(q, result, created_files)
return result

def process_with_gpt(text):

@@ -488,7 +523,9 @@ def process_with_gpt(text):
)
ans = c.choices[0].message.content
st.write("GPT-4o: " + ans)
- create_file(text, ans, "md")
+ create_md_output = create_file(text, ans, "md")
+ created_files = [create_md_output]
+ save_full_transcript(text, ans, created_files)
st.session_state.messages.append({"role":"assistant","content":ans})
return ans

@@ -506,16 +543,19 @@ def process_with_claude(text):
)
ans = r.content[0].text
st.write("Claude-3.5: " + ans)
- create_file(text, ans, "md")
+ create_md_output = create_file(text, ans, "md")
+ created_files = [create_md_output]
+ save_full_transcript(text, ans, created_files)
st.session_state.chat_history.append({"user":text,"claude":ans})
return ans

# 📂 10. File Management
def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
"""Create zip with intelligent naming based on top 10 common words."""
- # Exclude '
- md_files = [f for f in md_files if os.path.basename(f).lower() != '
+ # Exclude 'transcript.md'
+ md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
all_files = md_files + mp3_files + wav_files
+
if not all_files:
return None

@@ -537,7 +577,7 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
combined_content = " ".join(all_content)
info_terms = get_high_info_terms(combined_content, top_n=10)

- timestamp = datetime.now().strftime("%y%
+ timestamp = datetime.now().strftime("%y%m%d%H%M%S")
name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:10])
zip_name = f"{timestamp}_{name_text}.zip"

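For illustration, how the "%y%m%d%H%M%S" timestamp and the high-info terms combine into the zip name (terms are made up):

from datetime import datetime

timestamp = datetime.now().strftime("%y%m%d%H%M%S")
info_terms = ["state space", "long context", "audio summaries"]  # example get_high_info_terms output
name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:10])
zip_name = f"{timestamp}_{name_text}.zip"
print(zip_name)  # e.g. 240101120000_state-space_long-context_audio-summaries.zip
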
@@ -547,93 +587,134 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):

return zip_name

- def
- """Load and
- […] (24 deleted lines whose content is truncated)
- if f.endswith(".md"):
- c = open(f,'r',encoding='utf-8').read()
- text += " " + c
- return get_high_info_terms(text, top_n=5)
-
- def display_file_manager_sidebar(groups_sorted):
+ def load_transcript():
+ """Load and parse transcript.md"""
+ transcript_file = "transcript.md"
+ if not os.path.exists(transcript_file):
+ return []
+
+ with open(transcript_file, 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ entries = content.split('\n---\n\n')
+ parsed_entries = []
+ for entry in entries:
+ if not entry.strip():
+ continue
+ match_input = re.search(r'\*\*Input:\*\* (.+)', entry)
+ match_output = re.search(r'\*\*Output:\*\* (.+)', entry)
+ files = re.findall(r'- (.+) \[(.+)\]\((.+)\)', entry)
+ if match_input and match_output:
+ parsed_entries.append({
+ 'input': match_input.group(1),
+ 'output': match_output.group(1),
+ 'files': [file[2] for file in files]
+ })
+ return parsed_entries
+
+ def display_file_manager_sidebar():
"""Display file manager in sidebar"""
st.sidebar.title("🎵 Audio & Docs Manager")

-
-
- all_wav = [] # 🆕 List to hold WAV files
- for group_name, files in groups_sorted:
- for f in files:
- if f.endswith(".md"):
- all_md.append(f)
- elif f.endswith(".mp3"):
- all_mp3.append(f)
- elif f.endswith(".wav"):
- all_wav.append(f) # 🆕 Append WAV files
-
+ groups_sorted = load_transcript()
+
top_bar = st.sidebar.columns(4) # 🆕 Adjusted columns to accommodate WAV
with top_bar[0]:
if st.button("🗑 DelAllMD"):
-
+ md_files = glob.glob("*.md")
+ md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
+ for f in md_files:
os.remove(f)
st.session_state.should_rerun = True
with top_bar[1]:
if st.button("🗑 DelAllMP3"):
-
+ mp3_files = glob.glob("*.mp3")
+ for f in mp3_files:
os.remove(f)
st.session_state.should_rerun = True
with top_bar[2]:
if st.button("🗑 DelAllWAV"):
-
+ wav_files = glob.glob("*.wav") # 🆕 Load WAV files
+ for f in wav_files:
os.remove(f)
st.session_state.should_rerun = True
with top_bar[3]:
if st.button("⬇️ ZipAll"):
-
+ md_files = glob.glob("*.md")
+ md_files = [f for f in md_files if os.path.basename(f).lower() != 'transcript.md']
+ mp3_files = glob.glob("*.mp3")
+ wav_files = glob.glob("*.wav") # 🆕 Load WAV files
+ zip_name = create_zip_of_files(md_files, mp3_files, wav_files, input_question=st.session_state.get('last_query', ''))
if zip_name:
st.sidebar.markdown(get_download_link(zip_name, file_type="zip"), unsafe_allow_html=True)

- for
-
- with st.sidebar.expander(f"{FILE_EMOJIS.get('md', '')} {group_name}
+ for idx, entry in enumerate(groups_sorted, 1):
+ group_name = f"Query {idx}: {entry['input'][:50]}..." # Truncate for display
+ with st.sidebar.expander(f"{FILE_EMOJIS.get('md', '')} {group_name}", expanded=False):
c1,c2 = st.columns(2)
with c1:
- if st.button("👀ViewGrp", key="view_group_"+
- st.session_state.viewing_prefix =
+ if st.button("👀ViewGrp", key="view_group_"+str(idx)):
+ st.session_state.viewing_prefix = idx
with c2:
- if st.button("🗑DelGrp", key="del_group_"+
-
-
+ if st.button("🗑DelGrp", key="del_group_"+str(idx)):
+ # Delete associated files
+ for f in entry['files']:
+ if os.path.exists(f):
+ os.remove(f)
+ # Reload transcript without this entry
+ all_entries = load_transcript()
+ del all_entries[idx-1]
+ # Rewrite transcript.md
+ transcript_file = "transcript.md"
+ with open(transcript_file, 'w', encoding='utf-8') as tf:
+ for i, e in enumerate(all_entries, 1):
+ new_entry = f"1. **Input:** {e['input']}\n\n **Output:** {e['output']}\n\n **Files:**\n"
+ for file in e['files']:
+ emoji = FILE_EMOJIS.get(file.split('.')[-1], '')
+ new_entry += f" - {emoji} [{os.path.basename(file)}]({file})\n"
+ new_entry += "\n---\n\n"
+ tf.write(new_entry)
st.success(f"Deleted group {group_name}!")
st.session_state.should_rerun = True

- for f in files:
+ for f in entry['files']:
fname = os.path.basename(f)
ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
- st.
+ st.markdown(f"**{fname}** - {ctime}")
+
+ def extract_keywords_from_md(files):
+ """Extract keywords from markdown files"""
+ text = ""
+ for f in files:
+ if f.endswith(".md"):
+ c = open(f,'r',encoding='utf-8').read()
+ text += " " + c
+ return get_high_info_terms(text, top_n=5)
+
+ def display_viewing_group(group_idx):
+ """Display the contents of a viewing group"""
+ groups_sorted = load_transcript()
+ if group_idx < 1 or group_idx > len(groups_sorted):
+ st.error("Invalid group selected.")
+ return
+ entry = groups_sorted[group_idx-1]
+ st.write("---")
+ st.write(f"**Viewing Group {group_idx}:** {entry['input']}")
+ for f in entry['files']:
+ fname = os.path.basename(f)
+ ext = os.path.splitext(fname)[1].lower().strip('.')
+ st.write(f"### {fname}")
+ if ext == "md":
+ content = open(f,'r',encoding='utf-8').read()
+ st.markdown(content)
+ elif ext == "mp3":
+ st.audio(f)
+ elif ext == "wav":
+ st.audio(f) # 🆕 Handle WAV files
+ else:
+ st.markdown(get_download_link(f), unsafe_allow_html=True)
+ if st.button("❌ Close"):
+ st.session_state.viewing_prefix = None

# 🎯 11. Main Application
def main():

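Because load_transcript re-parses exactly the block format that append_to_transcript writes, a small self-contained check of that format against the same regexes can catch drift between the two (sample content is invented):

import re

sample_entry = (
    "1. **Input:** What is AGI?\n\n"
    " **Output:** A short summary...\n\n"
    " **Files:**\n"
    " - 📝 [240101_agi.md](240101_agi.md)\n"
)
match_input = re.search(r'\*\*Input:\*\* (.+)', sample_entry)
match_output = re.search(r'\*\*Output:\*\* (.+)', sample_entry)
files = re.findall(r'- (.+) \[(.+)\]\((.+)\)', sample_entry)
print(match_input.group(1))   # What is AGI?
print(match_output.group(1))  # A short summary...
print([f[2] for f in files])  # ['240101_agi.md']
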
@@ -726,7 +807,8 @@ def main():
result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
titles_summary=titles_summary, full_audio=full_audio)
if full_transcript:
-
+ # The transcript is already being handled in perform_ai_lookup
+ pass

st.markdown("### Change Prompt & Re-Run")
q_new = st.text_input("🔄 Modify Query:")

@@ -735,7 +817,8 @@ def main():
result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
titles_summary=titles_summary, full_audio=full_audio)
if full_transcript:
-
+ # The transcript is already being handled in perform_ai_lookup
+ pass

elif tab_main == "🎤 Voice":
st.subheader("🎤 Voice Input")

@@ -795,34 +878,16 @@ def main():
st.write("Select a file from the sidebar to edit.")

# Load and display files in the sidebar
-
- display_file_manager_sidebar(groups_sorted)
-
- if st.session_state.viewing_prefix and any(st.session_state.viewing_prefix == group for group, _ in groups_sorted):
- st.write("---")
- st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
- for group_name, files in groups_sorted:
- if group_name == st.session_state.viewing_prefix:
- for f in files:
- fname = os.path.basename(f)
- ext = os.path.splitext(fname)[1].lower().strip('.')
- st.write(f"### {fname}")
- if ext == "md":
- content = open(f,'r',encoding='utf-8').read()
- st.markdown(content)
- elif ext == "mp3":
- st.audio(f)
- elif ext == "wav":
- st.audio(f) # 🆕 Handle WAV files
- else:
- st.markdown(get_download_link(f), unsafe_allow_html=True)
- break
- if st.button("❌ Close"):
- st.session_state.viewing_prefix = None
+ display_file_manager_sidebar()

+ # Display viewing group if selected
+ if st.session_state.viewing_prefix:
+ display_viewing_group(st.session_state.viewing_prefix)
+
+ # Additional Markdown in sidebar
markdownPapers = """

-
+ # Levels of AGI

## 1. Performance (rows) x Generality (columns)
- **Narrow**

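The sidebar and group viewer above lean on a few st.session_state keys; a minimal initialization sketch with assumed defaults (the real initialization lives earlier in app.py) would look like:

import streamlit as st

for key, default in [("viewing_prefix", None), ("should_rerun", False), ("last_query", "")]:
    if key not in st.session_state:
        st.session_state[key] = default
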
@@ -897,7 +962,7 @@ def main():
- *Reference:* Stockfish (2023). **Stockfish Chess Engine**. [Website](https://stockfishchess.org)
- **Artificial Superintelligence (ASI)**
- Not yet achieved
-
+

# 🧬 Innovative Architecture of AlphaFold2: A Hybrid System

@@ -944,7 +1009,7 @@ def main():

"""
st.sidebar.markdown(markdownPapers)
-
+
if st.session_state.should_rerun:
st.session_state.should_rerun = False
st.rerun()