awacke1 committed on
Commit
0cc168b
·
verified ·
1 Parent(s): b6fb714

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -133
app.py CHANGED
@@ -106,6 +106,16 @@ if 'last_query' not in st.session_state:
106
  if 'marquee_content' not in st.session_state:
107
  st.session_state['marquee_content'] = "πŸš€ Welcome to TalkingAIResearcher | πŸ€– Your Research Assistant"
108
 
 
 
 
 
 
 
 
 
 
 
109
  # API Keys
110
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
111
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
@@ -295,18 +305,23 @@ def play_and_download_audio(file_path, file_type="mp3"):
295
  st.markdown(dl_link, unsafe_allow_html=True)
296
 
297
  def save_qa_with_audio(question, answer, voice=None):
298
- """Save Q&A to markdown and also generate audio."""
299
  if not voice:
300
  voice = st.session_state['tts_voice']
301
 
302
  combined_text = f"# Question\n{question}\n\n# Answer\n{answer}"
303
- md_file = create_file(question, answer, "md")
304
- audio_text = f"{question}\n\nAnswer: {answer}"
305
- audio_file = speak_with_edge_tts(
306
- audio_text,
307
- voice=voice,
308
- file_format=st.session_state['audio_format']
309
- )
 
 
 
 
 
310
  return md_file, audio_file
311
 
312
  # ─────────────────────────────────────────────────────────
@@ -377,9 +392,12 @@ def create_paper_links_md(papers):
377
 
378
  def create_paper_audio_files(papers, input_question):
379
  """
380
- For each paper, generate TTS audio summary, store the path in `paper['full_audio']`,
381
- and also store a base64 link for stable downloading.
 
382
  """
 
 
383
  for paper in papers:
384
  try:
385
  audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}"
@@ -479,97 +497,95 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
479
  # 5. MAIN LOGIC: AI LOOKUP & VOICE INPUT
480
  # ─────────────────────────────────────────────────────────
481
 
482
- def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
483
- titles_summary=True, full_audio=False):
484
- """Main routine that uses Anthropic (Claude) + Gradio ArXiv RAG pipeline."""
485
- start = time.time()
486
- ai_constitution = """
487
- You are a talented AI coder and songwriter...
488
- """
489
-
490
- # --- 1) Claude API
491
  client = anthropic.Anthropic(api_key=anthropic_key)
492
- user_input = q
493
  response = client.messages.create(
494
  model="claude-3-sonnet-20240229",
495
  max_tokens=1000,
496
  messages=[
497
- {"role": "user", "content": user_input}
498
  ])
499
- st.write("Claude's reply 🧠:")
500
- st.markdown(response.content[0].text)
501
-
502
- # Save & produce audio and text result
503
  result = response.content[0].text
504
- create_file(q, result)
505
- md_file, audio_file = save_qa_with_audio(q, result)
506
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
507
 
508
- st.subheader("πŸ“ Claude Response Audio")
509
- play_and_download_audio(audio_file, st.session_state['audio_format'])
510
-
511
- if st.checkbox("Run Arxiv Research AI"):
512
- # --- 2) Arxiv RAG
513
- st.write("Arxiv's AI this Evening is Mixtral 8x7B...")
514
- client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
515
- refs = client.predict(
516
- q,
517
- #20,
518
- 10,
519
- "Semantic Search",
520
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
521
- api_name="/update_with_rag_md"
522
- )[0]
523
-
524
- r2 = client.predict(
525
- q,
526
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
527
- True,
528
- api_name="/ask_llm"
529
- )
530
-
531
- result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"
532
- md_file, audio_file = save_qa_with_audio(q, result)
533
- st.subheader("πŸ“ Main Response Audio")
534
- play_and_download_audio(audio_file, st.session_state['audio_format'])
535
 
536
- # --- 3) Parse + handle papers
537
- papers = parse_arxiv_refs(refs)
538
- if papers:
539
- # Create minimal links page first
 
 
 
 
 
540
  paper_links = create_paper_links_md(papers)
541
- links_file = create_file(q, paper_links, "md")
542
- st.markdown(paper_links)
 
 
 
 
 
 
 
 
 
 
 
 
 
543
 
544
- # Then create audio for each paper
545
- create_paper_audio_files(papers, input_question=q)
546
- display_papers(papers, get_marquee_settings())
547
- display_papers_in_sidebar(papers)
548
- else:
549
- st.warning("No papers found in the response.")
550
 
551
- elapsed = time.time() - start
552
- st.write(f"**Total Elapsed:** {elapsed:.2f} s")
553
-
554
- return result
 
555
 
556
  def process_voice_input(text):
557
- """When user sends voice query, we run the AI lookup + Q&A with audio."""
558
  if not text:
559
  return
560
  st.subheader("πŸ” Search Results")
561
- result = perform_ai_lookup(
562
- text,
563
- vocal_summary=True,
564
- extended_refs=False,
565
- titles_summary=True,
566
- full_audio=True
567
- )
568
  md_file, audio_file = save_qa_with_audio(text, result)
569
- st.subheader("πŸ“ Generated Files")
570
- st.write(f"Markdown: {md_file}")
571
- st.write(f"Audio: {audio_file}")
572
- play_and_download_audio(audio_file, st.session_state['audio_format'])
 
 
 
 
 
573
 
574
  # ─────────────────────────────────────────────────────────
575
  # 6. FILE HISTORY SIDEBAR
@@ -588,7 +604,7 @@ def display_file_history_in_sidebar():
588
  with col1:
589
  if st.button("πŸ—‘οΈ Delete All"):
590
  # Delete all files except README.md
591
- for pattern in ["*.md", "*.mp3", "*.mp4"]:
592
  for f in glob.glob(pattern):
593
  if f.lower() != "readme.md":
594
  try:
@@ -600,16 +616,20 @@ def display_file_history_in_sidebar():
600
  with col2:
601
  # Get all files for potential zip
602
  md_files = [f for f in glob.glob("*.md") if f.lower() != "readme.md"]
603
- mp4_files = glob.glob("*.mp4")
604
- if md_files or mp4_files:
605
- # Use last query if available, otherwise generic name
 
606
  zip_name = "Download.zip"
607
  if 'last_query' in st.session_state and st.session_state['last_query']:
608
- zip_name = f"{clean_text_for_filename(st.session_state['last_query'])[:30]}.zip"
609
-
 
 
 
610
  if st.button("πŸ“¦ Download All"):
611
  with zipfile.ZipFile(zip_name, 'w') as z:
612
- for f in md_files + mp4_files:
613
  z.write(f)
614
  st.sidebar.markdown(get_download_link(zip_name), unsafe_allow_html=True)
615
 
@@ -623,8 +643,23 @@ def display_file_history_in_sidebar():
623
  st.sidebar.write("No files found.")
624
  return
625
 
626
-
627
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
628
 
629
  # ─────────────────────────────────────────────────────────
630
  # 7. MAIN APP
@@ -635,16 +670,25 @@ def main():
635
  update_marquee_settings_ui()
636
  marquee_settings = get_marquee_settings()
637
 
638
- # 2) Display the marquee welcome
 
 
 
 
 
 
 
 
 
 
 
 
 
639
  display_marquee(st.session_state['marquee_content'],
640
  {**marquee_settings, "font-size": "28px", "lineHeight": "50px"},
641
  key_suffix="welcome")
642
 
643
- # 3) Main action tabs
644
- tab_main = st.radio("Action:", ["🎀 Voice", "πŸ“Έ Media", "πŸ” ArXiv", "πŸ“ Editor"],
645
- horizontal=True)
646
-
647
- # Example custom component usage
648
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
649
  val = mycomponent(my_input_value="Hello")
650
 
@@ -652,52 +696,33 @@ def main():
652
  val_stripped = val.replace('\\n', ' ')
653
  edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
654
  run_option = st.selectbox("Model:", ["Arxiv"])
655
- col1, col2 = st.columns(2)
656
- with col1:
657
- autorun = st.checkbox("βš™ AutoRun", value=True)
658
- with col2:
659
- full_audio = st.checkbox("πŸ“šFullAudio", value=False)
660
-
661
  input_changed = (val != st.session_state.old_val)
662
 
663
- if autorun and input_changed:
 
664
  st.session_state.old_val = val
665
  st.session_state.last_query = edited_input
666
- perform_ai_lookup(edited_input,
667
- vocal_summary=True,
668
- extended_refs=False,
669
- titles_summary=True,
670
- full_audio=full_audio)
671
  else:
672
  if st.button("β–Ά Run"):
673
  st.session_state.old_val = val
674
  st.session_state.last_query = edited_input
675
- perform_ai_lookup(edited_input,
676
- vocal_summary=True,
677
- extended_refs=False,
678
- titles_summary=True,
679
- full_audio=full_audio)
680
-
681
  # ─────────────────────────────────────────────────────────
682
  # TAB: ArXiv
 
683
  # ─────────────────────────────────────────────────────────
684
  if tab_main == "πŸ” ArXiv":
685
  st.subheader("πŸ” Query ArXiv")
686
  q = st.text_input("πŸ” Query:", key="arxiv_query")
687
 
688
- st.markdown("### πŸŽ› Options")
689
- vocal_summary = st.checkbox("πŸŽ™ShortAudio", value=True, key="option_vocal_summary")
690
- extended_refs = st.checkbox("πŸ“œLongRefs", value=False, key="option_extended_refs")
691
- titles_summary = st.checkbox("πŸ”–TitlesOnly", value=True, key="option_titles_summary")
692
- full_audio = st.checkbox("πŸ“šFullAudio", value=False, key="option_full_audio")
693
- full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript")
694
-
695
  if q and st.button("πŸ”Run"):
696
  st.session_state.last_query = q
697
- result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
698
- titles_summary=titles_summary, full_audio=full_audio)
699
- if full_transcript:
700
- create_file(q, result, "md")
701
 
702
  # ─────────────────────────────────────────────────────────
703
  # TAB: Voice
@@ -710,13 +735,15 @@ def main():
710
  "Select TTS Voice:",
711
  options=EDGE_TTS_VOICES,
712
  index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
 
 
713
  )
714
 
715
  st.markdown("### πŸ”Š Audio Format")
716
  selected_format = st.radio(
717
  "Choose Audio Format:",
718
  options=["MP3", "WAV"],
719
- index=0
720
  )
721
 
722
  # Update session state if voice/format changes
@@ -727,11 +754,10 @@ def main():
727
  st.session_state['audio_format'] = selected_format.lower()
728
  st.rerun()
729
 
730
- # Input text
731
- user_text = st.text_area("πŸ’¬ Message:", height=100)
732
- user_text = user_text.strip().replace('\n', ' ')
733
-
734
  if st.button("πŸ“¨ Send"):
 
 
735
  process_voice_input(user_text)
736
 
737
  st.subheader("πŸ“œ Chat History")
@@ -744,8 +770,6 @@ def main():
744
  # ─────────────────────────────────────────────────────────
745
  elif tab_main == "πŸ“Έ Media":
746
  st.header("πŸ“Έ Media Gallery")
747
-
748
- # By default, show audio first
749
  tabs = st.tabs(["🎡 Audio", "πŸ–Ό Images", "πŸŽ₯ Video"])
750
 
751
  # AUDIO sub-tab
 
106
  if 'marquee_content' not in st.session_state:
107
  st.session_state['marquee_content'] = "πŸš€ Welcome to TalkingAIResearcher | πŸ€– Your Research Assistant"
108
 
109
+ # To track user checkboxes for Claude / Arxiv / Autorun / AutoSave
110
+ if 'run_claude' not in st.session_state:
111
+ st.session_state['run_claude'] = True # enabled by default
112
+ if 'run_arxiv' not in st.session_state:
113
+ st.session_state['run_arxiv'] = False # disabled by default
114
+ if 'autorun_searches' not in st.session_state:
115
+ st.session_state['autorun_searches'] = False
116
+ if 'autosave_output' not in st.session_state:
117
+ st.session_state['autosave_output'] = False
118
+
119
  # API Keys
120
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
121
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
 
305
  st.markdown(dl_link, unsafe_allow_html=True)
306
 
307
  def save_qa_with_audio(question, answer, voice=None):
308
+ """Save Q&A to markdown and also generate audio, returning file paths."""
309
  if not voice:
310
  voice = st.session_state['tts_voice']
311
 
312
  combined_text = f"# Question\n{question}\n\n# Answer\n{answer}"
313
+ md_file = None
314
+ audio_file = None
315
+
316
+ # Only create the files if autosave is enabled
317
+ if st.session_state['autosave_output']:
318
+ md_file = create_file(question, answer, "md")
319
+ audio_text = f"{question}\n\nAnswer: {answer}"
320
+ audio_file = speak_with_edge_tts(
321
+ audio_text,
322
+ voice=voice,
323
+ file_format=st.session_state['audio_format']
324
+ )
325
  return md_file, audio_file
326
 
327
  # ─────────────────────────────────────────────────────────
 
392
 
393
  def create_paper_audio_files(papers, input_question):
394
  """
395
+ For each paper, generate TTS audio summary if autosave is on,
396
+ store the path in `paper['full_audio']`,
397
+ and store a base64 link for stable downloading.
398
  """
399
+ if not st.session_state['autosave_output']:
400
+ return
401
  for paper in papers:
402
  try:
403
  audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}"
 
497
  # 5. MAIN LOGIC: AI LOOKUP & VOICE INPUT
498
  # ─────────────────────────────────────────────────────────
499
 
500
+ def run_claude_search(q):
501
+ """Call Anthropic (Claude) for the user's query."""
 
 
 
 
 
 
 
502
  client = anthropic.Anthropic(api_key=anthropic_key)
 
503
  response = client.messages.create(
504
  model="claude-3-sonnet-20240229",
505
  max_tokens=1000,
506
  messages=[
507
+ {"role": "user", "content": q}
508
  ])
 
 
 
 
509
  result = response.content[0].text
 
 
510
 
511
+ st.write("### Claude's reply 🧠:")
512
+ st.markdown(result)
513
+ return result
514
+
515
+ def run_arxiv_search(q):
516
+ """Call the Arxiv RAG pipeline for the user's query."""
517
+ st.write("### Arxiv's AI: Mixtral 8x7B RAG")
518
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
519
+ refs = client.predict(
520
+ q,
521
+ 10, # topK
522
+ "Semantic Search",
523
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
524
+ api_name="/update_with_rag_md"
525
+ )[0]
526
 
527
+ r2 = client.predict(
528
+ q,
529
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
530
+ True,
531
+ api_name="/ask_llm"
532
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
533
 
534
+ # Combine the final result
535
+ result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"
536
+ st.markdown(result)
537
+
538
+ # Parse + handle papers
539
+ papers = parse_arxiv_refs(refs)
540
+ if papers:
541
+ # Create minimal links page if autosave is on
542
+ if st.session_state['autosave_output']:
543
  paper_links = create_paper_links_md(papers)
544
+ create_file(q, paper_links, "md")
545
+
546
+ create_paper_audio_files(papers, input_question=q)
547
+ display_papers(papers, get_marquee_settings())
548
+ display_papers_in_sidebar(papers)
549
+ else:
550
+ st.warning("No papers found in the response.")
551
+ return result
552
+
553
+ def perform_selections(q):
554
+ """
555
+ Checks which search(s) are enabled and runs them in sequence,
556
+ returning a combined response string.
557
+ """
558
+ combined_response = ""
559
 
560
+ if st.session_state['run_claude']:
561
+ claude_response = run_claude_search(q)
562
+ combined_response += "\n\nCLAUDE:\n" + claude_response
 
 
 
563
 
564
+ if st.session_state['run_arxiv']:
565
+ arxiv_response = run_arxiv_search(q)
566
+ combined_response += "\n\nARXIV:\n" + arxiv_response
567
+
568
+ return combined_response
569
 
570
  def process_voice_input(text):
571
+ """When user sends voice query, we run whichever searches are enabled."""
572
  if not text:
573
  return
574
  st.subheader("πŸ” Search Results")
575
+ # 1) Run the searches user has checked
576
+ result = perform_selections(text)
577
+
578
+ # 2) If autosave is turned on, store Q&A output
 
 
 
579
  md_file, audio_file = save_qa_with_audio(text, result)
580
+
581
+ # 3) If we saved TTS audio, play it
582
+ if audio_file:
583
+ st.subheader("πŸ“ Generated Audio")
584
+ play_and_download_audio(audio_file, st.session_state['audio_format'])
585
+
586
+ # 4) Refresh the sidebar file listing if autosave was used
587
+ if st.session_state['autosave_output']:
588
+ display_file_history_in_sidebar()
589
 
590
  # ─────────────────────────────────────────────────────────
591
  # 6. FILE HISTORY SIDEBAR
 
604
  with col1:
605
  if st.button("πŸ—‘οΈ Delete All"):
606
  # Delete all files except README.md
607
+ for pattern in ["*.md", "*.mp3", "*.wav", "*.mp4"]:
608
  for f in glob.glob(pattern):
609
  if f.lower() != "readme.md":
610
  try:
 
616
  with col2:
617
  # Get all files for potential zip
618
  md_files = [f for f in glob.glob("*.md") if f.lower() != "readme.md"]
619
+ mp3_files = glob.glob("*.mp3")
620
+ wav_files = glob.glob("*.wav")
621
+
622
+ if md_files or mp3_files or wav_files:
623
  zip_name = "Download.zip"
624
  if 'last_query' in st.session_state and st.session_state['last_query']:
625
+ # Use last_query in the zip name
626
+ clean_q = clean_text_for_filename(st.session_state['last_query'])[:30]
627
+ if clean_q:
628
+ zip_name = f"{clean_q}.zip"
629
+
630
  if st.button("πŸ“¦ Download All"):
631
  with zipfile.ZipFile(zip_name, 'w') as z:
632
+ for f in md_files + mp3_files + wav_files:
633
  z.write(f)
634
  st.sidebar.markdown(get_download_link(zip_name), unsafe_allow_html=True)
635
 
 
643
  st.sidebar.write("No files found.")
644
  return
645
 
646
+ # Sort by modification time descending
647
+ all_files_sorted = sorted(all_files, key=os.path.getmtime, reverse=True)
648
+
649
+ for f in all_files_sorted:
650
+ ext = os.path.splitext(f)[1].lower().replace('.', '')
651
+ emoji = FILE_EMOJIS.get(ext, "πŸ“")
652
+ mod_time = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%m-%d %H:%M")
653
+ # Download link
654
+ dl_link = get_download_link(f, file_type=ext)
655
+ with st.sidebar.expander(f"{emoji} {os.path.basename(f)} [{mod_time}]"):
656
+ if ext in ["mp3", "wav"]:
657
+ st.audio(f)
658
+ if ext == "md":
659
+ with open(f, 'r', encoding='utf-8') as file:
660
+ content = file.read()
661
+ st.markdown(f"```\n{content[:500]}\n...```")
662
+ st.markdown(dl_link, unsafe_allow_html=True)
663
 
664
  # ─────────────────────────────────────────────────────────
665
  # 7. MAIN APP
 
670
  update_marquee_settings_ui()
671
  marquee_settings = get_marquee_settings()
672
 
673
+ # 2) Place the radio for main tabs AND the row of checkboxes
674
+ colA, colB, colC, colD, colE = st.columns([2,1,1,1,1])
675
+ with colA:
676
+ tab_main = st.radio("Action:", ["🎀 Voice", "πŸ“Έ Media", "πŸ” ArXiv", "πŸ“ Editor"], horizontal=True)
677
+ with colB:
678
+ st.session_state['run_claude'] = st.checkbox("Claude", value=st.session_state['run_claude'])
679
+ with colC:
680
+ st.session_state['run_arxiv'] = st.checkbox("Arxiv", value=st.session_state['run_arxiv'])
681
+ with colD:
682
+ st.session_state['autorun_searches'] = st.checkbox("Autorun", value=st.session_state['autorun_searches'])
683
+ with colE:
684
+ st.session_state['autosave_output'] = st.checkbox("AutoSave", value=st.session_state['autosave_output'])
685
+
686
+ # 3) Display the marquee welcome
687
  display_marquee(st.session_state['marquee_content'],
688
  {**marquee_settings, "font-size": "28px", "lineHeight": "50px"},
689
  key_suffix="welcome")
690
 
691
+ # Example custom component usage:
 
 
 
 
692
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
693
  val = mycomponent(my_input_value="Hello")
694
 
 
696
  val_stripped = val.replace('\\n', ' ')
697
  edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
698
  run_option = st.selectbox("Model:", ["Arxiv"])
699
+ full_audio = st.checkbox("πŸ“šFullAudio", value=False)
 
 
 
 
 
700
  input_changed = (val != st.session_state.old_val)
701
 
702
+ # We'll define: if autorun is on, run immediately after input changes
703
+ if st.session_state['autorun_searches'] and input_changed:
704
  st.session_state.old_val = val
705
  st.session_state.last_query = edited_input
706
+ process_voice_input(edited_input)
 
 
 
 
707
  else:
708
  if st.button("β–Ά Run"):
709
  st.session_state.old_val = val
710
  st.session_state.last_query = edited_input
711
+ process_voice_input(edited_input)
712
+
 
 
 
 
713
  # ─────────────────────────────────────────────────────────
714
  # TAB: ArXiv
715
+ # (kept for demonstration if user chooses to do Arxiv only)
716
  # ─────────────────────────────────────────────────────────
717
  if tab_main == "πŸ” ArXiv":
718
  st.subheader("πŸ” Query ArXiv")
719
  q = st.text_input("πŸ” Query:", key="arxiv_query")
720
 
 
 
 
 
 
 
 
721
  if q and st.button("πŸ”Run"):
722
  st.session_state.last_query = q
723
+ # Even if the tab is "ArXiv," we can just call our standard function
724
+ # that uses whichever checkboxes are selected (or you can do Arxiv only).
725
+ process_voice_input(q)
 
726
 
727
  # ─────────────────────────────────────────────────────────
728
  # TAB: Voice
 
735
  "Select TTS Voice:",
736
  options=EDGE_TTS_VOICES,
737
  index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
738
+ if st.session_state['tts_voice'] in EDGE_TTS_VOICES
739
+ else 0
740
  )
741
 
742
  st.markdown("### πŸ”Š Audio Format")
743
  selected_format = st.radio(
744
  "Choose Audio Format:",
745
  options=["MP3", "WAV"],
746
+ index=0 if st.session_state['audio_format'] == "mp3" else 1
747
  )
748
 
749
  # Update session state if voice/format changes
 
754
  st.session_state['audio_format'] = selected_format.lower()
755
  st.rerun()
756
 
757
+ user_text = st.text_area("πŸ’¬ Message:", height=100).strip().replace('\n', ' ')
 
 
 
758
  if st.button("πŸ“¨ Send"):
759
+ st.session_state.last_query = user_text
760
+ # If autorun is off, we explicitly run
761
  process_voice_input(user_text)
762
 
763
  st.subheader("πŸ“œ Chat History")
 
770
  # ─────────────────────────────────────────────────────────
771
  elif tab_main == "πŸ“Έ Media":
772
  st.header("πŸ“Έ Media Gallery")
 
 
773
  tabs = st.tabs(["🎡 Audio", "πŸ–Ό Images", "πŸŽ₯ Video"])
774
 
775
  # AUDIO sub-tab