awacke1 committed (verified)
Commit 8226326 · Parent(s): df12fe0

Update app.py

Files changed (1):
  1. app.py +44 -56
app.py CHANGED
@@ -50,11 +50,12 @@ EDGE_TTS_VOICES = [
 
 # Initialize session state variables
 if 'marquee_settings' not in st.session_state:
+    # Default to 20s animationDuration instead of 10s:
     st.session_state['marquee_settings'] = {
         "background": "#1E1E1E",
         "color": "#FFFFFF",
         "font-size": "14px",
-        "animationDuration": "10s",
+        "animationDuration": "20s",  # <- changed to 20s
         "width": "100%",
         "lineHeight": "35px"
     }
@@ -129,7 +130,7 @@ def initialize_marquee_settings():
         "background": "#1E1E1E",
         "color": "#FFFFFF",
         "font-size": "14px",
-        "animationDuration": "10s",
+        "animationDuration": "20s",  # ensure 20s stays
         "width": "100%",
         "lineHeight": "35px"
     }
@@ -153,7 +154,8 @@ def update_marquee_settings_ui():
                                  key="text_color_picker")
     with cols[1]:
         font_size = st.slider("📏 Size", 10, 24, 14, key="font_size_slider")
-        duration = st.slider("⏱️ Speed", 1, 20, 10, key="duration_slider")
+        # The default is now 20, not 10
+        duration = st.slider("⏱️ Speed", 1, 20, 20, key="duration_slider")
 
     st.session_state['marquee_settings'].update({
         "background": bg_color,
@@ -367,17 +369,12 @@ def parse_arxiv_refs(ref_text: str):
     return results[:20]
 
 
-
 # ---------------------------- Edit 1/11/2025 - add a constitution to my arxiv system templating to build configurable character and personality of IO.
 
 def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
                       titles_summary=True, full_audio=False):
     start = time.time()
 
-    #SCIENCE_PROBLEM = "Solving visual acuity of UI screens using gradio and streamlit apps that run reactive style components using html components and apis across gradio and streamlit partner apps - a cloud of contiguous org supporting ai agents"
-    #SONG_STYLE = "techno, trance, industrial"
-
-
     ai_constitution = """
     You are a talented AI coder and songwriter with a unique ability to explain scientific concepts through music with code easter eggs.. Your task is to create a song that not only entertains but also educates listeners about a specific science problem and its potential solutions.
 
@@ -424,29 +421,18 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     - Ensure catchy and memorable
     - Verify maintains the requested style throughout
     """
-
+
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     refs = client.predict(q, 20, "Semantic Search",
                           "mistralai/Mixtral-8x7B-Instruct-v0.1",
                           api_name="/update_with_rag_md")[0]
 
-    #st.code(refs)
-
     r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1",
                         True, api_name="/ask_llm")
 
-    # mistralai/Mistral-Nemo-Instruct-2407
-    # mistralai/Mistral-7B-Instruct-v0.3
-
-    #st.code(r2)
-
-
-
-
     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
-    #st.markdown(result)
-    #st.code(ai_constitution)
-
+
+    # Save and produce audio
     md_file, audio_file = save_qa_with_audio(q, result)
 
     st.subheader("📝 Main Response Audio")
@@ -462,11 +448,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
     elapsed = time.time()-start
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
 
-
-
-
-
-
     return result
 
 def process_voice_input(text):
@@ -632,28 +613,8 @@ def main():
         with open(f, 'r', encoding='utf-8') as file:
             st.session_state['marquee_content'] = file.read()[:280]
 
-    # Voice Settings
-    st.sidebar.markdown("### 🎤 Voice Settings")
-    selected_voice = st.sidebar.selectbox(
-        "Select TTS Voice:",
-        options=EDGE_TTS_VOICES,
-        index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
-    )
-
-    # Audio Format Settings
-    st.sidebar.markdown("### 🔊 Audio Format")
-    selected_format = st.sidebar.radio(
-        "Choose Audio Format:",
-        options=["MP3", "WAV"],
-        index=0
-    )
-
-    if selected_voice != st.session_state['tts_voice']:
-        st.session_state['tts_voice'] = selected_voice
-        st.rerun()
-    if selected_format.lower() != st.session_state['audio_format']:
-        st.session_state['audio_format'] = selected_format.lower()
-        st.rerun()
+    # Instead of putting voice settings in the sidebar,
+    # we will handle them in the "🎤 Voice" tab below.
 
     # Main Interface
     tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"],
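
Note: this hunk only deletes the sidebar controls; the same voice and audio-format widgets reappear inside the 🎤 Voice tab in the +660 hunk further down. That relocated code reads st.session_state['tts_voice'] and st.session_state['audio_format'] before any widget has set them, so the app needs defaults at startup. A minimal sketch of that initialization; the specific default values are assumptions, not taken from app.py.

import streamlit as st

# Assumed startup defaults for the keys the relocated Voice-tab code reads.
if 'tts_voice' not in st.session_state:
    st.session_state['tts_voice'] = EDGE_TTS_VOICES[0]  # first entry of the app's voice list
if 'audio_format' not in st.session_state:
    st.session_state['audio_format'] = "mp3"  # lowercase, matching selected_format.lower()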
@@ -679,15 +640,15 @@ def main():
                 st.session_state.old_val = val
                 st.session_state.last_query = edited_input
                 result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-                    titles_summary=True, full_audio=full_audio)
+                                           titles_summary=True, full_audio=full_audio)
         else:
             if st.button("▶ Run"):
                 st.session_state.old_val = val
                 st.session_state.last_query = edited_input
                 result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-                    titles_summary=True, full_audio=full_audio)
+                                           titles_summary=True, full_audio=full_audio)
 
-
+    # --- Tab: ArXiv
     if tab_main == "🔍 ArXiv":
         st.subheader("🔍 Query ArXiv")
         q = st.text_input("🔍 Query:", key="arxiv_query")
@@ -699,27 +660,53 @@ def main():
         full_audio = st.checkbox("📚FullAudio", value=False, key="option_full_audio")
         full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript")
 
-
         if q and st.button("🔍Run"):
             st.session_state.last_query = q
             result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
-                titles_summary=titles_summary, full_audio=full_audio)
+                                        titles_summary=titles_summary, full_audio=full_audio)
             if full_transcript:
                 create_file(q, result, "md")
 
+    # --- Tab: Voice
     elif tab_main == "🎤 Voice":
         st.subheader("🎤 Voice Input")
+
+        # Move voice selection here:
+        st.markdown("### 🎤 Voice Settings")
+        selected_voice = st.selectbox(
+            "Select TTS Voice:",
+            options=EDGE_TTS_VOICES,
+            index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
+        )
+
+        # Audio Format Settings below the voice selection
+        st.markdown("### 🔊 Audio Format")
+        selected_format = st.radio(
+            "Choose Audio Format:",
+            options=["MP3", "WAV"],
+            index=0
+        )
+
+        if selected_voice != st.session_state['tts_voice']:
+            st.session_state['tts_voice'] = selected_voice
+            st.rerun()
+        if selected_format.lower() != st.session_state['audio_format']:
+            st.session_state['audio_format'] = selected_format.lower()
+            st.rerun()
+
+        # Now the text area to enter your message
         user_text = st.text_area("💬 Message:", height=100)
         user_text = user_text.strip().replace('\n', ' ')
 
         if st.button("📨 Send"):
             process_voice_input(user_text)
-
+
         st.subheader("📜 Chat History")
         for c in st.session_state.chat_history:
             st.write("**You:**", c["user"])
             st.write("**Response:**", c["claude"])
 
+    # --- Tab: Media
     elif tab_main == "📸 Media":
         st.header("📸 Images & 🎥 Videos")
         tabs = st.tabs(["🖼 Images", "🎥 Video"])
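
Note: with the selection living in the 🎤 Voice tab, st.session_state['tts_voice'] and st.session_state['audio_format'] hold whatever the user picked, and st.rerun() re-renders the app whenever either changes. Downstream, save_qa_with_audio presumably synthesizes speech with that voice via edge-tts. A hedged sketch of that synthesis step follows; edge_tts.Communicate(...).save(...) is the library's actual API, while the helper name synthesize and the MP3-only output are simplifying assumptions (app.py may also handle WAV).

import asyncio
import edge_tts
import streamlit as st

async def synthesize(text: str, out_path: str = "response.mp3") -> str:
    # Use the voice chosen in the 🎤 Voice tab; fall back to a known edge-tts voice.
    voice = st.session_state.get('tts_voice', "en-US-AriaNeural")
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)
    return out_path

# Usage (outside an already-running event loop):
# audio_file = asyncio.run(synthesize("Hello from the Voice tab"))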
@@ -769,6 +756,7 @@ def main():
             else:
                 st.write("No videos found.")
 
+    # --- Tab: Editor
     elif tab_main == "📝 Editor":
         if st.session_state.editing_file:
             st.subheader(f"Editing: {st.session_state.editing_file}")
@@ -820,4 +808,4 @@ def main():
         st.rerun()
 
 if __name__ == "__main__":
-    main()
+    main()