awacke1 committed (verified)
Commit 322d94b · Parent(s): 8c120d1

Update app.py

Files changed (1)
  1. app.py +130 -240
app.py CHANGED
@@ -356,259 +356,149 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary
     st.write(f"**Total Elapsed:** {elapsed:.2f} s")

     return response
-
 def render_search_interface():
     """Main search interface with voice recognition and model selection"""
     st.header("🔍 Voice Search & Research")
-
-    # Voice and model settings

-    col1, col2, col3 = st.columns([2, 1, 1])
-    with col1:
-        selected_voice = st.selectbox(
-            "Select Voice",
-            ENGLISH_VOICES,
-            index=0,
-            help="Choose the voice for audio responses"
-        )
-    with col2:
-        run_option = st.selectbox(
-            "Model:",
-            ["Arxiv", "GPT-4o", "Claude-3.5"],
-            key="run_option"
-        )
-    with col3:
-        autorun = st.checkbox("⚙ AutoRun", value=True, key="autorun")
-
-    # Voice component
-    voice_result = create_voice_component()
-
-    # Handle voice input with autorun
-    if voice_result and isinstance(voice_result, (str, dict)):
-        # Extract text and trigger info
-        if isinstance(voice_result, dict):
-            current_text = voice_result.get('text', '')
-            trigger = voice_result.get('trigger')
-        else:
-            current_text = voice_result
-            trigger = None
-
-        # Show text in edit box
-        edited_input = st.text_area(
-            "✏️ Edit Input:",
-            value=current_text,
-            height=100,
-            key="edited_input"
-        )
-
-        # Check if input has changed
-        input_changed = (edited_input != st.session_state.get('last_processed_text', ''))
-
-        # Process based on autorun and model selection
-        if autorun and input_changed and edited_input:
-            st.session_state.last_processed_text = edited_input
-
-            try:
-                with st.spinner("Processing..."):
-                    if run_option == "Arxiv":
-                        result = perform_ai_lookup(
-                            edited_input,
-                            vocal_summary=True,
-                            extended_refs=False,
-                            titles_summary=True,
-                            full_audio=True,
-                            voice=selected_voice
-                        )
-                    elif run_option == "GPT-4o":
-                        result = process_with_gpt(edited_input)
-                        # Generate audio for GPT response
-                        audio_file = asyncio.run(generate_audio(result, voice=selected_voice))
-                        if audio_file:
-                            render_audio_result(audio_file, "GPT-4 Response")
-                    elif run_option == "Claude-3.5":
-                        result = process_with_claude(edited_input)
-                        # Generate audio for Claude response
-                        audio_file = asyncio.run(generate_audio(result, voice=selected_voice))
-                        if audio_file:
-                            render_audio_result(audio_file, "Claude Response")
-
-                # Save to history
-                st.session_state.transcript_history.append({
-                    'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-                    'query': edited_input,
-                    'response': result,
-                    'model': run_option
-                })
-
-            except Exception as e:
-                st.error(f"Error processing request: {str(e)}")
+    # Get voice component value and set up model selection
+    mycomponent = components.declare_component("mycomponent", path="mycomponent")
+    val = mycomponent(my_input_value="Hello")
+
+    # Show input in edit box if detected
+    if val:
+        val_stripped = val.replace('\n', ' ')
+        edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
+        run_option = st.selectbox("Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])

-        # Manual run button
-        elif st.button("▶ Run"):
-            try:
-                with st.spinner("Processing..."):
-                    if run_option == "Arxiv":
-                        result = perform_ai_lookup(
-                            edited_input,
-                            vocal_summary=True,
-                            extended_refs=False,
-                            titles_summary=True,
-                            full_audio=True,
-                            voice=selected_voice
-                        )
-                    elif run_option == "GPT-4o":
-                        result = process_with_gpt(edited_input)
-                        audio_file = asyncio.run(generate_audio(result, voice=selected_voice))
-                        if audio_file:
-                            render_audio_result(audio_file, "GPT-4 Response")
+        col1, col2 = st.columns(2)
+        with col1:
+            autorun = st.checkbox("⚙ AutoRun", value=True)
+        with col2:
+            full_audio = st.checkbox("📚FullAudio", value=False,
+                                     help="Generate full audio response")
+
+        input_changed = (val != st.session_state.get('old_val', None))
+
+        if autorun and input_changed:
+            st.session_state.old_val = val
+            if run_option == "Arxiv":
+                perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                  titles_summary=True, full_audio=full_audio)
+            else:
+                if run_option == "GPT-4o":
+                    process_with_gpt(edited_input)
+                elif run_option == "Claude-3.5":
+                    process_with_claude(edited_input)
+        else:
+            if st.button("▶ Run"):
+                st.session_state.old_val = val
+                if run_option == "Arxiv":
+                    perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                      titles_summary=True, full_audio=full_audio)
+                else:
+                    if run_option == "GPT-4o":
+                        process_with_gpt(edited_input)
                     elif run_option == "Claude-3.5":
-                        result = process_with_claude(edited_input)
-                        audio_file = asyncio.run(generate_audio(result, voice=selected_voice))
-                        if audio_file:
-                            render_audio_result(audio_file, "Claude Response")
-
-                # Save to history
-                st.session_state.transcript_history.append({
-                    'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-                    'query': edited_input,
-                    'response': result,
-                    'model': run_option
-                })
-
-            except Exception as e:
-                st.error(f"Error processing request: {str(e)}")
+                        process_with_claude(edited_input)

+
 def main():
-    st.title("🔬 ARIA Research Assistant")
-
-    # Initialize settings
-    with st.sidebar:
-        st.title("⚙️ Settings")
-
-        # Audio Settings
-        st.subheader("Audio Settings")
-        st.session_state.autoplay_audio = st.checkbox(
-            "Autoplay Audio",
-            value=True,
-            help="Automatically play audio when generated"
-        )
-
-        rate = st.slider("Speech Rate", -50, 50, 0, 5)
-        pitch = st.slider("Pitch", -50, 50, 0, 5)
-
-        # Advanced Settings
-        st.subheader("Advanced")
-        save_history = st.checkbox(
-            "Save History",
-            value=True,
-            help="Save transcripts and responses"
-        )
-        cleanup_old = st.checkbox(
-            "Auto Cleanup",
-            value=False,
-            help="Remove old files automatically"
-        )
-
-    # Main content tabs
-    tabs = st.tabs(["🎤 Voice Search", "📚 History", "🎵 Media", "⚙️ Advanced"])
-
-    with tabs[0]:
+    st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
+    tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"], horizontal=True)
+
+    if tab_main == "🎤 Voice":
         render_search_interface()
-
-    with tabs[1]:
-        st.header("Search History")
-        if st.session_state.transcript_history:
-            for entry in reversed(st.session_state.transcript_history):
-                with st.expander(
-                    f"🔍 {entry['timestamp']} - {entry['query'][:50]}...",
-                    expanded=False
-                ):
-                    st.markdown(f"**Model:** {entry['model']}")
-                    st.markdown(entry['response'])
-
-    with tabs[2]:
-        st.header("Media Files")
-        media_tabs = st.tabs(["🎵 Audio", "🎥 Video", "📷 Images"])
-
-        with media_tabs[0]:
-            audio_files = glob.glob("*.mp3")
-            if audio_files:
-                for audio_file in sorted(audio_files, key=os.path.getmtime, reverse=True):
-                    st.markdown(get_audio_autoplay_html(audio_file), unsafe_allow_html=True)
-            else:
-                st.write("No audio files found")
+
+    elif tab_main == "🔍 ArXiv":
+        st.subheader("🔍 Query ArXiv")
+        q = st.text_input("🔍 Query:")
+
+        st.markdown("### 🎛 Options")
+        vocal_summary = st.checkbox("🎙ShortAudio", value=True)
+        extended_refs = st.checkbox("📜LongRefs", value=False)
+        titles_summary = st.checkbox("🔖TitlesOnly", value=True)
+        full_audio = st.checkbox("📚FullAudio", value=False,
+                                 help="Full audio of results")
+        full_transcript = st.checkbox("🧾FullTranscript", value=False,
+                                      help="Generate a full transcript file")
+
+        if q and st.button("🔍Run"):
+            result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                                       titles_summary=titles_summary, full_audio=full_audio)
+            if full_transcript:
+                save_full_transcript(q, result)
+
+        st.markdown("### Change Prompt & Re-Run")
+        q_new = st.text_input("🔄 Modify Query:")
+        if q_new and st.button("🔄 Re-Run with Modified Query"):
+            result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                                       titles_summary=titles_summary, full_audio=full_audio)
+            if full_transcript:
+                save_full_transcript(q_new, result)
+
+    elif tab_main == "📸 Media":
+        st.header("📸 Images & 🎥 Videos")
+        tabs = st.tabs(["🖼 Images", "🎥 Video"])

-        with media_tabs[1]:
-            video_files = glob.glob("*.mp4")
-            if video_files:
-                cols = st.columns(2)
-                for idx, video_file in enumerate(video_files):
-                    with cols[idx % 2]:
-                        st.video(video_file)
+        with tabs[0]:
+            imgs = glob.glob("*.png")+glob.glob("*.jpg")
+            if imgs:
+                c = st.slider("Cols",1,5,3)
+                cols = st.columns(c)
+                for i,f in enumerate(imgs):
+                    with cols[i%c]:
+                        st.image(Image.open(f),use_container_width=True)
+                        if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
+                            a = process_image(f,"Describe this image.")
+                            st.markdown(a)
             else:
-                st.write("No video files found")
-
-        with media_tabs[2]:
-            image_files = glob.glob("*.png") + glob.glob("*.jpg")
-            if image_files:
-                cols = st.columns(3)
-                for idx, image_file in enumerate(image_files):
-                    with cols[idx % 3]:
-                        st.image(Image.open(image_file), use_column_width=True)
+                st.write("No images found.")
+
+        with tabs[1]:
+            vids = glob.glob("*.mp4")
+            if vids:
+                for v in vids:
+                    with st.expander(f"🎥 {os.path.basename(v)}"):
+                        st.video(v)
+                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
+                            a = process_video_with_gpt(v,"Describe video.")
+                            st.markdown(a)
             else:
-                st.write("No images found")
-
-    with tabs[3]:
-        st.header("Advanced Settings")
-
-        col1, col2 = st.columns(2)
-        with col1:
-            st.subheader("Model Settings")
-            st.selectbox(
-                "Default Model",
-                ["Arxiv", "GPT-4o", "Claude-3.5"],
-                key="default_model"
-            )
-            st.number_input(
-                "Max Response Length",
-                min_value=100,
-                max_value=2000,
-                value=1000,
-                key="max_tokens"
-            )
-
-        with col2:
-            st.subheader("Voice Settings")
-            st.slider(
-                "Pause Detection (ms)",
-                min_value=500,
-                max_value=3000,
-                value=1500,
-                step=100,
-                key="pause_threshold"
-            )
-            st.checkbox(
-                "High Quality Voice",
-                value=True,
-                key="high_quality_audio"
-            )
+                st.write("No videos found.")

-# Cleanup utility
-def cleanup_old_files(days=7):
-    """Remove files older than specified days"""
-    current_time = time.time()
-    for pattern in ["*.md", "*.mp3"]:
-        for f in glob.glob(pattern):
-            creation_time = os.path.getctime(f)
-            if (current_time - creation_time) // (24 * 3600) >= days:
-                try:
-                    os.remove(f)
-                except:
-                    pass
+    elif tab_main == "📝 Editor":
+        if getattr(st.session_state,'current_file',None):
+            st.subheader(f"Editing: {st.session_state.current_file}")
+            new_text = st.text_area("✏️ Content:", st.session_state.file_content, height=300)
+            if st.button("💾 Save"):
+                with open(st.session_state.current_file,'w',encoding='utf-8') as f:
+                    f.write(new_text)
+                st.success("Updated!")
+                st.session_state.should_rerun = True
+        else:
+            st.write("Select a file from the sidebar to edit.")

-if __name__ == "__main__":
-    if st.session_state.get('cleanup_enabled', False):
-        cleanup_old_files()
-    main()
+    groups, sorted_prefixes = load_files_for_sidebar()
+    display_file_manager_sidebar(groups, sorted_prefixes)
+
+    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
+        st.write("---")
+        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
+        for f in groups[st.session_state.viewing_prefix]:
+            fname = os.path.basename(f)
+            ext = os.path.splitext(fname)[1].lower().strip('.')
+            st.write(f"### {fname}")
+            if ext == "md":
+                content = open(f,'r',encoding='utf-8').read()
+                st.markdown(content)
+            elif ext == "mp3":
+                st.audio(f)
+            else:
+                st.markdown(get_download_link(f), unsafe_allow_html=True)
+        if st.button("❌ Close"):
+            st.session_state.viewing_prefix = None

+    if st.session_state.should_rerun:
+        st.session_state.should_rerun = False
+        st.rerun()