awacke1 commited on
Commit
263adfe
·
verified ·
1 Parent(s): a820539

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +368 -293
app.py CHANGED
@@ -1,18 +1,24 @@
1
  import streamlit as st
2
- import anthropic
3
- import openai
4
- import base64
5
- import cv2
6
- import glob
7
- import os
8
- import re
9
- import asyncio
10
- import edge_tts
11
  from datetime import datetime
12
- from collections import defaultdict
 
 
13
  from dotenv import load_dotenv
14
  from gradio_client import Client
 
 
15
  from PIL import Image
 
 
 
 
 
 
 
 
16
 
17
  # 🎯 1. Core Configuration & Setup
18
  st.set_page_config(
@@ -30,7 +36,8 @@ load_dotenv()
30
 
31
  # 🔑 2. API Setup & Clients
32
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
33
- anthropic_key = os.getenv('ANTHROPIC_API_KEY', "")
 
34
  if 'OPENAI_API_KEY' in st.secrets:
35
  openai_api_key = st.secrets['OPENAI_API_KEY']
36
  if 'ANTHROPIC_API_KEY' in st.secrets:
@@ -38,29 +45,39 @@ if 'ANTHROPIC_API_KEY' in st.secrets:
38
 
39
  openai.api_key = openai_api_key
40
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
 
 
 
41
 
42
  # 📝 3. Session State Management
43
- if 'parsed_papers' not in st.session_state:
44
- st.session_state['parsed_papers'] = []
45
- if 'audio_generated' not in st.session_state:
46
- st.session_state['audio_generated'] = {}
47
- if 'voices' not in st.session_state:
48
- st.session_state['voices'] = []
 
 
 
 
 
 
 
 
 
 
49
  if 'viewing_prefix' not in st.session_state:
50
  st.session_state['viewing_prefix'] = None
51
  if 'should_rerun' not in st.session_state:
52
  st.session_state['should_rerun'] = False
 
 
53
 
54
  # 🎨 4. Custom CSS
55
  st.markdown("""
56
  <style>
57
- .main {
58
- background: linear-gradient(to right, #1a1a1a, #2d2d2d);
59
- color: #fff;
60
- }
61
- .stMarkdown {
62
- font-family: 'Helvetica Neue', sans-serif;
63
- }
64
  .stButton>button {
65
  margin-right: 0.5rem;
66
  }
@@ -134,34 +151,46 @@ def clean_text_for_filename(text: str) -> str:
134
  return '_'.join(filtered)[:200]
135
 
136
  # 📁 6. File Operations
137
- def generate_filename(prefix, title, file_type="md"):
138
  """
139
- Generate filename with meaningful terms and prefix.
140
- The filename includes a timestamp and a cleaned title.
141
  """
142
- timestamp = datetime.now().strftime("%y%m_%H%M")
143
- title_cleaned = clean_text_for_filename(title)
144
- filename = f"{timestamp}_{prefix}_{title_cleaned}.{file_type}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  return filename
146
 
147
- def create_md_file(paper):
148
- """Create Markdown file for a paper."""
149
- filename = generate_filename("paper", paper['title'], "md")
150
- content = f"# {paper['title']}\n\n**Year:** {paper['year'] if paper['year'] else 'Unknown'}\n\n**Summary:**\n{paper['summary']}"
151
  with open(filename, 'w', encoding='utf-8') as f:
152
- f.write(content)
153
  return filename
154
 
155
  def get_download_link(file):
156
- """Generate download link for file."""
157
- with open(file, "rb") as f_file:
158
- b64 = base64.b64encode(f_file.read()).decode()
159
- mime_type = "audio/mpeg" if file.endswith(".mp3") else "text/markdown"
160
- return f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
161
 
162
  # 🔊 7. Audio Processing
163
  def clean_for_speech(text: str) -> str:
164
- """Clean text for speech synthesis."""
165
  text = text.replace("\n", " ")
166
  text = text.replace("</s>", " ")
167
  text = text.replace("#", "")
@@ -169,66 +198,79 @@ def clean_for_speech(text: str) -> str:
169
  text = re.sub(r"\s+", " ", text).strip()
170
  return text
171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
173
- """Generate audio using Edge TTS."""
174
  text = clean_for_speech(text)
175
  if not text.strip():
176
  return None
177
  rate_str = f"{rate:+d}%"
178
  pitch_str = f"{pitch:+d}Hz"
179
  communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
180
- out_fn = generate_filename("audio", text[:50], "mp3")
181
  await communicate.save(out_fn)
182
  return out_fn
183
 
184
- def speak_with_edge_tts(text, voice, rate=0, pitch=0):
185
- """Wrapper for Edge TTS generation."""
186
- try:
187
- return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
188
- except Exception as e:
189
- st.error(f"Error generating audio: {e}")
190
- return None
191
 
192
  def play_and_download_audio(file_path):
193
- """Play and provide download link for audio."""
194
  if file_path and os.path.exists(file_path):
195
  st.audio(file_path)
196
- dl_link = get_download_link(file_path)
197
  st.markdown(dl_link, unsafe_allow_html=True)
198
 
199
  # 🎬 8. Media Processing
200
  def process_image(image_path, user_prompt):
201
- """Process image with GPT-4V."""
202
  with open(image_path, "rb") as imgf:
203
  image_data = imgf.read()
204
  b64img = base64.b64encode(image_data).decode("utf-8")
205
- resp = openai.ChatCompletion.create(
206
  model=st.session_state["openai_model"],
207
  messages=[
208
  {"role": "system", "content": "You are a helpful assistant."},
209
- {"role": "user", "content": f"{user_prompt} Image data: data:image/png;base64,{b64img}"}
 
 
 
210
  ],
211
  temperature=0.0,
212
  )
213
  return resp.choices[0].message.content
214
 
215
- def process_audio_file(audio_path):
216
- """Process audio with Whisper."""
217
  with open(audio_path, "rb") as f:
218
- transcription = openai.Audio.transcribe("whisper-1", f)
219
- return transcription['text']
 
220
 
221
  def process_video(video_path, seconds_per_frame=1):
222
- """Extract frames from video."""
223
  vid = cv2.VideoCapture(video_path)
224
  total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
225
  fps = vid.get(cv2.CAP_PROP_FPS)
226
- skip = int(fps * seconds_per_frame)
227
  frames_b64 = []
228
  for i in range(0, total, skip):
229
  vid.set(cv2.CAP_PROP_POS_FRAMES, i)
230
  ret, frame = vid.read()
231
- if not ret:
232
  break
233
  _, buf = cv2.imencode(".jpg", frame)
234
  frames_b64.append(base64.b64encode(buf).decode("utf-8"))
@@ -236,127 +278,206 @@ def process_video(video_path, seconds_per_frame=1):
236
  return frames_b64
237
 
238
  def process_video_with_gpt(video_path, prompt):
239
- """Analyze video frames with GPT-4V."""
240
  frames = process_video(video_path)
241
- combined_images = " ".join([f"data:image/jpeg;base64,{fr}" for fr in frames])
242
- resp = openai.ChatCompletion.create(
243
  model=st.session_state["openai_model"],
244
  messages=[
245
- {"role":"system","content":"Analyze the following video frames."},
246
- {"role":"user","content": f"{prompt} Frames: {combined_images}"}
 
 
 
247
  ]
248
  )
249
  return resp.choices[0].message.content
250
 
251
  # 🤖 9. AI Model Integration
252
- def parse_papers(transcript_text: str):
 
 
 
 
 
 
 
 
253
  """
254
- Parse the transcript text into individual papers.
255
- Assumes that each paper starts with a number and is enclosed in brackets for the title and year.
256
- Example:
257
- 1) [Paper Title (2023)] This is the summary...
 
 
258
  """
259
- papers = []
260
- # Split based on numbered entries
261
- paper_blocks = re.split(r'\d+\)\s*\[', transcript_text)
262
- for block in paper_blocks[1:]: # Skip the first split as it doesn't contain paper info
263
- try:
264
- title_year, summary = block.split(']', 1)
265
- # Extract title and year using regex
266
- title_match = re.match(r"(.+?)\s*\((\d{4})\)", title_year)
267
- if title_match:
268
- title = title_match.group(1).strip()
269
- year = int(title_match.group(2))
270
- else:
271
- title = title_year.strip()
272
- year = None
273
- summary = summary.strip()
274
- papers.append({
275
- 'title': title,
276
- 'year': year,
277
- 'summary': summary
278
- })
279
- except ValueError:
280
- continue # Skip blocks that don't match the expected format
281
- return papers
282
-
283
- def save_paper_files(paper, voice):
284
- """Generate and save Markdown and MP3 files for a paper."""
285
- # Create Markdown file
286
- md_filename = create_md_file(paper)
287
-
288
- # Generate audio for the entire paper
289
- audio_text = f"{paper['title']}. {paper['summary']}"
290
- audio_filename = speak_with_edge_tts(audio_text, voice)
291
-
292
- return md_filename, audio_filename
293
-
294
- def display_papers(papers, voice):
295
- """Display all papers with options to generate audio."""
296
- for idx, paper in enumerate(papers):
297
- st.markdown(f"### {idx + 1}. {paper['title']} ({paper['year'] if paper['year'] else 'Unknown Year'})")
298
- st.markdown(f"**Summary:** {paper['summary']}")
299
-
300
- # Button to generate and play audio
301
- if st.button(f"🔊 Read Aloud - {paper['title']}", key=f"read_aloud_{idx}"):
302
- md_file, audio_file = save_paper_files(paper, voice)
303
- if audio_file:
304
- st.success("Audio generated successfully!")
305
- play_and_download_audio(audio_file)
306
- else:
307
- st.error("Failed to generate audio.")
308
-
309
- st.write("---")
310
 
311
- def cache_parsed_papers(papers):
312
- """Cache the parsed papers."""
313
- st.session_state['parsed_papers'] = papers
 
 
 
314
 
315
- def get_cached_papers():
316
- """Retrieve cached papers."""
317
- return st.session_state.get('parsed_papers', [])
318
-
319
- def save_full_transcript(query, text):
320
- """Save full transcript of Arxiv results as a file."""
321
- filename = generate_filename("transcript", query, "md")
322
- with open(filename, 'w', encoding='utf-8') as f:
323
- f.write(text)
324
- return filename
325
 
326
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
327
- titles_summary=True, full_audio=False, selected_voice="en-US-AriaNeural"):
328
  """Perform Arxiv search and generate audio summaries."""
329
  start = time.time()
330
 
331
  # 🎯 1) Query the HF RAG pipeline
332
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
333
- refs = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")[0]
334
- r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
335
-
336
  # 🎯 2) Combine for final text output
337
- clean_q = q.replace('\n', ' ')
338
- result = f"### 🔎 {clean_q}\n\n{r2}\n\n{refs}"
339
  st.markdown(result)
340
-
341
- # 🎯 3) Parse papers from the references
342
- parsed_papers = parse_papers(refs)
343
- cache_parsed_papers(parsed_papers)
344
-
345
- # 🎯 4) Display all parsed papers with options
346
- st.write("## Individual Papers")
347
- display_papers(parsed_papers, selected_voice)
348
-
349
- elapsed = time.time() - start
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")
351
-
352
  # Always create a file with the result
353
- save_full_transcript(clean_q, result)
354
-
355
  return result
356
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
357
  # 📂 10. File Management
358
  def create_zip_of_files(md_files, mp3_files):
359
- """Create zip with intelligent naming."""
360
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
361
  all_files = md_files + mp3_files
362
  if not all_files:
@@ -370,22 +491,22 @@ def create_zip_of_files(md_files, mp3_files):
370
  all_content.append(file.read())
371
  elif f.endswith('.mp3'):
372
  all_content.append(os.path.basename(f))
373
-
374
  combined_content = " ".join(all_content)
375
  info_terms = get_high_info_terms(combined_content)
376
-
377
  timestamp = datetime.now().strftime("%y%m_%H%M")
378
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:3])
379
  zip_name = f"{timestamp}_{name_text}.zip"
380
-
381
  with zipfile.ZipFile(zip_name,'w') as z:
382
  for f in all_files:
383
  z.write(f)
384
-
385
  return zip_name
386
 
387
  def load_files_for_sidebar():
388
- """Load and group files for sidebar display."""
389
  md_files = glob.glob("*.md")
390
  mp3_files = glob.glob("*.mp3")
391
 
@@ -407,7 +528,7 @@ def load_files_for_sidebar():
407
  return groups, sorted_prefixes
408
 
409
  def extract_keywords_from_md(files):
410
- """Extract keywords from markdown files."""
411
  text = ""
412
  for f in files:
413
  if f.endswith(".md"):
@@ -416,7 +537,7 @@ def extract_keywords_from_md(files):
416
  return get_high_info_terms(text)
417
 
418
  def display_file_manager_sidebar(groups, sorted_prefixes):
419
- """Display file manager in sidebar."""
420
  st.sidebar.title("🎵 Audio & Docs Manager")
421
 
422
  all_md = []
@@ -443,19 +564,19 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
443
  if st.button("⬇️ ZipAll"):
444
  z = create_zip_of_files(all_md, all_mp3)
445
  if z:
446
- st.sidebar.markdown(get_download_link(z), unsafe_allow_html=True)
447
 
448
  for prefix in sorted_prefixes:
449
  files = groups[prefix]
450
  kw = extract_keywords_from_md(files)
451
  keywords_str = " ".join(kw) if kw else "No Keywords"
452
  with st.sidebar.expander(f"{prefix} Files ({len(files)}) - KW: {keywords_str}", expanded=True):
453
- c1, c2 = st.columns(2)
454
  with c1:
455
- if st.button("👀 View Group", key="view_group_"+prefix):
456
  st.session_state.viewing_prefix = prefix
457
  with c2:
458
- if st.button("🗑 Delete Group", key="del_group_"+prefix):
459
  for f in files:
460
  os.remove(f)
461
  st.success(f"Deleted group {prefix}!")
@@ -467,64 +588,73 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
467
  st.write(f"**{fname}** - {ctime}")
468
 
469
  # 🎯 11. Main Application
470
- async def get_available_voices():
471
- voices = await edge_tts.list_voices()
472
- return [voice["ShortName"] for voice in voices if voice["Locale"].startswith("en")]
473
-
474
- @st.cache_resource
475
- def fetch_voices():
476
- return asyncio.run(get_available_voices())
477
-
478
  def main():
479
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
480
- tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"], horizontal=True)
481
-
482
- # Initialize voices if not already done
483
- if not st.session_state['voices']:
484
- st.session_state['voices'] = fetch_voices()
485
-
486
- st.sidebar.markdown("### 🎤 Select Voice for Audio Generation")
487
- selected_voice = st.sidebar.selectbox(
488
- "Choose a voice:",
489
- options=st.session_state['voices'],
490
- index=st.session_state['voices'].index("en-US-AriaNeural") if "en-US-AriaNeural" in st.session_state['voices'] else 0
491
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
492
 
493
- # Main Tabs
494
  if tab_main == "🔍 ArXiv":
495
  st.subheader("🔍 Query ArXiv")
496
- q = st.text_input("🔍 Query:").replace('\n', ' ')
497
 
498
  st.markdown("### 🎛 Options")
499
- vocal_summary = st.checkbox("🎙 Short Audio", value=True)
500
- extended_refs = st.checkbox("📜 Long References", value=False)
501
- titles_summary = st.checkbox("🔖 Titles Only", value=True)
502
- full_audio = st.checkbox("📚 Full Audio", value=False, help="Generate full audio response")
503
- full_transcript = st.checkbox("🧾 Full Transcript", value=False, help="Generate a full transcript file")
504
-
505
- if q and st.button("🔍 Run"):
506
- result = perform_ai_lookup(
507
- q,
508
- vocal_summary=vocal_summary,
509
- extended_refs=extended_refs,
510
- titles_summary=titles_summary,
511
- full_audio=full_audio,
512
- selected_voice=selected_voice
513
- )
514
  if full_transcript:
515
  save_full_transcript(q, result)
516
 
517
  st.markdown("### Change Prompt & Re-Run")
518
- q_new = st.text_input("🔄 Modify Query:").replace('\n', ' ')
519
  if q_new and st.button("🔄 Re-Run with Modified Query"):
520
- result = perform_ai_lookup(
521
- q_new,
522
- vocal_summary=vocal_summary,
523
- extended_refs=extended_refs,
524
- titles_summary=titles_summary,
525
- full_audio=full_audio,
526
- selected_voice=selected_voice
527
- )
528
  if full_transcript:
529
  save_full_transcript(q_new, result)
530
 
@@ -535,13 +665,13 @@ def main():
535
  if st.button("📨 Send"):
536
  process_with_gpt(user_text)
537
  st.subheader("📜 Chat History")
538
- t1, t2 = st.tabs(["Claude History", "GPT-4o History"])
539
  with t1:
540
- for c in st.session_state.get('chat_history', []):
541
  st.write("**You:**", c["user"])
542
  st.write("**Claude:**", c["claude"])
543
  with t2:
544
- for m in st.session_state.get('messages', []):
545
  with st.chat_message(m["role"]):
546
  st.markdown(m["content"])
547
 
@@ -549,48 +679,42 @@ def main():
549
  st.header("📸 Images & 🎥 Videos")
550
  tabs = st.tabs(["🖼 Images", "🎥 Video"])
551
  with tabs[0]:
552
- imgs = glob.glob("*.png") + glob.glob("*.jpg")
553
  if imgs:
554
- cols = st.columns(min(5, len(imgs)))
555
- for i, f in enumerate(imgs[:20]):
556
- with cols[i % len(cols)]:
557
- st.image(Image.open(f), use_container_width=True)
 
558
  if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
559
- analysis = process_image(f, "Describe this image.")
560
- st.markdown(analysis)
561
  else:
562
  st.write("No images found.")
563
-
564
  with tabs[1]:
565
- vids = glob.glob("*.mp4")[:20]
566
  if vids:
567
  for v in vids:
568
  with st.expander(f"🎥 {os.path.basename(v)}"):
569
  st.video(v)
570
  if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
571
- analysis = process_video_with_gpt(v, "Describe video.")
572
- st.markdown(analysis)
573
  else:
574
  st.write("No videos found.")
575
 
576
  elif tab_main == "📝 Editor":
577
- st.subheader("📝 Editor")
578
- files = glob.glob("*.md")
579
- if files:
580
- selected_file = st.selectbox("Select a file to edit:", files)
581
- if selected_file:
582
- with open(selected_file, 'r', encoding='utf-8') as f:
583
- file_content = f.read()
584
- new_text = st.text_area("✏️ Content:", file_content, height=300)
585
- if st.button("💾 Save"):
586
- with open(selected_file, 'w', encoding='utf-8') as f:
587
- f.write(new_text)
588
- st.success("File updated successfully!")
589
- st.session_state.should_rerun = True
590
  else:
591
- st.write("No Markdown files available for editing.")
592
 
593
- # File Manager Sidebar
594
  groups, sorted_prefixes = load_files_for_sidebar()
595
  display_file_manager_sidebar(groups, sorted_prefixes)
596
 
@@ -602,8 +726,7 @@ def main():
602
  ext = os.path.splitext(fname)[1].lower().strip('.')
603
  st.write(f"### {fname}")
604
  if ext == "md":
605
- with open(f, 'r', encoding='utf-8') as file:
606
- content = file.read()
607
  st.markdown(content)
608
  elif ext == "mp3":
609
  st.audio(f)
@@ -614,55 +737,7 @@ def main():
614
 
615
  if st.session_state.should_rerun:
616
  st.session_state.should_rerun = False
617
- st.experimental_rerun()
618
-
619
- def process_with_gpt(text):
620
- """Process text with GPT-4."""
621
- if not text:
622
- return
623
- # Initialize messages if not present
624
- if 'messages' not in st.session_state:
625
- st.session_state['messages'] = []
626
- st.session_state['messages'].append({"role":"user","content":text})
627
- with st.chat_message("user"):
628
- st.markdown(text)
629
- with st.chat_message("assistant"):
630
- try:
631
- response = openai.ChatCompletion.create(
632
- model=st.session_state["openai_model"],
633
- messages=st.session_state['messages'],
634
- stream=False
635
- )
636
- ans = response.choices[0].message.content
637
- st.write("GPT-4o: " + ans)
638
- create_md_file({"title": "User Query", "year": None, "summary": ans})
639
- st.session_state['messages'].append({"role":"assistant","content":ans})
640
- except Exception as e:
641
- st.error(f"Error processing with GPT-4: {e}")
642
 
643
- def process_with_claude(text):
644
- """Process text with Claude."""
645
- if not text:
646
- return
647
- # Initialize chat_history if not present
648
- if 'chat_history' not in st.session_state:
649
- st.session_state['chat_history'] = []
650
- with st.chat_message("user"):
651
- st.markdown(text)
652
- with st.chat_message("assistant"):
653
- try:
654
- response = claude_client.messages.create(
655
- model="claude-3-sonnet-20240229",
656
- max_tokens=1000,
657
- messages=[{"role":"user","content":text}]
658
- )
659
- ans = response.content[0].text
660
- st.write("Claude-3.5: " + ans)
661
- create_md_file({"title": "User Query", "year": None, "summary": ans})
662
- st.session_state['chat_history'].append({"user":text,"claude":ans})
663
- except Exception as e:
664
- st.error(f"Error processing with Claude: {e}")
665
-
666
- # Run the application
667
  if __name__=="__main__":
668
  main()
 
1
  import streamlit as st
2
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
3
+ import plotly.graph_objects as go
4
+ import streamlit.components.v1 as components
 
 
 
 
 
 
5
  from datetime import datetime
6
+ from audio_recorder_streamlit import audio_recorder
7
+ from bs4 import BeautifulSoup
8
+ from collections import defaultdict, deque
9
  from dotenv import load_dotenv
10
  from gradio_client import Client
11
+ from huggingface_hub import InferenceClient
12
+ from io import BytesIO
13
  from PIL import Image
14
+ from PyPDF2 import PdfReader
15
+ from urllib.parse import quote
16
+ from xml.etree import ElementTree as ET
17
+ from openai import OpenAI
18
+ import extra_streamlit_components as stx
19
+ from streamlit.runtime.scriptrunner import get_script_run_ctx
20
+ import asyncio
21
+ import edge_tts
22
 
23
  # 🎯 1. Core Configuration & Setup
24
  st.set_page_config(
 
36
 
37
  # 🔑 2. API Setup & Clients
38
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
39
+ anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
40
+ xai_key = os.getenv('xai',"")
41
  if 'OPENAI_API_KEY' in st.secrets:
42
  openai_api_key = st.secrets['OPENAI_API_KEY']
43
  if 'ANTHROPIC_API_KEY' in st.secrets:
 
45
 
46
  openai.api_key = openai_api_key
47
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
48
+ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
49
+ HF_KEY = os.getenv('HF_KEY')
50
+ API_URL = os.getenv('API_URL')
51
 
52
  # 📝 3. Session State Management
53
+ if 'transcript_history' not in st.session_state:
54
+ st.session_state['transcript_history'] = []
55
+ if 'chat_history' not in st.session_state:
56
+ st.session_state['chat_history'] = []
57
+ if 'openai_model' not in st.session_state:
58
+ st.session_state['openai_model'] = "gpt-4o-2024-05-13"
59
+ if 'messages' not in st.session_state:
60
+ st.session_state['messages'] = []
61
+ if 'last_voice_input' not in st.session_state:
62
+ st.session_state['last_voice_input'] = ""
63
+ if 'editing_file' not in st.session_state:
64
+ st.session_state['editing_file'] = None
65
+ if 'edit_new_name' not in st.session_state:
66
+ st.session_state['edit_new_name'] = ""
67
+ if 'edit_new_content' not in st.session_state:
68
+ st.session_state['edit_new_content'] = ""
69
  if 'viewing_prefix' not in st.session_state:
70
  st.session_state['viewing_prefix'] = None
71
  if 'should_rerun' not in st.session_state:
72
  st.session_state['should_rerun'] = False
73
+ if 'old_val' not in st.session_state:
74
+ st.session_state['old_val'] = None
75
 
76
  # 🎨 4. Custom CSS
77
  st.markdown("""
78
  <style>
79
+ .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
80
+ .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
 
 
 
 
 
81
  .stButton>button {
82
  margin-right: 0.5rem;
83
  }
 
151
  return '_'.join(filtered)[:200]
152
 
153
  # 📁 6. File Operations
154
def generate_filename(prompt, response, file_type="md"):
    """
    Build a timestamped, ~150-character filename from high-information
    terms plus short clips of the prompt and response.

    Args:
        prompt: User prompt text.
        response: Model response text.
        file_type: File extension without the dot (default "md").

    Returns:
        Filename string like "2412_1530_<terms>_<snippet>.<file_type>".
    """
    stamp = datetime.now().strftime("%y%m_%H%M") + "_"

    # High-info terms from the combined text.
    combined_text = (prompt + " " + response).strip()
    terms = get_high_info_terms(combined_text)

    # Short dense clip from the start of both prompt and response.
    raw_clip = (prompt[:100] + " " + response[:100]).strip()
    clip = clean_text_for_filename(raw_clip)

    # Join terms and clip, then trim so the name stays manageable.
    body = '_'.join(terms + [clip])[:150]

    return f"{stamp}{body}.{file_type}"
177
 
178
def create_file(prompt, response, file_type="md"):
    """Write prompt and response to an intelligently named file.

    Returns:
        The generated filename.
    """
    out_name = generate_filename(prompt.strip(), response.strip(), file_type)
    with open(out_name, 'w', encoding='utf-8') as handle:
        handle.write(prompt + "\n\n" + response)
    return out_name
184
 
185
def get_download_link(file):
    """
    Generate an HTML download link with the file embedded as base64.

    The MIME type is derived from the extension (mp3 -> audio/mpeg,
    md -> text/markdown, zip -> application/zip, otherwise a generic
    binary type). The previous version hard-coded `file/zip` for every
    file, which mislabels markdown and audio downloads.

    Args:
        file: Path to the file on disk.

    Returns:
        An <a> tag string suitable for st.markdown(..., unsafe_allow_html=True).
    """
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    ext = os.path.splitext(file)[1].lower()
    mime_type = {
        ".mp3": "audio/mpeg",
        ".md": "text/markdown",
        ".zip": "application/zip",
    }.get(ext, "application/octet-stream")
    basename = os.path.basename(file)
    return f'<a href="data:{mime_type};base64,{b64}" download="{basename}">📂 Download {basename}</a>'
 
190
 
191
  # 🔊 7. Audio Processing
192
  def clean_for_speech(text: str) -> str:
193
+ """Clean text for speech synthesis"""
194
  text = text.replace("\n", " ")
195
  text = text.replace("</s>", " ")
196
  text = text.replace("#", "")
 
198
  text = re.sub(r"\s+", " ", text).strip()
199
  return text
200
 
201
@st.cache_resource
def speech_synthesis_html(result):
    """
    Render an invisible HTML component that speaks `result` through the
    browser's SpeechSynthesis API.

    The text is JSON-encoded so quotes, backslashes and newlines cannot
    break out of the JS string literal — the previous version only
    stripped double quotes, so any multi-line text produced invalid JS.

    NOTE(review): @st.cache_resource means repeated calls with the same
    text render (and speak) only once per session — confirm intended.
    """
    safe_text = json.dumps(result)  # valid JS string literal, quotes escaped
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance({safe_text});
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)
213
+
214
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Generate an MP3 via Edge TTS.

    Returns the output filename, or None when the cleaned text is empty.
    """
    speakable = clean_for_speech(text)
    if not speakable.strip():
        return None
    tts = edge_tts.Communicate(
        speakable,
        voice,
        rate=f"{rate:+d}%",    # e.g. "+0%"
        pitch=f"{pitch:+d}Hz",  # e.g. "+0Hz"
    )
    out_fn = generate_filename(speakable, speakable, "mp3")
    await tts.save(out_fn)
    return out_fn
225
 
226
def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """
    Synchronous wrapper around edge_tts_generate_audio.

    Returns the generated audio filename, or None when synthesis fails.
    Callers (e.g. play_and_download_audio) already treat None as "no
    audio", so surfacing the error via st.error instead of letting a
    network/TTS failure crash the whole Streamlit script.
    """
    try:
        return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
    except Exception as e:
        st.error(f"Error generating audio: {e}")
        return None
 
 
 
 
229
 
230
def play_and_download_audio(file_path):
    """
    Play an audio file in the app and offer a base64 download link.

    Does nothing when file_path is falsy or missing, so callers can pass
    the (possibly None) result of speak_with_edge_tts directly.
    """
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        # Use a context manager so the handle is closed promptly — the
        # previous inline open() leaked the file object.
        with open(file_path, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        basename = os.path.basename(file_path)
        dl_link = (
            f'<a href="data:audio/mpeg;base64,{b64}" '
            f'download="{basename}">Download {basename}</a>'
        )
        st.markdown(dl_link, unsafe_allow_html=True)
236
 
237
  # 🎬 8. Media Processing
238
def process_image(image_path, user_prompt):
    """Send an image plus a prompt to the GPT-4V chat endpoint.

    Returns the model's text reply.
    """
    with open(image_path, "rb") as imgf:
        b64img = base64.b64encode(imgf.read()).decode("utf-8")

    # Multimodal message: text part followed by the inlined image.
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}},
            ],
        },
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=chat_messages,
        temperature=0.0,
    )
    return resp.choices[0].message.content
255
 
256
def process_audio(audio_path):
    """Transcribe audio with Whisper, append it to the chat history,
    and return the transcribed text."""
    with open(audio_path, "rb") as audio_file:
        transcription = openai_client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
        st.session_state.messages.append(
            {"role": "user", "content": transcription.text}
        )
        return transcription.text
262
 
263
  def process_video(video_path, seconds_per_frame=1):
264
+ """Extract frames from video"""
265
  vid = cv2.VideoCapture(video_path)
266
  total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
267
  fps = vid.get(cv2.CAP_PROP_FPS)
268
+ skip = int(fps*seconds_per_frame)
269
  frames_b64 = []
270
  for i in range(0, total, skip):
271
  vid.set(cv2.CAP_PROP_POS_FRAMES, i)
272
  ret, frame = vid.read()
273
+ if not ret:
274
  break
275
  _, buf = cv2.imencode(".jpg", frame)
276
  frames_b64.append(base64.b64encode(buf).decode("utf-8"))
 
278
  return frames_b64
279
 
280
def process_video_with_gpt(video_path, prompt):
    """Analyze extracted video frames with GPT-4V and return the reply text."""
    frames = process_video(video_path)
    # One image_url part per extracted frame, inlined as base64 JPEG.
    frame_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}}
        for fr in frames
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze video frames."},
            {"role": "user", "content": [{"type": "text", "text": prompt}, *frame_parts]},
        ]
    )
    return resp.choices[0].message.content
294
 
295
  # 🤖 9. AI Model Integration
296
+
297
def save_full_transcript(query, text):
    """
    Save a full transcript of Arxiv results as a Markdown file.

    Returns the filename produced by create_file (the previous version
    discarded it and returned None, unlike the rest of the file helpers).
    """
    return create_file(query, text, "md")
300
+
301
+ # ------------------------------
302
+ # NEW: Helper to parse references
303
+ # ------------------------------
304
def parse_arxiv_refs(ref_text: str):
    """
    Parse the multi-line references returned by the RAG pipeline.

    Typical line shapes:
        1) [Paper Title 2023] This is the summary ...
        2) [Another Title (2024)] Another summary text ...

    Args:
        ref_text: Raw reference text, one paper per line.

    Returns:
        List of dicts: {'title': str, 'summary': str, 'year': int or None}.
    """
    results = []
    for raw_line in ref_text.split('\n'):
        line = raw_line.strip()
        if not line:
            continue

        # Bracketed portion [Title ...] is the title when present.
        title_match = re.search(r"\[([^\]]+)\]", line)
        if title_match:
            raw_title = title_match.group(1).strip()
            remainder = line.replace(title_match.group(0), "").strip()
        else:
            # No bracket — treat the whole line as summary text.
            raw_title = "No Title"
            remainder = line

        # Fix: drop a leading enumeration marker ("1)", "2)") so it does
        # not leak into the summary text.
        summary = re.sub(r'^\d+\)\s*', '', remainder).strip()

        # Guess a 4-digit year (2000-2099): title first, then summary.
        year_match = re.search(r'(20\d{2})', raw_title)
        if not year_match:
            year_match = re.search(r'(20\d{2})', summary)
        year = int(year_match.group(1)) if year_match else None

        results.append({
            'title': raw_title,
            'summary': summary,
            'year': year
        })
    return results
350
 
 
 
 
 
 
 
 
 
 
 
351
 
352
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
                      titles_summary=True, full_audio=False):
    """Run an Arxiv RAG search for *q* and render text plus audio results.

    Queries the Hugging Face RAG space twice (once for the reference list,
    once for the LLM answer), renders the combined markdown, optionally
    synthesizes several audio variants, lists the parsed papers newest-first
    with per-paper TTS buttons, and always saves the result to a markdown
    file via create_file.

    Args:
        q: User query string.
        vocal_summary: Speak a short audio version of the LLM answer.
        extended_refs: Speak the (long) reference list as audio.
        titles_summary: Speak an all-in-one reading of the bracketed titles.
        full_audio: Speak the complete response (answer + references).

    Returns:
        The combined markdown result string.
    """
    start = time.time()

    # 🎯 1) Query the HF RAG pipeline
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    refs = client.predict(q, 20, "Semantic Search",
                          "mistralai/Mixtral-8x7B-Instruct-v0.1",
                          api_name="/update_with_rag_md")[0]
    r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True,
                        api_name="/ask_llm")

    # 🎯 2) Combine for final text output
    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
    st.markdown(result)

    # 🎯 3) Bulk audio variants, each behind its own option flag.
    if full_audio:
        complete_text = (f"Complete response for query: {q}. "
                         f"{clean_for_speech(r2)} {clean_for_speech(refs)}")
        _tts_section(complete_text, "### 📚 Full Audio")

    if vocal_summary:
        _tts_section(clean_for_speech(r2), "### 🎙 Short Audio")

    if extended_refs:
        summaries_text = clean_for_speech(
            "Extended references: " + refs.replace('"', ''))
        _tts_section(summaries_text, "### 📜 Long Refs")

    # 🎯 4) Parse references and show them newest-first with per-paper TTS.
    _render_individual_papers(parse_arxiv_refs(refs))

    # 🎯 5) Optional all-in-one reading of just the bracketed titles.
    if titles_summary:
        _speak_title_roundup(refs)

    elapsed = time.time() - start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")

    # Always create a file with the result
    create_file(q, result, "md")

    return result


def _tts_section(text, heading):
    """Synthesize *text* with edge-tts and render a labelled audio player."""
    audio_file = speak_with_edge_tts(text)
    st.write(heading)
    play_and_download_audio(audio_file)


def _render_individual_papers(parsed_refs):
    """Render each parsed paper (newest first) with per-paper TTS buttons.

    NOTE(review): these st.button widgets are created during a rerun that was
    itself triggered by another widget; clicking them starts a fresh rerun in
    which this code path may not execute again — confirm the buttons actually
    fire in the deployed app.
    """
    # Sort by year descending; unknown years (None) sink to the bottom.
    # To skip papers older than 2022, filter here:
    # parsed_refs = [r for r in parsed_refs
    #                if r["year"] is not None and r["year"] >= 2022]
    parsed_refs.sort(key=lambda x: x["year"] if x["year"] else 0, reverse=True)

    st.write("## Individual Papers (Most Recent First)")
    for idx, paper in enumerate(parsed_refs):
        year_str = paper["year"] if paper["year"] else "Unknown Year"
        st.markdown(f"**{idx+1}. {paper['title']}** \n*Year:* {year_str}")
        st.markdown(f"*Summary:* {paper['summary']}")

        # Two TTS buttons per paper: title only, or title + summary.
        colA, colB = st.columns(2)
        with colA:
            if st.button("🔊 Title", key=f"title_{idx}"):
                text_tts = clean_for_speech(paper['title'])
                play_and_download_audio(speak_with_edge_tts(text_tts))

        with colB:
            if st.button("🔊 Title+Summary", key=f"summary_{idx}"):
                text_tts = clean_for_speech(
                    paper['title'] + ". " + paper['summary'])
                play_and_download_audio(speak_with_edge_tts(text_tts))

        st.write("---")


def _speak_title_roundup(refs):
    """Extract bracketed [titles] from *refs* and speak them as one clip."""
    titles = []
    for line in refs.split('\n'):
        m = re.search(r"\[([^\]]+)\]", line)
        if m:
            titles.append(m.group(1))
    if titles:
        titles_text = clean_for_speech("Titles: " + ", ".join(titles))
        _tts_section(titles_text, "### 🔖 Titles (All-In-One)")
440
 
441
def process_with_gpt(text):
    """Send *text* to the configured OpenAI chat model and render the exchange.

    Appends the user turn and the assistant answer to
    ``st.session_state.messages``, echoes both in the Streamlit chat UI, and
    persists the Q/A pair to a markdown file via create_file.

    Args:
        text: The user's message; falsy input is ignored.

    Returns:
        The assistant's answer string, or None when *text* is empty.
    """
    if not text:
        return
    # Guard against a missing history list so a direct call cannot fail
    # before the app's session-state initialization has run.
    st.session_state.setdefault("messages", [])
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        # NOTE(review): relies on a module-level `openai_client` created
        # elsewhere in this file — confirm it is initialized before first use.
        completion = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False,
        )
        ans = completion.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(text, ans, "md")
        st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans
459
+
460
def process_with_claude(text):
    """Send *text* to Anthropic Claude and render the exchange.

    Echoes the user turn and Claude's answer in the Streamlit chat UI,
    persists the Q/A pair to a markdown file via create_file, and appends
    the pair to ``st.session_state.chat_history``.

    Args:
        text: The user's message; falsy input is ignored.

    Returns:
        Claude's answer string, or None when *text* is empty.
    """
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        r = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}],
        )
        ans = r.content[0].text
        # Label matches the model actually in use (claude-3-sonnet); the old
        # "Claude-3.5" label misrepresented the model version.
        st.write("Claude-3-Sonnet: " + ans)
        create_file(text, ans, "md")
        # Guard against a missing history list so a direct call cannot fail
        # before the app's session-state initialization has run.
        st.session_state.setdefault("chat_history", [])
        st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
477
+
478
  # 📂 10. File Management
479
  def create_zip_of_files(md_files, mp3_files):
480
+ """Create zip with intelligent naming"""
481
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
482
  all_files = md_files + mp3_files
483
  if not all_files:
 
491
  all_content.append(file.read())
492
  elif f.endswith('.mp3'):
493
  all_content.append(os.path.basename(f))
494
+
495
  combined_content = " ".join(all_content)
496
  info_terms = get_high_info_terms(combined_content)
497
+
498
  timestamp = datetime.now().strftime("%y%m_%H%M")
499
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:3])
500
  zip_name = f"{timestamp}_{name_text}.zip"
501
+
502
  with zipfile.ZipFile(zip_name,'w') as z:
503
  for f in all_files:
504
  z.write(f)
505
+
506
  return zip_name
507
 
508
  def load_files_for_sidebar():
509
+ """Load and group files for sidebar display"""
510
  md_files = glob.glob("*.md")
511
  mp3_files = glob.glob("*.mp3")
512
 
 
528
  return groups, sorted_prefixes
529
 
530
  def extract_keywords_from_md(files):
531
+ """Extract keywords from markdown files"""
532
  text = ""
533
  for f in files:
534
  if f.endswith(".md"):
 
537
  return get_high_info_terms(text)
538
 
539
  def display_file_manager_sidebar(groups, sorted_prefixes):
540
+ """Display file manager in sidebar"""
541
  st.sidebar.title("🎵 Audio & Docs Manager")
542
 
543
  all_md = []
 
564
  if st.button("⬇️ ZipAll"):
565
  z = create_zip_of_files(all_md, all_mp3)
566
  if z:
567
+ st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)
568
 
569
  for prefix in sorted_prefixes:
570
  files = groups[prefix]
571
  kw = extract_keywords_from_md(files)
572
  keywords_str = " ".join(kw) if kw else "No Keywords"
573
  with st.sidebar.expander(f"{prefix} Files ({len(files)}) - KW: {keywords_str}", expanded=True):
574
+ c1,c2 = st.columns(2)
575
  with c1:
576
+ if st.button("👀ViewGrp", key="view_group_"+prefix):
577
  st.session_state.viewing_prefix = prefix
578
  with c2:
579
+ if st.button("🗑DelGrp", key="del_group_"+prefix):
580
  for f in files:
581
  os.remove(f)
582
  st.success(f"Deleted group {prefix}!")
 
588
  st.write(f"**{fname}** - {ctime}")
589
 
590
  # 🎯 11. Main Application
 
 
 
 
 
 
 
 
591
  def main():
592
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
593
+ tab_main = st.radio("Action:",["🎤 Voice","📸 Media","🔍 ArXiv","📝 Editor"],horizontal=True)
594
+
595
+ mycomponent = components.declare_component("mycomponent", path="mycomponent")
596
+ val = mycomponent(my_input_value="Hello")
597
+
598
+ # Show input in a text box for editing if detected
599
+ if val:
600
+ val_stripped = val.replace('\n', ' ')
601
+ edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
602
+ run_option = st.selectbox("Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
603
+ col1, col2 = st.columns(2)
604
+ with col1:
605
+ autorun = st.checkbox("⚙ AutoRun", value=True)
606
+ with col2:
607
+ full_audio = st.checkbox("📚FullAudio", value=False,
608
+ help="Generate full audio response")
609
+
610
+ input_changed = (val != st.session_state.old_val)
611
+
612
+ if autorun and input_changed:
613
+ st.session_state.old_val = val
614
+ if run_option == "Arxiv":
615
+ perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
616
+ titles_summary=True, full_audio=full_audio)
617
+ else:
618
+ if run_option == "GPT-4o":
619
+ process_with_gpt(edited_input)
620
+ elif run_option == "Claude-3.5":
621
+ process_with_claude(edited_input)
622
+ else:
623
+ if st.button("▶ Run"):
624
+ st.session_state.old_val = val
625
+ if run_option == "Arxiv":
626
+ perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
627
+ titles_summary=True, full_audio=full_audio)
628
+ else:
629
+ if run_option == "GPT-4o":
630
+ process_with_gpt(edited_input)
631
+ elif run_option == "Claude-3.5":
632
+ process_with_claude(edited_input)
633
 
 
634
  if tab_main == "🔍 ArXiv":
635
  st.subheader("🔍 Query ArXiv")
636
+ q = st.text_input("🔍 Query:")
637
 
638
  st.markdown("### 🎛 Options")
639
+ vocal_summary = st.checkbox("🎙ShortAudio", value=True)
640
+ extended_refs = st.checkbox("📜LongRefs", value=False)
641
+ titles_summary = st.checkbox("🔖TitlesOnly", value=True)
642
+ full_audio = st.checkbox("📚FullAudio", value=False,
643
+ help="Full audio of results")
644
+ full_transcript = st.checkbox("🧾FullTranscript", value=False,
645
+ help="Generate a full transcript file")
646
+
647
+ if q and st.button("🔍Run"):
648
+ result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
649
+ titles_summary=titles_summary, full_audio=full_audio)
 
 
 
 
650
  if full_transcript:
651
  save_full_transcript(q, result)
652
 
653
  st.markdown("### Change Prompt & Re-Run")
654
+ q_new = st.text_input("🔄 Modify Query:")
655
  if q_new and st.button("🔄 Re-Run with Modified Query"):
656
+ result = perform_ai_lookup(q_new, vocal_summary=vocal_summary, extended_refs=extended_refs,
657
+ titles_summary=titles_summary, full_audio=full_audio)
 
 
 
 
 
 
658
  if full_transcript:
659
  save_full_transcript(q_new, result)
660
 
 
665
  if st.button("📨 Send"):
666
  process_with_gpt(user_text)
667
  st.subheader("📜 Chat History")
668
+ t1,t2=st.tabs(["Claude History","GPT-4o History"])
669
  with t1:
670
+ for c in st.session_state.chat_history:
671
  st.write("**You:**", c["user"])
672
  st.write("**Claude:**", c["claude"])
673
  with t2:
674
+ for m in st.session_state.messages:
675
  with st.chat_message(m["role"]):
676
  st.markdown(m["content"])
677
 
 
679
  st.header("📸 Images & 🎥 Videos")
680
  tabs = st.tabs(["🖼 Images", "🎥 Video"])
681
  with tabs[0]:
682
+ imgs = glob.glob("*.png")+glob.glob("*.jpg")
683
  if imgs:
684
+ c = st.slider("Cols",1,5,3)
685
+ cols = st.columns(c)
686
+ for i,f in enumerate(imgs):
687
+ with cols[i%c]:
688
+ st.image(Image.open(f),use_container_width=True)
689
  if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
690
+ a = process_image(f,"Describe this image.")
691
+ st.markdown(a)
692
  else:
693
  st.write("No images found.")
 
694
  with tabs[1]:
695
+ vids = glob.glob("*.mp4")
696
  if vids:
697
  for v in vids:
698
  with st.expander(f"🎥 {os.path.basename(v)}"):
699
  st.video(v)
700
  if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
701
+ a = process_video_with_gpt(v,"Describe video.")
702
+ st.markdown(a)
703
  else:
704
  st.write("No videos found.")
705
 
706
  elif tab_main == "📝 Editor":
707
+ if getattr(st.session_state,'current_file',None):
708
+ st.subheader(f"Editing: {st.session_state.current_file}")
709
+ new_text = st.text_area("✏️ Content:", st.session_state.file_content, height=300)
710
+ if st.button("💾 Save"):
711
+ with open(st.session_state.current_file,'w',encoding='utf-8') as f:
712
+ f.write(new_text)
713
+ st.success("Updated!")
714
+ st.session_state.should_rerun = True
 
 
 
 
 
715
  else:
716
+ st.write("Select a file from the sidebar to edit.")
717
 
 
718
  groups, sorted_prefixes = load_files_for_sidebar()
719
  display_file_manager_sidebar(groups, sorted_prefixes)
720
 
 
726
  ext = os.path.splitext(fname)[1].lower().strip('.')
727
  st.write(f"### {fname}")
728
  if ext == "md":
729
+ content = open(f,'r',encoding='utf-8').read()
 
730
  st.markdown(content)
731
  elif ext == "mp3":
732
  st.audio(f)
 
737
 
738
  if st.session_state.should_rerun:
739
  st.session_state.should_rerun = False
740
+ st.rerun()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
741
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
742
  if __name__=="__main__":
743
  main()