awacke1 committed
Commit 7387bfc · verified · 1 Parent(s): 8cb044d

Create backup15.app.py

Files changed (1)
backup15.app.py  +823  −0
backup15.app.py ADDED
@@ -0,0 +1,823 @@
+import streamlit as st
+import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
+import plotly.graph_objects as go
+import streamlit.components.v1 as components
+from datetime import datetime
+from audio_recorder_streamlit import audio_recorder
+from bs4 import BeautifulSoup
+from collections import defaultdict, deque, Counter
+from dotenv import load_dotenv
+from gradio_client import Client
+from huggingface_hub import InferenceClient
+from io import BytesIO
+from PIL import Image
+from PyPDF2 import PdfReader
+from urllib.parse import quote
+from xml.etree import ElementTree as ET
+from openai import OpenAI
+import extra_streamlit_components as stx
+from streamlit.runtime.scriptrunner import get_script_run_ctx
+import asyncio
+import edge_tts
+from streamlit_marquee import streamlit_marquee
+
+# 🎯 1. Core Configuration & Setup
+st.set_page_config(
+    page_title="🚲TalkingAIResearcher🏆",
+    page_icon="🚲🏆",
+    layout="wide",
+    initial_sidebar_state="auto",
+    menu_items={
+        'Get Help': 'https://huggingface.co/awacke1',
+        'Report a bug': 'https://huggingface.co/spaces/awacke1',
+        'About': "🚲TalkingAIResearcher🏆"
+    }
+)
+load_dotenv()
+
+# Available English voices for Edge TTS
+EDGE_TTS_VOICES = [
+    "en-US-AriaNeural",
+    "en-US-GuyNeural",
+    "en-US-JennyNeural",
+    "en-GB-SoniaNeural",
+    "en-GB-RyanNeural",
+    "en-AU-NatashaNeural",
+    "en-AU-WilliamNeural",
+    "en-CA-ClaraNeural",
+    "en-CA-LiamNeural"
+]
+
+# Initialize session state variables
+if 'marquee_settings' not in st.session_state:
+    st.session_state['marquee_settings'] = {
+        "background": "#1E1E1E",
+        "color": "#FFFFFF",
+        "font-size": "14px",
+        "animationDuration": "10s",
+        "width": "100%",
+        "lineHeight": "35px"
+    }
+
+if 'tts_voice' not in st.session_state:
+    st.session_state['tts_voice'] = EDGE_TTS_VOICES[0]
+if 'audio_format' not in st.session_state:
+    st.session_state['audio_format'] = 'mp3'
+if 'transcript_history' not in st.session_state:
+    st.session_state['transcript_history'] = []
+if 'chat_history' not in st.session_state:
+    st.session_state['chat_history'] = []
+if 'openai_model' not in st.session_state:
+    st.session_state['openai_model'] = "gpt-4o-2024-05-13"
+if 'messages' not in st.session_state:
+    st.session_state['messages'] = []
+if 'last_voice_input' not in st.session_state:
+    st.session_state['last_voice_input'] = ""
+if 'editing_file' not in st.session_state:
+    st.session_state['editing_file'] = None
+if 'edit_new_name' not in st.session_state:
+    st.session_state['edit_new_name'] = ""
+if 'edit_new_content' not in st.session_state:
+    st.session_state['edit_new_content'] = ""
+if 'viewing_prefix' not in st.session_state:
+    st.session_state['viewing_prefix'] = None
+if 'should_rerun' not in st.session_state:
+    st.session_state['should_rerun'] = False
+if 'old_val' not in st.session_state:
+    st.session_state['old_val'] = None
+if 'last_query' not in st.session_state:
+    st.session_state['last_query'] = ""
+if 'marquee_content' not in st.session_state:
+    st.session_state['marquee_content'] = "🚀 Welcome to TalkingAIResearcher | 🤖 Your Research Assistant"
+
+# 🔑 2. API Setup & Clients
+openai_api_key = os.getenv('OPENAI_API_KEY', "")
+anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
+xai_key = os.getenv('xai', "")
+if 'OPENAI_API_KEY' in st.secrets:
+    openai_api_key = st.secrets['OPENAI_API_KEY']
+if 'ANTHROPIC_API_KEY' in st.secrets:
+    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
+
+openai.api_key = openai_api_key
+claude_client = anthropic.Anthropic(api_key=anthropic_key)
+openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
+HF_KEY = os.getenv('HF_KEY')
+API_URL = os.getenv('API_URL')
+
+# Constants
+FILE_EMOJIS = {
+    "md": "📝",
+    "mp3": "🎵",
+    "wav": "🔊"
+}
+
+def get_central_time():
+    """Get current time in US Central timezone"""
+    central = pytz.timezone('US/Central')
+    return datetime.now(central)
+
+def format_timestamp_prefix():
+    """Generate timestamp prefix in format MM_dd_yy_hh_mm_AM/PM"""
+    ct = get_central_time()
+    return ct.strftime("%m_%d_%y_%I_%M_%p")
+
+def initialize_marquee_settings():
+    """Initialize marquee settings in session state"""
+    if 'marquee_settings' not in st.session_state:
+        st.session_state['marquee_settings'] = {
+            "background": "#1E1E1E",
+            "color": "#FFFFFF",
+            "font-size": "14px",
+            "animationDuration": "10s",
+            "width": "100%",
+            "lineHeight": "35px"
+        }
+
+def get_marquee_settings():
+    """Get or update marquee settings from session state"""
+    initialize_marquee_settings()
+    return st.session_state['marquee_settings']
+
+def update_marquee_settings_ui():
+    """Update marquee settings via UI controls"""
+    initialize_marquee_settings()
+    st.sidebar.markdown("### 🎯 Marquee Settings")
+    cols = st.sidebar.columns(2)
+    with cols[0]:
+        bg_color = st.color_picker("🎨 Background",
+                                   st.session_state['marquee_settings']["background"],
+                                   key="bg_color_picker")
+        text_color = st.color_picker("✍️ Text",
+                                     st.session_state['marquee_settings']["color"],
+                                     key="text_color_picker")
+    with cols[1]:
+        font_size = st.slider("📏 Size", 10, 24, 14, key="font_size_slider")
+        duration = st.slider("⏱️ Speed", 1, 20, 10, key="duration_slider")
+
+    st.session_state['marquee_settings'].update({
+        "background": bg_color,
+        "color": text_color,
+        "font-size": f"{font_size}px",
+        "animationDuration": f"{duration}s"
+    })
+
+def display_marquee(text, settings, key_suffix=""):
+    """Display marquee with given text and settings"""
+    truncated_text = text[:280] + "..." if len(text) > 280 else text
+    streamlit_marquee(
+        content=truncated_text,
+        **settings,
+        key=f"marquee_{key_suffix}"
+    )
+    st.write("")
+
+def get_high_info_terms(text: str, top_n=10) -> list:
+    """Return the top_n most frequent high-information unigrams and bigrams."""
+    stop_words = set(['the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with'])
+    words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower())
+    bi_grams = [' '.join(pair) for pair in zip(words, words[1:])]
+    combined = words + bi_grams
+    filtered = [term for term in combined if term not in stop_words and len(term.split()) <= 2]
+    counter = Counter(filtered)
+    return [term for term, freq in counter.most_common(top_n)]
+
+def clean_text_for_filename(text: str) -> str:
+    """Normalize text into a short, underscore-separated filename fragment."""
+    text = text.lower()
+    text = re.sub(r'[^\w\s-]', '', text)
+    words = text.split()
+    stop_short = set(['the', 'and', 'for', 'with', 'this', 'that'])
+    filtered = [w for w in words if len(w) > 3 and w not in stop_short]
+    return '_'.join(filtered)[:200]
+
+def generate_filename(prompt, response, file_type="md"):
+    """Build a timestamped filename from high-information terms in the prompt and response."""
+    prefix = format_timestamp_prefix() + "_"
+    combined = (prompt + " " + response).strip()
+    info_terms = get_high_info_terms(combined, top_n=10)
+    snippet = (prompt[:100] + " " + response[:100]).strip()
+    snippet_cleaned = clean_text_for_filename(snippet)
+    name_parts = info_terms + [snippet_cleaned]
+    full_name = '_'.join(name_parts)
+    if len(full_name) > 150:
+        full_name = full_name[:150]
+    return f"{prefix}{full_name}.{file_type}"
+
+def create_file(prompt, response, file_type="md"):
+    """Write the prompt and response to a generated filename and return it."""
+    filename = generate_filename(prompt.strip(), response.strip(), file_type)
+    with open(filename, 'w', encoding='utf-8') as f:
+        f.write(prompt + "\n\n" + response)
+    return filename
+
+def get_download_link(file, file_type="zip"):
+    """Return an HTML download link with a base64-encoded payload for the given file."""
+    with open(file, "rb") as f:
+        b64 = base64.b64encode(f.read()).decode()
+    if file_type == "zip":
+        return f'<a href="data:application/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
+    elif file_type == "mp3":
+        return f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file)}">🎵 Download {os.path.basename(file)}</a>'
+    elif file_type == "wav":
+        return f'<a href="data:audio/wav;base64,{b64}" download="{os.path.basename(file)}">🔊 Download {os.path.basename(file)}</a>'
+    elif file_type == "md":
+        return f'<a href="data:text/markdown;base64,{b64}" download="{os.path.basename(file)}">📝 Download {os.path.basename(file)}</a>'
+    else:
+        return f'<a href="data:application/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}</a>'
+
+def clean_for_speech(text: str) -> str:
+    """Strip markup, URLs, and extra whitespace so text reads cleanly as speech."""
+    text = text.replace("\n", " ")
+    text = text.replace("</s>", " ")
+    text = text.replace("#", "")
+    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
+    text = re.sub(r"\s+", " ", text).strip()
+    return text
+
+async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"):
+    """Generate speech audio for text with Edge TTS and return the output filename."""
+    text = clean_for_speech(text)
+    if not text.strip():
+        return None
+    rate_str = f"{rate:+d}%"
+    pitch_str = f"{pitch:+d}Hz"
+    communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
+    out_fn = generate_filename(text, text, file_type=file_format)
+    await communicate.save(out_fn)
+    return out_fn
+
+def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"):
+    """Synchronous wrapper that runs the async Edge TTS generation."""
+    return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch, file_format))
+
+def play_and_download_audio(file_path, file_type="mp3"):
+    """Render an audio player plus a download link for an existing audio file."""
+    if file_path and os.path.exists(file_path):
+        st.audio(file_path)
+        dl_link = get_download_link(file_path, file_type=file_type)
+        st.markdown(dl_link, unsafe_allow_html=True)
+
+def save_qa_with_audio(question, answer, voice=None):
+    """Save Q&A to markdown and generate audio"""
+    if not voice:
+        voice = st.session_state['tts_voice']
+
+    # Create markdown file
+    combined_text = f"# Question\n{question}\n\n# Answer\n{answer}"
+    md_file = create_file(question, answer, "md")
+
+    # Generate audio file
+    audio_text = f"Question: {question}\n\nAnswer: {answer}"
+    audio_file = speak_with_edge_tts(
+        audio_text,
+        voice=voice,
+        file_format=st.session_state['audio_format']
+    )
+
+    return md_file, audio_file
+
+def process_paper_content(paper):
+    """Build marquee and audio text for a single paper."""
+    marquee_text = f"📄 {paper['title']} | 👤 {paper['authors'][:100]} | 📝 {paper['summary'][:100]}"
+    audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}"
+    return marquee_text, audio_text
+
+def create_paper_audio_files(papers, input_question):
+    """Generate and display an audio file for each paper, storing the results on each paper dict."""
+    for paper in papers:
+        try:
+            marquee_text, audio_text = process_paper_content(paper)
+
+            audio_text = clean_for_speech(audio_text)
+            file_format = st.session_state['audio_format']
+            audio_file = speak_with_edge_tts(audio_text,
+                                             voice=st.session_state['tts_voice'],
+                                             file_format=file_format)
+            paper['full_audio'] = audio_file
+
+            st.write(f"### {FILE_EMOJIS.get(file_format, '')} {os.path.basename(audio_file)}")
+            play_and_download_audio(audio_file, file_type=file_format)
+            paper['marquee_text'] = marquee_text
+
+        except Exception as e:
+            st.warning(f"Error processing paper {paper['title']}: {str(e)}")
+            paper['full_audio'] = None
+            paper['marquee_text'] = None
+
+def display_papers(papers, marquee_settings):
+    """Render up to 20 papers with marquee headers, metadata, and audio players."""
+    st.write("## Research Papers")
+
+    papercount = 0
+    for paper in papers:
+        papercount += 1
+        if papercount <= 20:
+            if paper.get('marquee_text'):
+                display_marquee(paper['marquee_text'],
+                                marquee_settings,
+                                key_suffix=f"paper_{papercount}")
+
+            with st.expander(f"{papercount}. 📄 {paper['title']}", expanded=True):
+                st.markdown(f"**{paper['date']} | {paper['title']} | ⬇️**")
+                st.markdown(f"*{paper['authors']}*")
+                st.markdown(paper['summary'])
+
+                if paper.get('full_audio'):
+                    st.write("📚 Paper Audio")
+                    file_ext = os.path.splitext(paper['full_audio'])[1].lower().strip('.')
+                    if file_ext in ['mp3', 'wav']:
+                        st.audio(paper['full_audio'])
+
+def parse_arxiv_refs(ref_text: str):
+    """Parse the markdown reference list returned by the ArXiv RAG endpoint into paper dicts."""
+    if not ref_text:
+        return []
+
+    results = []
+    current_paper = {}
+    lines = ref_text.split('\n')
+
+    for i, line in enumerate(lines):
+        if line.count('|') == 2:
+            if current_paper:
+                results.append(current_paper)
+                if len(results) >= 20:
+                    break
+
+            try:
+                header_parts = line.strip('* ').split('|')
+                date = header_parts[0].strip()
+                title = header_parts[1].strip()
+                url_match = re.search(r'(https://arxiv.org/\S+)', line)
+                url = url_match.group(1) if url_match else f"paper_{len(results)}"
+
+                current_paper = {
+                    'date': date,
+                    'title': title,
+                    'url': url,
+                    'authors': '',
+                    'summary': '',
+                    'content_start': i + 1
+                }
+            except Exception as e:
+                st.warning(f"Error parsing paper header: {str(e)}")
+                current_paper = {}
+                continue
+
+        elif current_paper:
+            if not current_paper['authors']:
+                current_paper['authors'] = line.strip('* ')
+            else:
+                if current_paper['summary']:
+                    current_paper['summary'] += ' ' + line.strip()
+                else:
+                    current_paper['summary'] = line.strip()
+
+    if current_paper:
+        results.append(current_paper)
+
+    return results[:20]
+
+# ---------------------------- Edit 1/11/2025 - add a constitution to my arxiv system template to build a configurable character and personality for IO.
+
+def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
+                      titles_summary=True, full_audio=False):
+    """Run the ArXiv RAG lookup for query q, save the result with audio, and display the papers."""
+    start = time.time()
+
+    #SCIENCE_PROBLEM = "Solving visual acuity of UI screens using gradio and streamlit apps that run reactive style components using html components and apis across gradio and streamlit partner apps - a cloud of contiguous org supporting ai agents"
+    #SONG_STYLE = "techno, trance, industrial"
+
+    ai_constitution = """
+    You are a talented AI coder and songwriter with a unique ability to explain scientific concepts through music with code easter eggs. Your task is to create a song that not only entertains but also educates listeners about a specific science problem and its potential solutions.
+
+    1. First, carefully read and analyze the problem provided:
+    <science_problem>
+    {{q}}
+    </science_problem>
+
+    2. Next, consider the style requested:
+    <song_style>
+    {{SONG_STYLE}}
+    </song_style>
+
+    3. Follow these steps to create your output:
+
+    1. Analyze the problem:
+    - Identify the key issues and challenges
+    - Note any potential solutions or technologies mentioned, especially in AI
+    - Consider how these concepts can be simplified for a general audience
+
+    2. Plan your structure. Document and enumerate in markdown outlines with emojis:
+    - Decide on a format that fits the style
+    - Plan to introduce the problem
+    - Highlight key points or solutions
+
+    3. Write:
+    - Begin with an attention-grabbing opening line
+    - Use metaphors and analogies to explain complex concepts
+    - Ensure the flow naturally fits the rhythm of the chosen style
+    - Include scientific terminology, but explain it in simple terms
+
+    4. Incorporate scientific explanations:
+    - Weave factual information throughout the verses
+    - Use the chorus to reinforce main ideas or solutions
+    - Ensure that the scientific content is accurate and up-to-date
+
+    5. Match the requested style:
+    - Adapt your word choice and phrasing to fit the genre
+    - Consider the typical rhythm and structure of this style
+    - If applicable, include style-specific elements
+
+    6. Review and refine; add useful paper titles, keywords, and descriptions of topics and concepts:
+    - Check that it effectively communicates the problem and solutions
+    - Ensure it is catchy and memorable
+    - Verify it maintains the requested style throughout
+    """
+
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    refs = client.predict(q, 20, "Semantic Search",
+                          "mistralai/Mixtral-8x7B-Instruct-v0.1",
+                          api_name="/update_with_rag_md")[0]
+
+    #st.code(refs)
+
+    r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1",
+                        True, api_name="/ask_llm")
+
+    # Alternate models:
+    # mistralai/Mistral-Nemo-Instruct-2407
+    # mistralai/Mistral-7B-Instruct-v0.3
+
+    #st.code(r2)
+
+    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+    #st.markdown(result)
+    #st.code(ai_constitution)
+
+    md_file, audio_file = save_qa_with_audio(q, result)
+
+    st.subheader("📝 Main Response Audio")
+    play_and_download_audio(audio_file, st.session_state['audio_format'])
+
+    papers = parse_arxiv_refs(refs)
+    if papers:
+        create_paper_audio_files(papers, input_question=q)
+        display_papers(papers, get_marquee_settings())
+    else:
+        st.warning("No papers found in the response.")
+
+    elapsed = time.time() - start
+    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
+
+    return result
+
+def process_voice_input(text):
+    """Run an AI lookup for the submitted voice/text input and save the results."""
+    if not text:
+        return
+
+    st.subheader("🔍 Search Results")
+    result = perform_ai_lookup(
+        text,
+        vocal_summary=True,
+        extended_refs=False,
+        titles_summary=True,
+        full_audio=True
+    )
+
+    md_file, audio_file = save_qa_with_audio(text, result)
+
+    st.subheader("📝 Generated Files")
+    st.write(f"Markdown: {md_file}")
+    st.write(f"Audio: {audio_file}")
+    play_and_download_audio(audio_file, st.session_state['audio_format'])
+
+def load_files_for_sidebar():
+    """Group generated .md/.mp3/.wav files by their timestamp prefix for the sidebar."""
+    md_files = glob.glob("*.md")
+    mp3_files = glob.glob("*.mp3")
+    wav_files = glob.glob("*.wav")
+
+    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
+    all_files = md_files + mp3_files + wav_files
+
+    groups = defaultdict(list)
+    prefix_length = len("MM_dd_yy_hh_mm_AP")
+
+    for f in all_files:
+        basename = os.path.basename(f)
+        if len(basename) >= prefix_length and '_' in basename:
+            group_name = basename[:prefix_length]
+            groups[group_name].append(f)
+        else:
+            groups['Other'].append(f)
+
+    sorted_groups = sorted(groups.items(),
+                           key=lambda x: x[0] if x[0] != 'Other' else '',
+                           reverse=True)
+    return sorted_groups
+
+def display_file_manager_sidebar(groups_sorted):
+    """Sidebar file manager: bulk delete/zip buttons plus per-group view and delete controls."""
+    st.sidebar.title("🎵 Audio & Docs Manager")
+
+    all_md = []
+    all_mp3 = []
+    all_wav = []
+    for _, files in groups_sorted:
+        for f in files:
+            if f.endswith(".md"):
+                all_md.append(f)
+            elif f.endswith(".mp3"):
+                all_mp3.append(f)
+            elif f.endswith(".wav"):
+                all_wav.append(f)
+
+    col1, col2, col3, col4 = st.sidebar.columns(4)
+    with col1:
+        if st.button("🗑 DelMD"):
+            for f in all_md:
+                os.remove(f)
+            st.session_state.should_rerun = True
+    with col2:
+        if st.button("🗑 DelMP3"):
+            for f in all_mp3:
+                os.remove(f)
+            st.session_state.should_rerun = True
+    with col3:
+        if st.button("🗑 DelWAV"):
+            for f in all_wav:
+                os.remove(f)
+            st.session_state.should_rerun = True
+    with col4:
+        if st.button("⬇️ ZipAll"):
+            zip_name = create_zip_of_files(all_md, all_mp3, all_wav, st.session_state.get('last_query', ''))
+            if zip_name:
+                st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True)
+
+    for group_name, files in groups_sorted:
+        if group_name == 'Other':
+            group_label = 'Other Files'
+        else:
+            try:
+                timestamp_dt = datetime.strptime(group_name, "%m_%d_%y_%I_%M_%p")
+                group_label = timestamp_dt.strftime("%b %d, %Y %I:%M %p")
+            except ValueError:
+                group_label = group_name
+
+        with st.sidebar.expander(f"📁 {group_label} ({len(files)})", expanded=True):
+            c1, c2 = st.columns(2)
+            with c1:
+                if st.button("👀 View", key=f"view_group_{group_name}"):
+                    st.session_state.viewing_prefix = group_name
+            with c2:
+                if st.button("🗑 Del", key=f"del_group_{group_name}"):
+                    for f in files:
+                        os.remove(f)
+                    st.success(f"Deleted group {group_label}!")
+                    st.session_state.should_rerun = True
+
+            for f in files:
+                fname = os.path.basename(f)
+                ext = os.path.splitext(fname)[1].lower()
+                emoji = FILE_EMOJIS.get(ext.strip('.'), '')
+                mtime = os.path.getmtime(f)
+                ctime = datetime.fromtimestamp(mtime).strftime("%I:%M:%S %p")
+                st.write(f"{emoji} **{fname}** - {ctime}")
+
+def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
+    """Zip all generated files into an archive named from their high-information terms."""
+    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
+    all_files = md_files + mp3_files + wav_files
+    if not all_files:
+        return None
+
+    all_content = []
+    for f in all_files:
+        if f.endswith('.md'):
+            with open(f, 'r', encoding='utf-8') as file:
+                all_content.append(file.read())
+        elif f.endswith('.mp3') or f.endswith('.wav'):
+            basename = os.path.splitext(os.path.basename(f))[0]
+            words = basename.replace('_', ' ')
+            all_content.append(words)
+
+    all_content.append(input_question)
+    combined_content = " ".join(all_content)
+    info_terms = get_high_info_terms(combined_content, top_n=10)
+
+    timestamp = format_timestamp_prefix()
+    name_text = '_'.join(term.replace(' ', '-') for term in info_terms[:10])
+    zip_name = f"{timestamp}_{name_text}.zip"
+
+    with zipfile.ZipFile(zip_name, 'w') as z:
+        for f in all_files:
+            z.write(f)
+
+    return zip_name
+
+def main():
+    # Update marquee settings UI first
+    update_marquee_settings_ui()
+    marquee_settings = get_marquee_settings()
+
+    # Initial welcome marquee
+    display_marquee(st.session_state['marquee_content'],
+                    {**marquee_settings, "font-size": "28px", "lineHeight": "50px"},
+                    key_suffix="welcome")
+
+    # Load files for sidebar
+    groups_sorted = load_files_for_sidebar()
+
+    # Update marquee content when viewing files
+    if st.session_state.viewing_prefix:
+        for group_name, files in groups_sorted:
+            if group_name == st.session_state.viewing_prefix:
+                for f in files:
+                    if f.endswith('.md'):
+                        with open(f, 'r', encoding='utf-8') as file:
+                            st.session_state['marquee_content'] = file.read()[:280]
+
+    # Voice Settings
+    st.sidebar.markdown("### 🎤 Voice Settings")
+    selected_voice = st.sidebar.selectbox(
+        "Select TTS Voice:",
+        options=EDGE_TTS_VOICES,
+        index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
+    )
+
+    # Audio Format Settings
+    st.sidebar.markdown("### 🔊 Audio Format")
+    selected_format = st.sidebar.radio(
+        "Choose Audio Format:",
+        options=["MP3", "WAV"],
+        index=0
+    )
+
+    if selected_voice != st.session_state['tts_voice']:
+        st.session_state['tts_voice'] = selected_voice
+        st.rerun()
+    if selected_format.lower() != st.session_state['audio_format']:
+        st.session_state['audio_format'] = selected_format.lower()
+        st.rerun()
+
+    # Main Interface
+    tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"],
+                        horizontal=True)
+
+    mycomponent = components.declare_component("mycomponent", path="mycomponent")
+    val = mycomponent(my_input_value="Hello")
+
+    if val:
+        val_stripped = val.replace('\\n', ' ')
+        edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
+
+        run_option = st.selectbox("Model:", ["Arxiv"])
+        col1, col2 = st.columns(2)
+        with col1:
+            autorun = st.checkbox("⚙ AutoRun", value=True)
+        with col2:
+            full_audio = st.checkbox("📚FullAudio", value=False)
+
+        input_changed = (val != st.session_state.old_val)
+
+        if autorun and input_changed:
+            st.session_state.old_val = val
+            st.session_state.last_query = edited_input
+            result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                       titles_summary=True, full_audio=full_audio)
+        else:
+            if st.button("▶ Run"):
+                st.session_state.old_val = val
+                st.session_state.last_query = edited_input
+                result = perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
+                                           titles_summary=True, full_audio=full_audio)
+
+    if tab_main == "🔍 ArXiv":
+        st.subheader("🔍 Query ArXiv")
+        q = st.text_input("🔍 Query:", key="arxiv_query")
+
+        st.markdown("### 🎛 Options")
+        vocal_summary = st.checkbox("🎙ShortAudio", value=True, key="option_vocal_summary")
+        extended_refs = st.checkbox("📜LongRefs", value=False, key="option_extended_refs")
+        titles_summary = st.checkbox("🔖TitlesOnly", value=True, key="option_titles_summary")
+        full_audio = st.checkbox("📚FullAudio", value=False, key="option_full_audio")
+        full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript")
+
+        if q and st.button("🔍Run"):
+            st.session_state.last_query = q
+            result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
+                                       titles_summary=titles_summary, full_audio=full_audio)
+            if full_transcript:
+                create_file(q, result, "md")
+
+    elif tab_main == "🎤 Voice":
+        st.subheader("🎤 Voice Input")
+        user_text = st.text_area("💬 Message:", height=100)
+        user_text = user_text.strip().replace('\n', ' ')
+
+        if st.button("📨 Send"):
+            process_voice_input(user_text)
+
+        st.subheader("📜 Chat History")
+        for c in st.session_state.chat_history:
+            st.write("**You:**", c["user"])
+            st.write("**Response:**", c["claude"])
+
+    elif tab_main == "📸 Media":
+        st.header("📸 Images & 🎥 Videos")
+        tabs = st.tabs(["🖼 Images", "🎥 Video"])
+        with tabs[0]:
+            imgs = glob.glob("*.png") + glob.glob("*.jpg")
+            if imgs:
+                c = st.slider("Cols", 1, 5, 3)
+                cols = st.columns(c)
+                for i, f in enumerate(imgs):
+                    with cols[i % c]:
+                        st.image(Image.open(f), use_container_width=True)
+                        if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
+                            response = openai_client.chat.completions.create(
+                                model=st.session_state["openai_model"],
+                                messages=[
+                                    {"role": "system", "content": "Analyze the image content."},
+                                    {"role": "user", "content": [
+                                        {"type": "image_url",
+                                         "image_url": {"url": f"data:image/jpeg;base64,{base64.b64encode(open(f, 'rb').read()).decode()}"}}
+                                    ]}
+                                ]
+                            )
+                            st.markdown(response.choices[0].message.content)
+            else:
+                st.write("No images found.")
+
+        with tabs[1]:
+            vids = glob.glob("*.mp4")
+            if vids:
+                for v in vids:
+                    with st.expander(f"🎥 {os.path.basename(v)}"):
+                        st.video(v)
+                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
+                            # process_video is not defined in this file; it must be provided elsewhere
+                            frames = process_video(v)
+                            response = openai_client.chat.completions.create(
+                                model=st.session_state["openai_model"],
+                                messages=[
+                                    {"role": "system", "content": "Analyze video frames."},
+                                    {"role": "user", "content": [
+                                        {"type": "image_url",
+                                         "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
+                                        for frame in frames
+                                    ]}
+                                ]
+                            )
+                            st.markdown(response.choices[0].message.content)
+            else:
+                st.write("No videos found.")
+
+    elif tab_main == "📝 Editor":
+        if st.session_state.editing_file:
+            st.subheader(f"Editing: {st.session_state.editing_file}")
+            new_text = st.text_area("✏️ Content:", st.session_state.edit_new_content, height=300)
+            if st.button("💾 Save"):
+                with open(st.session_state.editing_file, 'w', encoding='utf-8') as f:
+                    f.write(new_text)
+                st.success("File updated successfully!")
+                st.session_state.should_rerun = True
+                st.session_state.editing_file = None
+        else:
+            st.write("Select a file from the sidebar to edit.")
+
+    # Display file manager in sidebar
+    display_file_manager_sidebar(groups_sorted)
+
+    # Display viewed group content
+    if st.session_state.viewing_prefix and any(st.session_state.viewing_prefix == group for group, _ in groups_sorted):
+        st.write("---")
+        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
+        for group_name, files in groups_sorted:
+            if group_name == st.session_state.viewing_prefix:
+                for f in files:
+                    fname = os.path.basename(f)
+                    ext = os.path.splitext(fname)[1].lower().strip('.')
+                    st.write(f"### {fname}")
+                    if ext == "md":
+                        content = open(f, 'r', encoding='utf-8').read()
+                        st.markdown(content)
+                    elif ext in ["mp3", "wav"]:
+                        st.audio(f)
+                    else:
+                        st.markdown(get_download_link(f), unsafe_allow_html=True)
+                break
+        if st.button("❌ Close"):
+            st.session_state.viewing_prefix = None
+            st.session_state['marquee_content'] = "🚀 Welcome to TalkingAIResearcher | 🤖 Your Research Assistant"
+
+    st.markdown("""
+    <style>
+        .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
+        .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
+        .stButton>button { margin-right: 0.5rem; }
+    </style>
+    """, unsafe_allow_html=True)
+
+    if st.session_state.should_rerun:
+        st.session_state.should_rerun = False
+        st.rerun()
+
+if __name__ == "__main__":
+    main()
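
Note on the Media tab: the 🎥 Video "Analyze" button calls process_video(v), but no process_video function is defined in this file, so that path would raise a NameError as committed. Below is a minimal sketch of what such a helper could look like, assuming OpenCV frame sampling (cv2 is already imported) and base64-encoded JPEG frames, which is the format the vision request above expects; the function signature and the frame_interval/max_frames parameters are assumptions for illustration, not part of this commit.

# Hypothetical helper (not part of this commit): sample frames from a video
# and return them as base64-encoded JPEG strings for the vision API call.
def process_video(video_path, frame_interval=30, max_frames=10):
    frames = []
    cap = cv2.VideoCapture(video_path)
    idx = 0
    while cap.isOpened() and len(frames) < max_frames:
        ret, frame = cap.read()
        if not ret:
            break
        if idx % frame_interval == 0:
            ok, buf = cv2.imencode('.jpg', frame)
            if ok:
                frames.append(base64.b64encode(buf.tobytes()).decode('utf-8'))
        idx += 1
    cap.release()
    return frames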