Update app.py
app.py
CHANGED
@@ -20,6 +20,8 @@ from streamlit_paste_button import paste_image_button
 import pytz
 import shutil
 from urllib.parse import urlencode
+from PyPDF2 import PdfReader
+import json
 
 # Patch for nested async - sneaky fix! πβ¨
 nest_asyncio.apply()
@@ -87,7 +89,7 @@ UNICODE_FONTS = [
     ("Sans Serif Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D63C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D656 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
     ("Monospace", lambda x: "".join(chr(ord(c) + 0x1D670 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D68A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
     ("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
-    ("Squared", lambda x: "".join(chr(ord(c) -
+    ("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
     ("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
     ("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
     ("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
@@ -106,10 +108,6 @@ if 'last_chat_update' not in st.session_state:
     st.session_state.last_chat_update = 0
 if 'displayed_chat_lines' not in st.session_state:
     st.session_state.displayed_chat_lines = []
-if 'old_val' not in st.session_state:
-    st.session_state.old_val = ""
-if 'last_query' not in st.session_state:
-    st.session_state.last_query = ""
 if 'message_text' not in st.session_state:
     st.session_state.message_text = ""
 if 'audio_cache' not in st.session_state:
@@ -125,7 +123,7 @@ if 'base64_cache' not in st.session_state:
 if 'image_hashes' not in st.session_state:
     st.session_state.image_hashes = set()
 if 'gallery_columns' not in st.session_state:
-    st.session_state.gallery_columns = 1
+    st.session_state.gallery_columns = 1
 if 'user_id' not in st.session_state:
     st.session_state.user_id = None
 if 'user_hash' not in st.session_state:
@@ -177,6 +175,48 @@ def clean_text_for_tts(text):
     cleaned = ' '.join(cleaned.split())
     return cleaned[:200] if cleaned else "No text to speak"
 
+# Audio Processor Class from your code
+class AudioProcessor:
+    def __init__(self):
+        self.cache_dir = "audio_cache"
+        os.makedirs(self.cache_dir, exist_ok=True)
+        self.metadata = self._load_metadata()
+
+    def _load_metadata(self):
+        metadata_file = os.path.join(self.cache_dir, "metadata.json")
+        return json.load(open(metadata_file)) if os.path.exists(metadata_file) else {}
+
+    def _save_metadata(self):
+        metadata_file = os.path.join(self.cache_dir, "metadata.json")
+        with open(metadata_file, 'w') as f:
+            json.dump(self.metadata, f)
+
+    async def create_audio(self, text, voice='en-US-AriaNeural', filename=None):
+        cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
+        cache_path = filename or os.path.join(self.cache_dir, f"{cache_key}.mp3")
+
+        if cache_key in self.metadata and os.path.exists(cache_path):
+            return open(cache_path, 'rb').read()
+
+        # Clean text for speech
+        text = text.replace("\n", " ").replace("</s>", " ").strip()
+        if not text:
+            return None
+
+        # Generate audio with edge_tts
+        communicate = edge_tts.Communicate(text, voice)
+        await communicate.save(cache_path)
+
+        # Update metadata
+        self.metadata[cache_key] = {
+            'timestamp': datetime.now().isoformat(),
+            'text_length': len(text),
+            'voice': voice
+        }
+        self._save_metadata()
+
+        return open(cache_path, 'rb').read()
+
 # Chat saver - words locked tight! π¬π
 async def save_chat_entry(username, message, is_markdown=False, quote_line=None):
     await asyncio.to_thread(log_action, username, "π¬π - Chat saver - words locked tight!")
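Note on the new `AudioProcessor`: `create_audio` keys its cache on an MD5 of `text:voice`, so repeated requests for the same line reuse the cached MP3 instead of calling edge_tts again. A minimal sketch for exercising it outside Streamlit (assuming `edge-tts` is installed and the class is importable; the import path and sample text are illustrative only, not part of this commit):

```python
import asyncio
from app import AudioProcessor  # hypothetical import path; adjust to the real module layout

async def demo():
    processor = AudioProcessor()
    # First call synthesizes speech and writes audio_cache/<md5>.mp3 plus metadata.json.
    first = await processor.create_audio("Hello from the cache demo", voice="en-US-AriaNeural")
    # Same text and voice: returns the cached bytes without another edge_tts request.
    second = await processor.create_audio("Hello from the cache demo", voice="en-US-AriaNeural")
    print(len(first), len(second))

asyncio.run(demo())
```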
@@ -203,23 +243,28 @@ async def save_chat_entry(username, message, is_markdown=False, quote_line=None)
         f.write(f"{entry}\n")
 
     cleaned_message = clean_text_for_tts(message)
+    audio_processor = AudioProcessor()
+    audio_filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(cleaned_message.encode()).hexdigest()[:8]}.mp3"
+    audio_data = await audio_processor.create_audio(cleaned_message, voice, audio_filename)
+    if audio_data:
         with open(HISTORY_FILE, 'a') as f:
-            f.write(f"[{timestamp}] {username} ({voice}): Audio generated - {
+            f.write(f"[{timestamp}] {username} ({voice}): Audio generated - {audio_filename}\n")
         with open(user_history_file, 'a') as f:
-            f.write(f"{indent}[{timestamp}] Audio: {
+            f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
 
-        # Embed audio in global chat if
-        if message.startswith("Pasted image:") or message.startswith("Uploaded media:"):
+        # Embed audio and image in global chat if applicable
+        if message.startswith("Pasted image:") or message.startswith("Uploaded media:") or message.startswith("Uploaded PDF:"):
             with open(CHAT_FILE, 'a') as f:
-                f.write(f"{indent}[{timestamp}] Audio: {
+                f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
+                file_path = message.split(": ")[1]
+                if os.path.exists(file_path) and file_path.endswith(('.png', '.jpg', '.mp4', '.pdf')):
+                    f.write(f"{indent}[{timestamp}] Media: \n")
 
     await broadcast_message(f"{username}|{message}", "chat")
     st.session_state.last_chat_update = time.time()
-    return
+    return audio_filename
 
-# Save chat history with image
+# Save chat history with image or PDF
 async def save_chat_history_with_image(username, image_path):
     central = pytz.timezone('US/Central')
     timestamp = datetime.now(central).strftime("%Y-%m-%d_%H-%M-%S")
@@ -230,7 +275,7 @@ async def save_chat_history_with_image(username, image_path):
     with open(user_history_file, 'w') as f:
         f.write(f"# Chat History for {username} (Voice: {voice})\n\n")
     with open(user_history_file, 'a') as f:
-        f.write(f"[{timestamp}] {username} (Voice: {voice}) Shared
+        f.write(f"[{timestamp}] {username} (Voice: {voice}) Shared Media: {os.path.basename(image_path)}\n")
         f.write(f"```markdown\n{chat_content}\n```\n")
 
 # Chat loader - history unleashed! ππ
@@ -315,7 +360,7 @@ async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3
     username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "πΆπ - Audio maker - voices come alive!")
     timestamp = format_timestamp_prefix(username)
-    filename = f"{timestamp}.{file_format}"
+    filename = f"{timestamp}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
     filepath = filename  # Top-level file
     communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
     try:
@@ -353,7 +398,34 @@ async def save_pasted_image(image, username):
     await save_chat_history_with_image(username, filepath)
     return filepath
 
-#
+# PDF saver and audio generator
+async def save_pdf_and_generate_audio(pdf_file, username, max_pages=10):
+    await asyncio.to_thread(log_action, username, "ππΆ - PDF saver and audio generator!")
+    timestamp = format_timestamp_prefix(username)
+    file_hash = hashlib.md5(pdf_file.getbuffer()).hexdigest()[:8]
+    pdf_filename = f"{timestamp}-{file_hash}.pdf"
+    with open(pdf_filename, 'wb') as f:
+        f.write(pdf_file.getbuffer())
+
+    reader = PdfReader(pdf_filename)
+    total_pages = min(len(reader.pages), max_pages)
+    texts = []
+    audio_files = []
+
+    audio_processor = AudioProcessor()
+    voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
+
+    for i in range(total_pages):
+        text = reader.pages[i].extract_text()
+        texts.append(text)
+        audio_filename = f"{timestamp}-page{i+1}-{file_hash}.mp3"
+        audio_data = await audio_processor.create_audio(text, voice, audio_filename)
+        if audio_data:
+            audio_files.append(audio_filename)
+
+    return pdf_filename, texts, audio_files
+
+# Video renderer - movies roll with autoplay! π₯π¬
 def get_video_html(video_path, width="100%"):
     video_url = f"data:video/mp4;base64,{base64.b64encode(open(video_path, 'rb').read()).decode()}"
     return f'''
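If it helps to sanity-check the per-page conversion that `save_pdf_and_generate_audio` performs, here is a standalone sketch of the same idea using only PyPDF2 and edge-tts (the function name, output file names, and the skip-empty-page guard are illustrative, not part of this commit):

```python
import asyncio
import hashlib
from PyPDF2 import PdfReader
import edge_tts

async def pdf_pages_to_audio(pdf_path, voice="en-US-AriaNeural", max_pages=10):
    reader = PdfReader(pdf_path)
    audio_files = []
    for i in range(min(len(reader.pages), max_pages)):
        text = (reader.pages[i].extract_text() or "").strip()
        if not text:
            continue  # pages with no extractable text would otherwise yield empty audio
        out = f"page{i+1}-{hashlib.md5(text.encode()).hexdigest()[:8]}.mp3"
        await edge_tts.Communicate(text, voice).save(out)
        audio_files.append(out)
    return audio_files

# asyncio.run(pdf_pages_to_audio("example.pdf"))  # "example.pdf" is a placeholder path
```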
@@ -443,13 +515,16 @@ def delete_user_files():
     st.session_state.displayed_chat_lines.clear()
     return deleted_files
 
-# Query parameter checker
+# Query parameter checker - parse q for username
 def check_query_params():
     query_params = st.query_params if hasattr(st, 'query_params') else st.experimental_get_query_params()
-    if
-    st.session_state.
+    q_value = query_params.get("q") if hasattr(st, 'query_params') else query_params.get("q", [None])[0]
+    if q_value and q_value in FUN_USERNAMES:
+        st.session_state.username = q_value
+        st.session_state.voice = FUN_USERNAMES[q_value]
+    elif q_value:
+        st.session_state.user_id = q_value  # Use as user_id if not a valid username
+    return q_value
 
 # Main execution - let's roll! π²π
 def main():
@@ -464,15 +539,21 @@ def main():
         st.session_state.user_id = str(uuid.uuid4())
         st.session_state.user_hash = await generate_user_hash()
     if 'username' not in st.session_state:
+        q_value = check_query_params()
+        if not q_value or q_value not in FUN_USERNAMES:
+            chat_content = await load_chat()
+            available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
+            st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
         st.session_state.voice = FUN_USERNAMES[st.session_state.username]
         st.markdown(f"**ποΈ Voice Selected**: {st.session_state.voice} π£οΈ for {st.session_state.username}")
 
-    # Check
+    # Check existing history file for content
+    user_history_file = f"{st.session_state.username}_history.md"
+    if os.path.exists(user_history_file):
+        with open(user_history_file, 'r') as f:
+            st.session_state.displayed_chat_lines = f.read().split('\n')
+
+    user_url = f"?q={st.session_state.username}"
     st.write(f"Your unique URL path: [{user_url}]({user_url})")
 
     st.title(f"π€π§ MMO {st.session_state.username}ππ¬")
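Since `urlencode` is already imported at the top of the file, the share link could also be built with it, which keeps usernames containing spaces or emoji URL-safe; a hedged sketch (the literal username is illustrative, the app uses `st.session_state.username`):

```python
from urllib.parse import urlencode

username = "Cosmic Otter"  # illustrative value
user_url = f"?{urlencode({'q': username})}"  # -> "?q=Cosmic+Otter", which check_query_params can read
```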
@@ -484,7 +565,6 @@ def main():
     # Unified Chat History at Top
     st.subheader(f"{START_ROOM} Chat History π¬")
     chat_content = await load_chat()
-    chat_lines = chat_content.split('\n')
     chat_lines = [line for line in chat_content.split('\n') if line.strip() and not line.startswith('#')]
     if chat_lines:
         col1, col2 = st.columns([2, 1])
@@ -501,6 +581,15 @@ def main():
                     st.markdown(line)
                 else:
                     st.markdown(line)
+                if "Media:" in line:
+                    file_path = line.split("Media: ")[-1].strip('![]()')
+                    if os.path.exists(file_path):
+                        if file_path.endswith(('.png', '.jpg')):
+                            st.image(file_path, use_container_width=True)
+                        elif file_path.endswith('.mp4'):
+                            st.markdown(get_video_html(file_path), unsafe_allow_html=True)
+                        elif file_path.endswith('.pdf'):
+                            st.write(f"PDF: {os.path.basename(file_path)}")
             with col_audio:
                 username = line.split(': ')[1].split(' ')[0] if ': ' in line else "Unknown"
                 cache_key = f"{line}_{FUN_USERNAMES.get(username, 'en-US-AriaNeural')}"
@@ -530,7 +619,8 @@ def main():
             filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
             if filename:
                 st.session_state.pasted_image_data = filename
+                await save_chat_entry(st.session_state.username, f"Pasted image: {filename}", quote_line=st.session_state.quote_line)
+        if st.button("Send Quote π", key="send_quote"):
             markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
             if st.session_state.pasted_image_data:
                 markdown_response += f"\n- **Image**: "
@@ -576,14 +666,15 @@ def main():
             st.image(paste_result_msg.image_data, caption="Received Image for Message")
             filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
             if filename:
-                st.session_state.
+                await save_chat_entry(st.session_state.username, f"Pasted image: {filename}")
+                st.session_state.pasted_image_data = None
 
     tab_main = st.radio("Action:", ["πΈ Media", "π ArXiv", "π Editor"], horizontal=True)
     useArxiv = st.checkbox("Search Arxiv for Research Paper Answers", value=True)
     useArxivAudio = st.checkbox("Generate Audio File for Research Paper Answers", value=False)
 
-    st.subheader("Upload Media
-    uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp4', 'mp3'])
+    st.subheader("Upload Media π¨πΆππ₯")
+    uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp4', 'mp3', 'wav', 'pdf'])
     if uploaded_file:
         timestamp = format_timestamp_prefix(st.session_state.username)
         username = st.session_state.username
@@ -594,7 +685,14 @@ def main():
         file_path = filename  # Top-level file
         await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
         st.success(f"Uploaded {filename}")
+        if ext == 'pdf':
+            pdf_filename, texts, audio_files = await save_pdf_and_generate_audio(uploaded_file, username)
+            await save_chat_entry(username, f"Uploaded PDF: {pdf_filename}")
+            for i, (text, audio_file) in enumerate(zip(texts, audio_files)):
+                with open(CHAT_FILE, 'a') as f:
+                    f.write(f" [{timestamp}] Page {i+1} Audio: {audio_file}\n")
+        else:
+            await save_chat_entry(username, f"Uploaded media: {file_path}")
         await save_chat_history_with_image(username, file_path)
         st.session_state.image_hashes.add(file_hash)
         if file_path.endswith('.mp4'):
@@ -622,10 +720,10 @@ def main():
             st.rerun()
 
     # Gallery with Adjustable Tiles
-    st.subheader("Media Gallery
+    st.subheader("Media Gallery π¨πΆππ₯")
     gallery_columns = st.slider("Number of Gallery Tiles", 1, 20, st.session_state.gallery_columns)
     st.session_state.gallery_columns = gallery_columns
-    media_files = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.mp4")
+    media_files = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.mp4") + glob.glob("*.pdf")
     if media_files:
         media_votes = await load_votes(MEDIA_VOTES_FILE)
         seen_files = set()
@@ -642,6 +740,8 @@ def main():
                 st.image(media_file, use_container_width=True)
             elif media_file.endswith('.mp4'):
                 st.markdown(get_video_html(media_file), unsafe_allow_html=True)
+            elif media_file.endswith('.pdf'):
+                st.write(f"PDF: {os.path.basename(media_file)}")
             if st.button(f"π {vote_count}", key=f"media_vote_{media_file}"):
                 await save_vote(MEDIA_VOTES_FILE, media_file, await generate_user_hash(), st.session_state.username)
                 st.rerun()