Update app.py
app.py CHANGED
@@ -15,20 +15,20 @@ from urllib.parse import quote
 import streamlit as st
 import streamlit.components.v1 as components

-#
+# If you do model inference via huggingface_hub:
 from huggingface_hub import InferenceClient

-
 # ----------------------------
 # Configurable BASE_URL
 # ----------------------------
 BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

-# Example placeholders
+# Example placeholders for prompt prefixes
 PromptPrefix = "AI-Search: "
 PromptPrefix2 = "AI-Refine: "
 PromptPrefix3 = "AI-JS: "

+# Example roleplaying glossary
 roleplaying_glossary = {
     "Core Rulebooks": {
         "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],
@@ -39,11 +39,13 @@ roleplaying_glossary = {
     }
 }

+# Example transhuman glossary
 transhuman_glossary = {
     "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
     "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
 }

+# Simple function stubs
 def process_text(text):
     st.write(f"process_text called with: {text}")

@@ -59,7 +61,7 @@ def process_image(image_file, prompt):
 def process_video(video_file, seconds_per_frame):
     st.write(f"[process_video placeholder] Video: {video_file}, seconds/frame: {seconds_per_frame}")

-# Stub if you have a
+# Stub if you have a Hugging Face endpoint
 API_URL = "https://huggingface-inference-endpoint-placeholder"
 API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

@@ -72,6 +74,9 @@ def InferenceLLM(prompt):
 # ------------------------------------------
 @st.cache_resource
 def display_glossary_entity(k):
+    """
+    Creates multiple link emojis for a single entity.
+    """
     search_urls = {
         "๐๐ArXiv": lambda x: f"/?q={quote(x)}",
         "๐Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
@@ -87,6 +92,9 @@ def display_glossary_entity(k):
     st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)

 def display_content_or_image(query):
+    """
+    If a query matches something in transhuman_glossary or a local image, show it.
+    """
     for category, term_list in transhuman_glossary.items():
         for term in term_list:
             if query.lower() in term.lower():
@@ -101,9 +109,12 @@ def display_content_or_image(query):
     return False

 def clear_query_params():
+    """
+    For clearing URL params, you'd typically use a new link or st.experimental_set_query_params().
+    Here, we just warn the user.
+    """
     st.warning("Define a redirect or link without query params if you want to truly clear them.")

-
 # -----------------------
 # File Handling
 # -----------------------
@@ -130,6 +141,9 @@ def get_zip_download_link(zip_file):
     return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'

 def get_table_download_link(file_path):
+    """
+    Creates a download link for a single file from your snippet.
+    """
     try:
         with open(file_path, 'r', encoding='utf-8') as file:
             data = file.read()
@@ -154,7 +168,11 @@ def get_file_size(file_path):
     return os.path.getsize(file_path)

 def FileSidebar():
+    """
+    Renders .md files, providing open/view/delete/run logic in the sidebar.
+    """
     all_files = glob.glob("*.md")
+    # Exclude short-named or special files if needed
     all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)

@@ -174,9 +192,9 @@ def FileSidebar():
     next_action = ''

     for file in all_files:
-        col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
+        col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])
         with col1:
-            if st.button("๐", key="md_"+file):
+            if st.button("๐", key="md_" + file):
                 file_contents = load_file(file)
                 file_name = file
                 next_action = 'md'
@@ -184,7 +202,7 @@ def FileSidebar():
         with col2:
             st.markdown(get_table_download_link(file), unsafe_allow_html=True)
         with col3:
-            if st.button("๐", key="open_"+file):
+            if st.button("๐", key="open_" + file):
                 file_contents = load_file(file)
                 file_name = file
                 next_action = 'open'
@@ -193,16 +211,17 @@ def FileSidebar():
             st.session_state['filetext'] = file_contents
             st.session_state['next_action'] = next_action
         with col4:
-            if st.button("โถ๏ธ", key="read_"+file):
+            if st.button("โถ๏ธ", key="read_" + file):
                 file_contents = load_file(file)
                 file_name = file
                 next_action = 'search'
                 st.session_state['next_action'] = next_action
         with col5:
-            if st.button("๐", key="delete_"+file):
+            if st.button("๐", key="delete_" + file):
                 os.remove(file)
                 st.rerun()

+    # If we loaded a file
     if file_contents:
         if next_action == 'open':
             open1, open2 = st.columns([0.8, 0.2])
@@ -225,7 +244,7 @@ def FileSidebar():
         elif next_action == 'md':
             st.markdown(file_contents)
             SpeechSynthesis(file_contents)
-            if st.button(
+            if st.button("๐Run"):
                 st.write("Running GPT logic placeholder...")

 # ---------------------------
@@ -238,6 +257,9 @@ def generate_key(label, header, idx):
     return f"{header}_{label}_{idx}_key"

 def update_score(key, increment=1):
+    """
+    Track a 'score' for each glossary item or term, saved in JSON per key.
+    """
     score_file = os.path.join(score_dir, f"{key}.json")
     if os.path.exists(score_file):
         with open(score_file, "r") as file:
@@ -259,6 +281,9 @@ def load_score(key):
     return 0

 def display_buttons_with_scores(num_columns_text):
+    """
+    Show glossary items as clickable buttons that increment a 'score'.
+    """
     game_emojis = {
         "Dungeons and Dragons": "๐",
         "Call of Cthulhu": "๐",
@@ -288,9 +313,9 @@ def display_buttons_with_scores(num_columns_text):
             game_emoji = game_emojis.get(game, "๐ฎ")
             for term in terms:
                 key = f"{category}_{game}_{term}".replace(' ', '_').lower()
-
-                if st.button(f"{game_emoji} {category} {game} {term} {
-                    newscore = update_score(key.replace('?',''))
+                score_val = load_score(key)
+                if st.button(f"{game_emoji} {category} {game} {term} {score_val}", key=key):
+                    newscore = update_score(key.replace('?', ''))
                     st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")

 # -------------------------------
@@ -349,6 +374,9 @@ def display_videos_and_links(num_columns=4):
 # MERMAID DIAGRAM
 # --------------------------------
 def generate_mermaid_html(mermaid_code: str) -> str:
+    """
+    Returns HTML that centers the Mermaid diagram, loading from a CDN.
+    """
     return f"""
     <html>
     <head>
@@ -376,16 +404,24 @@ def generate_mermaid_html(mermaid_code: str) -> str:
     """

 def append_model_param(url: str, model_selected: bool) -> str:
+    """
+    If user checks 'Append ?model=1', we append &model=1 or ?model=1 if not present.
+    """
     if not model_selected:
         return url
     delimiter = "&" if "?" in url else "?"
     return f"{url}{delimiter}model=1"

 def inject_base_url(url: str) -> str:
+    """
+    If a link does not start with http, prepend your BASE_URL
+    so it becomes an absolute link to huggingface.co/spaces/...
+    """
     if url.startswith("http"):
         return url
     return f"{BASE_URL}{url}"

+# Our default diagram, containing the "click" lines with /?q=...
 DEFAULT_MERMAID = """
 flowchart LR
     U((User ๐)) -- "Talk ๐ฃ๏ธ" --> LLM[LLM Agent ๐ค\\nExtract Info]
@@ -405,15 +441,19 @@ flowchart LR
 def main():
     st.set_page_config(page_title="Mermaid + Clickable Links with Base URL", layout="wide")

-    #
+    # ---------------------------------------------
+    # Query Param Parsing (non-experimental)
+    # ---------------------------------------------
     query_params = st.query_params
     query_list = (query_params.get('q') or query_params.get('query') or [''])
     q_or_query = query_list[0] if query_list else ''
     if q_or_query.strip():
+        # If there's a q= or query= param, do some processing
         search_payload = PromptPrefix + q_or_query
         st.markdown(search_payload)
         process_text(search_payload)

+    # If an 'action' param is present
     if 'action' in query_params:
         action_list = query_params['action']
         if action_list:
@@ -423,25 +463,38 @@ def main():
             elif action == 'clear':
                 clear_query_params()

+    # If a 'query' param is present, show content or image
     if 'query' in query_params:
         query_val = query_params['query'][0]
         display_content_or_image(query_val)

-    #
+    # ---------------------------------------------
+    # Let user pick if we want ?model=1
+    # ---------------------------------------------
     st.sidebar.write("## Diagram Link Settings")
     model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")

-    #
+    # ---------------------------------------------
+    # Rebuild the clickable lines in the Mermaid code
+    # ---------------------------------------------
     base_diagram = DEFAULT_MERMAID
     lines = base_diagram.strip().split("\n")
     new_lines = []
+
     for line in lines:
+        # We look for lines like: click SOMENODE "/?q=Something" _self
         if "click " in line and '"/?' in line:
+            # Try to extract the URL part
             parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
             if len(parts) == 4:
-
+                # Example:
+                # parts[0] -> 'click LLM '
+                # parts[1] -> '/?q=LLM%20Agent%20Extract%20Info'
+                # parts[2] -> ' _self'
+                # parts[3] -> '' or trailing
+                old_url = parts[1]
                 # 1) Prepend base if needed
-                new_url = inject_base_url(old_url)
+                new_url = inject_base_url(old_url)
                 # 2) Possibly add &model=1
                 new_url = append_model_param(new_url, model_selected)

@@ -449,21 +502,26 @@ def main():
                 new_line = f"{parts[0]}\"{new_url}\" {parts[2]}"
                 new_lines.append(new_line)
             else:
+                # If we can't parse it, keep it as is
                 new_lines.append(line)
         else:
             new_lines.append(line)

     mermaid_code = "\n".join(new_lines)

-    #
+    # ---------------------------------------------
+    # Render the top-centered Mermaid diagram
+    # ---------------------------------------------
     st.title("Mermaid Diagram with Base URL Injection")
     diagram_html = generate_mermaid_html(mermaid_code)
     components.html(diagram_html, height=400, scrolling=True)

-    #
+    # ---------------------------------------------
+    # Two-column interface: Markdown & Mermaid
+    # ---------------------------------------------
     left_col, right_col = st.columns(2)

-    # Left: Markdown Editor
+    # --- Left: Markdown Editor
     with left_col:
         st.subheader("Markdown Side ๐")
         if "markdown_text" not in st.session_state:
@@ -488,10 +546,11 @@ def main():
         st.markdown("**Preview:**")
         st.markdown(markdown_text)

-    # Right: Mermaid Editor
+    # --- Right: Mermaid Editor
     with right_col:
         st.subheader("Mermaid Side ๐งโโ๏ธ")

+        # We store the final code in session state, so user can edit
         if "current_mermaid" not in st.session_state:
             st.session_state["current_mermaid"] = mermaid_code

@@ -516,24 +575,35 @@ def main():
         st.markdown("**Mermaid Source:**")
         st.code(mermaid_input, language="python", line_numbers=True)

-    #
+    # ---------------------------------------------
+    # Media Galleries
+    # ---------------------------------------------
     st.markdown("---")
     st.header("Media Galleries")
+
     num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
     display_images_and_wikipedia_summaries(num_columns_images)

     num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
     display_videos_and_links(num_columns_video)

+    # (Optionally) Extended text interface
     showExtendedTextInterface = False
     if showExtendedTextInterface:
-        #
+        # For example:
+        # display_glossary_grid(roleplaying_glossary)
+        # num_columns_text = st.slider("Choose Number of Text Columns", 1, 15, 4)
+        # display_buttons_with_scores(num_columns_text)
         pass

-    #
+    # ---------------------------------------------
+    # File Sidebar
+    # ---------------------------------------------
     FileSidebar()

-    #
+    # ---------------------------------------------
+    # Random Title at the bottom
+    # ---------------------------------------------
     titles = [
         "๐ง ๐ญ Semantic Symphonies & Episodic Encores",
         "๐๐ผ AI Rhythms of Memory Lane",
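A few notes on the functions this commit touches follow, each with a small illustrative sketch rather than code taken from the commit itself. The hunks show only the new docstring and the opening try: of get_table_download_link; judging from the neighbouring get_zip_download_link, it presumably wraps the file in a base64 data: URI. A minimal sketch of that pattern (the MIME type and link text here are assumptions):

import base64
import os

def make_text_download_link(file_path: str) -> str:
    # Read the file and base64-encode it so it fits inside a data: URI,
    # mirroring the pattern used by get_zip_download_link in the diff.
    with open(file_path, "r", encoding="utf-8") as f:
        data = f.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    return f'<a href="data:text/plain;base64,{b64}" download="{file_name}">{file_name}</a>'

# Hypothetical usage in the sidebar:
# st.markdown(make_text_download_link("notes.md"), unsafe_allow_html=True)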
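update_score and load_score appear only partially in the hunks; the new docstring says each key is persisted as its own JSON file under score_dir. A self-contained sketch of that pattern, with the directory name and JSON field name as assumptions:

import json
import os

score_dir = "scores"  # assumption: the app defines score_dir elsewhere
os.makedirs(score_dir, exist_ok=True)

def load_score(key: str) -> int:
    # Return the saved score for a key, or 0 if nothing has been stored yet.
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as f:
            return json.load(f).get("score", 0)
    return 0

def update_score(key: str, increment: int = 1) -> int:
    # Increment and persist the score, returning the new value
    # (this is what the rewritten button handler displays).
    new_score = load_score(key) + increment
    with open(os.path.join(score_dir, f"{key}.json"), "w") as f:
        json.dump({"score": new_score}, f)
    return new_score

print(update_score("core_rulebooks_dungeons_and_dragons_monster_manual"))  # -> 1 on first click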
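The query-param block indexes query_list[0], which assumes list-valued parameters (the shape the older experimental API returned); newer st.query_params typically hands back plain strings. A tiny helper that tolerates both shapes, offered only as an illustrative sketch:

def first_param_value(raw) -> str:
    # Accepts None, a plain string, or a list of strings and returns one value.
    if raw is None:
        return ""
    if isinstance(raw, (list, tuple)):
        return raw[0] if raw else ""
    return raw

assert first_param_value(["hello"]) == "hello"
assert first_param_value("hello") == "hello"
assert first_param_value(None) == ""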
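The heart of the change is the pair inject_base_url and append_model_param, which turn each relative click target into an absolute Space URL and optionally tag it with model=1. The snippet below runs standalone; the two functions are copied verbatim from the diff and the sample URL comes from its comments:

BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

def inject_base_url(url: str) -> str:
    # Relative links such as "/?q=..." get the Space's absolute base prepended.
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"

def append_model_param(url: str, model_selected: bool) -> str:
    # Append model=1 with "&" if the URL already has a query string, else "?".
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

old_url = "/?q=LLM%20Agent%20Extract%20Info"
print(append_model_param(inject_base_url(old_url), model_selected=True))
# https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor/?q=LLM%20Agent%20Extract%20Info&model=1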