Spaces: Running on CPU Upgrade
Update app.py

app.py CHANGED
@@ -15,18 +15,20 @@ from urllib.parse import quote
import streamlit as st
import streamlit.components.v1 as components

-#
-

-#
-#
-#
BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

PromptPrefix = "AI-Search: "
PromptPrefix2 = "AI-Refine: "
PromptPrefix3 = "AI-JS: "

roleplaying_glossary = {
    "Core Rulebooks": {
        "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],
@@ -37,75 +39,61 @@ roleplaying_glossary = {
    }
}

transhuman_glossary = {
    "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
    "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
}

-
def process_text(text):
-    """🕵️ process_text: detective style—prints lines to Streamlit for debugging."""
    st.write(f"process_text called with: {text}")

-
def search_arxiv(text):
-    """🔭 search_arxiv: pretend to search ArXiv, just prints debug for now."""
    st.write(f"search_arxiv called with: {text}")

-
def SpeechSynthesis(text):
-    """🗣 SpeechSynthesis: read lines out loud? Here, we log them for demonstration."""
    st.write(f"SpeechSynthesis called with: {text}")

-
def process_image(image_file, prompt):
-    "
-    return f"[process_image placeholder] {image_file} => {prompt}"
-

def process_video(video_file, seconds_per_frame):
-    "
-    st.write(f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame")
-

API_URL = "https://huggingface-inference-endpoint-placeholder"
API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

-
@st.cache_resource
def InferenceLLM(prompt):
-    """🔮 InferenceLLM: a stub returning a mock response for 'prompt'."""
    return f"[InferenceLLM placeholder response to prompt: {prompt}]"

-
-#
-#
-# =====================================================================================
@st.cache_resource
def display_glossary_entity(k):
    """
    Creates multiple link emojis for a single entity.
-    Each link might point to /?q=..., /?q=<prefix>..., or external sites.
    """
    search_urls = {
-        "🚀🌌ArXiv":
-        "🃏Analyst":
-        "📚PyCoder":
-        "🔬JSCoder":
-        "📖":
-        "🔍":
-        "🔎":
-        "🎥":
-        "🐦":
    }
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)

-
def display_content_or_image(query):
    """
-    If
-    we show it. Otherwise warn.
    """
    for category, term_list in transhuman_glossary.items():
        for term in term_list:
@@ -120,47 +108,41 @@ def display_content_or_image(query):
    st.warning("No matching content or image found.")
    return False

-
def clear_query_params():
-    """
    st.warning("Define a redirect or link without query params if you want to truly clear them.")

-
-#
-#
-# =====================================================================================
def load_file(file_path):
-    """Load file contents as UTF-8 text, or return empty on error."""
    try:
        with open(file_path, "r", encoding='utf-8') as f:
            return f.read()
    except:
        return ""

-
@st.cache_resource
def create_zip_of_files(files):
-    """Combine multiple local files into a single .zip for user to download."""
    zip_name = "Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

-
@st.cache_resource
def get_zip_download_link(zip_file):
-    """Return an <a> link to download the given zip_file (base64-encoded)."""
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'

-
def get_table_download_link(file_path):
    """
    Creates a download link for a single file from your snippet.
-    Encodes it as base64 data.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
@@ -169,31 +151,28 @@ def get_table_download_link(file_path):
        file_name = os.path.basename(file_path)
        ext = os.path.splitext(file_name)[1]
        mime_map = {
-            '.txt':
-            '.py':
            '.xlsx': 'text/plain',
-            '.csv':
-            '.htm':
-            '.md':
-            '.wav':
        }
        mime_type = mime_map.get(ext, 'application/octet-stream')
        return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    except:
        return ''

-
def get_file_size(file_path):
-    """Get file size in bytes."""
    return os.path.getsize(file_path)

-
def FileSidebar():
    """
-    Renders .md files
    """
    all_files = glob.glob("*.md")
-    #
    all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)

@@ -242,6 +221,7 @@ def FileSidebar():
            os.remove(file)
            st.rerun()

    if file_contents:
        if next_action == 'open':
            open1, open2 = st.columns([0.8, 0.2])
@@ -267,20 +247,19 @@ def FileSidebar():
        if st.button("🔍Run"):
            st.write("Running GPT logic placeholder...")

-
-#
-#
-# =====================================================================================
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)

-
def generate_key(label, header, idx):
    return f"{header}_{label}_{idx}_key"

-
def update_score(key, increment=1):
-    """
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
|
            json.dump(score_data, file)
    return score_data["score"]

-
def load_score(key):
-    """Load the stored score from .json if it exists, else 0."""
    file_path = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(file_path):
        with open(file_path, "r") as file:
|
        return score_data["score"]
    return 0

-
def display_buttons_with_scores(num_columns_text):
    """
-    Show glossary items as clickable buttons
    """
    game_emojis = {
        "Dungeons and Dragons": "🐉",
-        "Call of Cthulhu":
-        "GURPS":
-        "Pathfinder":
-        "Kindred of the East":
-        "Changeling":
    }
    topic_emojis = {
-        "Core Rulebooks":
-        "Maps & Settings":
-        "Game Mechanics & Tools":
-        "Monsters & Adversaries":
-        "Campaigns & Adventures":
-        "Creatives & Assets":
-        "Game Master Resources":
-        "Lore & Background":
-        "Character Development":
-        "Homebrew Content":
-        "General Topics":
    }

    for category, games in roleplaying_glossary.items():
|
                    newscore = update_score(key.replace('?', ''))
                    st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")

-
-#
-#
-# =====================================================================================
def display_images_and_wikipedia_summaries(num_columns=4):
-    """Display .png images in a grid, referencing the name as a 'keyword'."""
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")
|
            st.write(f"Could not open {image_file}")
        col_index += 1

-
def display_videos_and_links(num_columns=4):
-    """Displays all .mp4/.webm in a grid, plus text input for prompts."""
    video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
    if not video_files:
        st.write("No MP4 or WEBM videos found in the current directory.")
|
            st.error("Invalid input for seconds per frame!")
        col_index += 1

-
-#
-#
-# =====================================================================================
def generate_mermaid_html(mermaid_code: str) -> str:
-    """
    return f"""
    <html>
    <head>
@@ -430,114 +403,57 @@ def generate_mermaid_html(mermaid_code: str) -> str:
    </html>
    """

-
def append_model_param(url: str, model_selected: bool) -> str:
-    """
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

-
def inject_base_url(url: str) -> str:
-    """
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"

-
# Our default diagram, containing the "click" lines with /?q=...
-DEFAULT_MERMAID =
flowchart LR
-    U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent
-    click U "/?q=User%20😎"
-    click LLM "/?q=LLM%20Agent%20Extract%20Info"

-    LLM -- "Query 🔍" --> HS[Hybrid Search
-    click HS "/?q=Hybrid%20Search%20Vector+NER+Lexical"

-    HS -- "Reason 🤔" --> RE[Reasoning Engine
-    click RE "/?q=Reasoning%20Engine%20NeuralNetwork+Medical"

-    RE -- "Link 📡" --> KG((Knowledge Graph
-    click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG"
"""

-
-# BFS subgraph: we parse lines like A -- "Label" --> B
-def parse_mermaid_edges(mermaid_text: str):
-    """
-    🍿 parse_mermaid_edges:
-      - Find lines like: A -- "Label" --> B
-      - Return adjacency dict: edges[A] = [(label, B), ...]
-    """
-    adjacency = {}
-    # e.g. U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\nExtract Info]
-    edge_pattern = re.compile(r'(\S+)\s*--\s*"([^"]*)"\s*-->\s*(\S+)')
-    for line in mermaid_text.split('\n'):
-        match = edge_pattern.search(line.strip())
-        if match:
-            nodeA, label, nodeB = match.groups()
-            if nodeA not in adjacency:
-                adjacency[nodeA] = []
-            adjacency[nodeA].append((label, nodeB))
-    return adjacency
-
-
-def bfs_subgraph(adjacency, start_node, depth=1):
-    """
-    🍎 bfs_subgraph:
-      - Gather edges up to 'depth' levels from start_node
-      - If depth=1, only direct edges from node
-    """
-    from collections import deque
-    visited = set()
-    queue = deque([(start_node, 0)])
-    edges = []
-
-    while queue:
-        current, lvl = queue.popleft()
-        if current in visited:
-            continue
-        visited.add(current)
-
-        if current in adjacency and lvl < depth:
-            for (label, child) in adjacency[current]:
-                edges.append((current, label, child))
-                queue.append((child, lvl + 1))
-
-    return edges
-
-
-def create_subgraph_mermaid(sub_edges, start_node):
-    """
-    🍄 create_subgraph_mermaid:
-      - build a smaller flowchart snippet with edges from BFS
-    """
-    sub_mermaid = "flowchart LR\n"
-    sub_mermaid += f" %% Subgraph for {start_node}\n"
-    if not sub_edges:
-        sub_mermaid += f" {start_node}\n"
-        sub_mermaid += " %% End of partial subgraph\n"
-        return sub_mermaid
-    for (A, label, B) in sub_edges:
-        sub_mermaid += f' {A} -- "{label}" --> {B}\n'
-    sub_mermaid += " %% End of partial subgraph\n"
-    return sub_mermaid
-
-
-# =====================================================================================
-# 7) MAIN APP
-# =====================================================================================
def main():
-    st.set_page_config(page_title="Mermaid +

-    #
    query_params = st.query_params
    query_list = (query_params.get('q') or query_params.get('query') or [''])
-    q_or_query = query_list[0]
-
-

    if 'action' in query_params:
        action_list = query_params['action']
        if action_list:
@@ -547,92 +463,69 @@ def main():
            elif action == 'clear':
                clear_query_params()

-    # If
    if 'query' in query_params:
        query_val = query_params['query'][0]
        display_content_or_image(query_val)

-    #
    st.sidebar.write("## Diagram Link Settings")
    model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")

-    #
-    #
-
    new_lines = []
    for line in lines:
        if "click " in line and '"/?' in line:
-            # Try to
-
-
-
-
-
-
-
-            old_url
-
-            target = match.group(4)  # e.g. _blank
-
-                # 1) base
                new_url = inject_base_url(old_url)
-                # 2) model
                new_url = append_model_param(new_url, model_selected)

-
                new_lines.append(new_line)
            else:
                new_lines.append(line)
        else:
            new_lines.append(line)

-
-    adjacency = parse_mermaid_edges(final_mermaid)
-
-    # 4) If user clicked a shape => we show a partial subgraph as "SearchResult"
-    partial_subgraph_html = ""
-    if q_or_query:
-        st.info(f"process_text called with: {PromptPrefix}{q_or_query}")
-
-        # Attempt to find a node whose ID or label includes q_or_query:
-        # We'll do a naive approach: if q_or_query is substring ignoring spaces
-        possible_keys = []
-        for nodeKey in adjacency.keys():
-            # e.g. nodeKey might be 'U((User 😎))'
-            simplified_key = nodeKey.replace("\\n", " ").replace("[", "").replace("]", "").lower()
-            simplified_query = q_or_query.lower().replace("%20", " ")
-            if simplified_query in simplified_key:
-                possible_keys.append(nodeKey)
-
-        chosen_node = None
-        if possible_keys:
-            chosen_node = possible_keys[0]
-        else:
-            st.warning("No adjacency node matched the query param's text. Subgraph is empty.")
-
-        if chosen_node:
-            sub_edges = bfs_subgraph(adjacency, chosen_node, depth=1)
-            sub_mermaid = create_subgraph_mermaid(sub_edges, chosen_node)
-            partial_subgraph_html = generate_mermaid_html(sub_mermaid)
-
-    # 5) Show partial subgraph top-center if we have any
-    if partial_subgraph_html:
-        st.subheader("SearchResult Subgraph")
-        components.html(partial_subgraph_html, height=300, scrolling=False)
-
-    # 6) Render the top-centered *full* diagram
-    st.title("Full Mermaid Diagram (with Base URL + BFS partial subgraphs)")

-
    components.html(diagram_html, height=400, scrolling=True)

-    #
    left_col, right_col = st.columns(2)

    with left_col:
        st.subheader("Markdown Side 📝")
        if "markdown_text" not in st.session_state:
-            st.session_state["markdown_text"] = "## Hello!\
        markdown_text = st.text_area(
            "Edit Markdown:",
            value=st.session_state["markdown_text"],
@@ -640,7 +533,6 @@ def main():
        )
        st.session_state["markdown_text"] = markdown_text

-        # Buttons
        colA, colB = st.columns(2)
        with colA:
            if st.button("🔄 Refresh Markdown"):
@@ -654,16 +546,20 @@ def main():
        st.markdown("**Preview:**")
        st.markdown(markdown_text)

    with right_col:
        st.subheader("Mermaid Side 🧜♂️")
        if "current_mermaid" not in st.session_state:
-            st.session_state["current_mermaid"] =

        mermaid_input = st.text_area(
            "Edit Mermaid Code:",
            value=st.session_state["current_mermaid"],
            height=300
        )
        colC, colD = st.columns(2)
        with colC:
            if st.button("🎨 Refresh Diagram"):
@@ -679,27 +575,35 @@ def main():
        st.markdown("**Mermaid Source:**")
        st.code(mermaid_input, language="python", line_numbers=True)

-    #
    st.markdown("---")
    st.header("Media Galleries")
    num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
    display_images_and_wikipedia_summaries(num_columns_images)

    num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
    display_videos_and_links(num_columns_video)

-    #
    showExtendedTextInterface = False
    if showExtendedTextInterface:
-        #
        # num_columns_text = st.slider("Choose Number of Text Columns", 1, 15, 4)
        # display_buttons_with_scores(num_columns_text)
        pass

-    #
    FileSidebar()

-    #
    titles = [
        "🧠🎭 Semantic Symphonies & Episodic Encores",
        "🌌🎼 AI Rhythms of Memory Lane",

app.py (updated):

import streamlit as st
import streamlit.components.v1 as components

+# If you do model inference via huggingface_hub:
+from huggingface_hub import InferenceClient

+# ----------------------------
+# Configurable BASE_URL
+# ----------------------------
BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"

+# Example placeholders for prompt prefixes
PromptPrefix = "AI-Search: "
PromptPrefix2 = "AI-Refine: "
PromptPrefix3 = "AI-JS: "

+# Example roleplaying glossary
roleplaying_glossary = {
    "Core Rulebooks": {
        "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],

    }
}

+# Example transhuman glossary
transhuman_glossary = {
    "Neural Interfaces": ["Cortex Jack", "Mind-Machine Fusion"],
    "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
}

+# Simple function stubs
def process_text(text):
    st.write(f"process_text called with: {text}")

def search_arxiv(text):
    st.write(f"search_arxiv called with: {text}")

def SpeechSynthesis(text):
    st.write(f"SpeechSynthesis called with: {text}")

def process_image(image_file, prompt):
+    return f"[process_image placeholder] Processing {image_file} with prompt: {prompt}"

def process_video(video_file, seconds_per_frame):
+    st.write(f"[process_video placeholder] Video: {video_file}, seconds/frame: {seconds_per_frame}")

+# Stub if you have a Hugging Face endpoint
API_URL = "https://huggingface-inference-endpoint-placeholder"
API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

@st.cache_resource
def InferenceLLM(prompt):
    return f"[InferenceLLM placeholder response to prompt: {prompt}]"

+# ------------------------------------------
+# Glossary & File Utility
+# ------------------------------------------
@st.cache_resource
def display_glossary_entity(k):
    """
    Creates multiple link emojis for a single entity.
    """
    search_urls = {
+        "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
+        "🃏Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
+        "📚PyCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix2)}",
+        "🔬JSCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix3)}",
+        "📖": lambda x: f"https://en.wikipedia.org/wiki/{quote(x)}",
+        "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
+        "🔎": lambda x: f"https://www.bing.com/search?q={quote(x)}",
+        "🎥": lambda x: f"https://www.youtube.com/results?search_query={quote(x)}",
+        "🐦": lambda x: f"https://twitter.com/search?q={quote(x)}",
    }
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
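Aside (not part of the commit): a minimal sketch of how these lambda-valued entries expand into the markdown links that display_glossary_entity renders. The entity name "Cybernetics" is only an illustrative input.

    from urllib.parse import quote

    search_urls = {
        "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
        "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
    }
    k = "Cybernetics"
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    # links_md == "[🚀🌌ArXiv](/?q=Cybernetics) [🔍](https://www.google.com/search?q=Cybernetics)"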
def display_content_or_image(query):
    """
+    If a query matches something in transhuman_glossary or a local image, show it.
    """
    for category, term_list in transhuman_glossary.items():
        for term in term_list:

    st.warning("No matching content or image found.")
    return False

def clear_query_params():
+    """
+    For clearing URL params, you'd typically use a new link or st.experimental_set_query_params().
+    Here, we just warn the user.
+    """
    st.warning("Define a redirect or link without query params if you want to truly clear them.")

+# -----------------------
+# File Handling
+# -----------------------
def load_file(file_path):
    try:
        with open(file_path, "r", encoding='utf-8') as f:
            return f.read()
    except:
        return ""

@st.cache_resource
def create_zip_of_files(files):
    zip_name = "Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

@st.cache_resource
def get_zip_download_link(zip_file):
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
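Usage sketch (not in the diff), assuming the zipfile/base64/streamlit imports already at the top of app.py; the file names are hypothetical:

    files_to_pack = ["README.md", "notes.md"]        # hypothetical local files
    zip_name = create_zip_of_files(files_to_pack)    # writes the .zip in the working directory
    link_html = get_zip_download_link(zip_name)      # <a> tag carrying a base64 data: URI
    st.markdown(link_html, unsafe_allow_html=True)   # raw HTML needs unsafe_allow_html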
def get_table_download_link(file_path):
    """
    Creates a download link for a single file from your snippet.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:

        file_name = os.path.basename(file_path)
        ext = os.path.splitext(file_name)[1]
        mime_map = {
+            '.txt': 'text/plain',
+            '.py': 'text/plain',
            '.xlsx': 'text/plain',
+            '.csv': 'text/plain',
+            '.htm': 'text/html',
+            '.md': 'text/markdown',
+            '.wav': 'audio/wav'
        }
        mime_type = mime_map.get(ext, 'application/octet-stream')
        return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    except:
        return ''
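Usage sketch (not in the diff): rendering one per-file download link; the file name is hypothetical, and an empty string comes back if the file cannot be read:

    link = get_table_download_link("2024-notes.md")   # hypothetical file name
    if link:
        st.sidebar.markdown(link, unsafe_allow_html=True)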
def get_file_size(file_path):
    return os.path.getsize(file_path)

def FileSidebar():
    """
+    Renders .md files, providing open/view/delete/run logic in the sidebar.
    """
    all_files = glob.glob("*.md")
+    # Exclude short-named or special files if needed
    all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)


            os.remove(file)
            st.rerun()

+    # If we loaded a file
    if file_contents:
        if next_action == 'open':
            open1, open2 = st.columns([0.8, 0.2])

        if st.button("🔍Run"):
            st.write("Running GPT logic placeholder...")

+# ---------------------------
+# Scoring / Glossaries
+# ---------------------------
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)

def generate_key(label, header, idx):
    return f"{header}_{label}_{idx}_key"

def update_score(key, increment=1):
+    """
+    Track a 'score' for each glossary item or term, saved in JSON per key.
+    """
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:

            json.dump(score_data, file)
    return score_data["score"]

def load_score(key):
    file_path = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(file_path):
        with open(file_path, "r") as file:

        return score_data["score"]
    return 0
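Round-trip sketch (not in the diff), assuming each scores/<key>.json holds a {"score": <int>} payload, as update_score and load_score imply; the key is hypothetical:

    key = "Dungeons and Dragons_Player's Handbook_0_key"   # hypothetical, shaped like generate_key() output
    update_score(key)        # creates or increments scores/<key>.json
    print(load_score(key))   # 1 after the first call, 2 after the next, ...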
def display_buttons_with_scores(num_columns_text):
    """
+    Show glossary items as clickable buttons that increment a 'score'.
    """
    game_emojis = {
        "Dungeons and Dragons": "🐉",
+        "Call of Cthulhu": "🐙",
+        "GURPS": "🎲",
+        "Pathfinder": "🗺️",
+        "Kindred of the East": "🌅",
+        "Changeling": "🍃",
    }
    topic_emojis = {
+        "Core Rulebooks": "📚",
+        "Maps & Settings": "🗺️",
+        "Game Mechanics & Tools": "⚙️",
+        "Monsters & Adversaries": "👹",
+        "Campaigns & Adventures": "📜",
+        "Creatives & Assets": "🎨",
+        "Game Master Resources": "🛠️",
+        "Lore & Background": "📖",
+        "Character Development": "🧍",
+        "Homebrew Content": "🔧",
+        "General Topics": "🌍",
    }

    for category, games in roleplaying_glossary.items():

                    newscore = update_score(key.replace('?', ''))
                    st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")

+# -------------------------------
+# Image & Video
+# -------------------------------
def display_images_and_wikipedia_summaries(num_columns=4):
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")

            st.write(f"Could not open {image_file}")
        col_index += 1

def display_videos_and_links(num_columns=4):
    video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
    if not video_files:
        st.write("No MP4 or WEBM videos found in the current directory.")

            st.error("Invalid input for seconds per frame!")
        col_index += 1

+# --------------------------------
+# MERMAID DIAGRAM
+# --------------------------------
def generate_mermaid_html(mermaid_code: str) -> str:
+    """
+    Returns HTML that centers the Mermaid diagram, loading from a CDN.
+    """
    return f"""
    <html>
    <head>

    </html>
    """
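Usage sketch (not in the diff): the HTML wrapper can be previewed on its own through components.html, e.g. with a small two-node flowchart; the height value is arbitrary:

    demo_html = generate_mermaid_html("flowchart LR\n    A[Edit] --> B[Preview]")
    components.html(demo_html, height=300, scrolling=False)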
def append_model_param(url: str, model_selected: bool) -> str:
+    """
+    If user checks 'Append ?model=1', we append &model=1 or ?model=1 if not present.
+    """
    if not model_selected:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"

def inject_base_url(url: str) -> str:
+    """
+    If a link does not start with http, prepend your BASE_URL
+    so it becomes an absolute link to huggingface.co/spaces/...
+    """
    if url.startswith("http"):
        return url
    return f"{BASE_URL}{url}"
# Our default diagram, containing the "click" lines with /?q=...
|
425 |
+
DEFAULT_MERMAID = """
|
426 |
flowchart LR
|
427 |
+
U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\\nExtract Info]
|
428 |
+
click U "/?q=User%20😎" _self
|
429 |
+
click LLM "/?q=LLM%20Agent%20Extract%20Info" _blank
|
430 |
|
431 |
+
LLM -- "Query 🔍" --> HS[Hybrid Search 🔎\\nVector+NER+Lexical]
|
432 |
+
click HS "/?q=Hybrid%20Search%20Vector+NER+Lexical" _blank
|
433 |
|
434 |
+
HS -- "Reason 🤔" --> RE[Reasoning Engine 🛠️\\nNeuralNetwork+Medical]
|
435 |
+
click RE "/?q=Reasoning%20Engine%20NeuralNetwork+Medical" _blank
|
436 |
|
437 |
+
RE -- "Link 📡" --> KG((Knowledge Graph 📚\\nOntology+GAR+RAG))
|
438 |
+
click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" _blank
|
439 |
"""
|
440 |
|
def main():
+    st.set_page_config(page_title="Mermaid + Clickable Links with Base URL", layout="wide")

+    # ---------------------------------------------
+    # Query Param Parsing (non-experimental)
+    # ---------------------------------------------
    query_params = st.query_params
    query_list = (query_params.get('q') or query_params.get('query') or [''])
+    q_or_query = query_list[0] if query_list else ''
+    if q_or_query.strip():
+        # If there's a q= or query= param, do some processing
+        search_payload = PromptPrefix + q_or_query
+        st.markdown(search_payload)
+        process_text(search_payload)
+
+    # If an 'action' param is present
    if 'action' in query_params:
        action_list = query_params['action']
        if action_list:

            elif action == 'clear':
                clear_query_params()

+    # If a 'query' param is present, show content or image
    if 'query' in query_params:
        query_val = query_params['query'][0]
        display_content_or_image(query_val)

+    # ---------------------------------------------
+    # Let user pick if we want ?model=1
+    # ---------------------------------------------
    st.sidebar.write("## Diagram Link Settings")
    model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")

+    # ---------------------------------------------
+    # Rebuild the clickable lines in the Mermaid code
+    # ---------------------------------------------
+    base_diagram = DEFAULT_MERMAID
+    lines = base_diagram.strip().split("\n")
    new_lines = []
+
    for line in lines:
+        # We look for lines like: click SOMENODE "/?q=Something" _self
        if "click " in line and '"/?' in line:
+            # Try to extract the URL part
+            parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
+            if len(parts) == 4:
+                # Example:
+                # parts[0] -> 'click LLM '
+                # parts[1] -> '/?q=LLM%20Agent%20Extract%20Info'
+                # parts[2] -> ' _self'
+                # parts[3] -> '' or trailing
+                old_url = parts[1]
+                # 1) Prepend base if needed
                new_url = inject_base_url(old_url)
+                # 2) Possibly add &model=1
                new_url = append_model_param(new_url, model_selected)

+                # Recombine
+                new_line = f"{parts[0]}\"{new_url}\" {parts[2]}"
                new_lines.append(new_line)
            else:
+                # If we can't parse it, keep it as is
                new_lines.append(line)
        else:
            new_lines.append(line)

+    mermaid_code = "\n".join(new_lines)
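Worked illustration (not in the commit) of what the re.split call yields, assuming a click line whose target is written as a quoted "_self"; note that the click lines in DEFAULT_MERMAID above use bare _self/_blank targets, which this pattern does not match, so they fall through to the else branch unchanged:

    import re

    line = 'click LLM "/?q=LLM%20Agent%20Extract%20Info" "_self"'   # hypothetical input
    parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+("_self")', line)
    # parts == ['', '/?q=LLM%20Agent%20Extract%20Info', '"_self"', '']
    # parts[1] is the URL that inject_base_url / append_model_param then rewrite.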
+    # ---------------------------------------------
+    # Render the top-centered Mermaid diagram
+    # ---------------------------------------------
+    st.sidebar.markdown("Mermaid Diagram with Base URL Injection")
+    diagram_html = generate_mermaid_html(mermaid_code)
    components.html(diagram_html, height=400, scrolling=True)

+    # ---------------------------------------------
+    # Two-column interface: Markdown & Mermaid
+    # ---------------------------------------------
    left_col, right_col = st.columns(2)

+    # --- Left: Markdown Editor
    with left_col:
        st.subheader("Markdown Side 📝")
        if "markdown_text" not in st.session_state:
+            st.session_state["markdown_text"] = "## Hello!\nType some *Markdown* here.\n"
        markdown_text = st.text_area(
            "Edit Markdown:",
            value=st.session_state["markdown_text"],

        )
        st.session_state["markdown_text"] = markdown_text

        colA, colB = st.columns(2)
        with colA:
            if st.button("🔄 Refresh Markdown"):

        st.markdown("**Preview:**")
        st.markdown(markdown_text)

+    # --- Right: Mermaid Editor
    with right_col:
        st.subheader("Mermaid Side 🧜♂️")
+
+        # We store the final code in session state, so user can edit
        if "current_mermaid" not in st.session_state:
+            st.session_state["current_mermaid"] = mermaid_code

        mermaid_input = st.text_area(
            "Edit Mermaid Code:",
            value=st.session_state["current_mermaid"],
            height=300
        )
+
        colC, colD = st.columns(2)
        with colC:
            if st.button("🎨 Refresh Diagram"):

        st.markdown("**Mermaid Source:**")
        st.code(mermaid_input, language="python", line_numbers=True)

+    # ---------------------------------------------
+    # Media Galleries
+    # ---------------------------------------------
    st.markdown("---")
    st.header("Media Galleries")
+
    num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
    display_images_and_wikipedia_summaries(num_columns_images)

    num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
    display_videos_and_links(num_columns_video)

+    # (Optionally) Extended text interface
    showExtendedTextInterface = False
    if showExtendedTextInterface:
+        # For example:
+        # display_glossary_grid(roleplaying_glossary)
        # num_columns_text = st.slider("Choose Number of Text Columns", 1, 15, 4)
        # display_buttons_with_scores(num_columns_text)
        pass

+    # ---------------------------------------------
+    # File Sidebar
+    # ---------------------------------------------
    FileSidebar()

+    # ---------------------------------------------
+    # Random Title at the bottom
+    # ---------------------------------------------
    titles = [
        "🧠🎭 Semantic Symphonies & Episodic Encores",
        "🌌🎼 AI Rhythms of Memory Lane",