awacke1 committed on
Commit
9be2fe6
·
verified ·
1 Parent(s): ad82dc6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +119 -266
app.py CHANGED
@@ -15,12 +15,12 @@ from urllib.parse import quote
15
  import streamlit as st
16
  import streamlit.components.v1 as components
17
 
18
- # 🏰 If you do model inference via huggingface_hub
19
- # from huggingface_hub import InferenceClient
20
 
21
- # =====================================================================================
22
- # 1) GLOBAL CONFIG & PLACEHOLDERS
23
- # =====================================================================================
24
  BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"
25
 
26
  PromptPrefix = "AI-Search: "
@@ -42,45 +42,42 @@ transhuman_glossary = {
42
  "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
43
  }
44
 
 
 
 
45
 
46
def process_text(text):
    """Debug helper: echo the text this handler received onto the Streamlit page."""
    message = f"process_text called with: {text}"
    st.write(message)
49
 
50
-
51
def search_arxiv(text):
    """Stubbed ArXiv search: no real query yet, just log the call for debugging."""
    debug_line = f"search_arxiv called with: {text}"
    st.write(debug_line)
54
 
55
-
56
def SpeechSynthesis(text):
    """Placeholder TTS hook: instead of speaking, log the text to Streamlit."""
    st.write(f"SpeechSynthesis called with: {text}")
59
 
60
-
61
def process_image(image_file, prompt):
    """Stub image pipeline: describe what would be processed and return it.

    Returns a placeholder string combining the file reference and prompt.
    """
    placeholder = f"[process_image placeholder] {image_file} => {prompt}"
    return placeholder
64
 
65
-
66
def process_video(video_file, seconds_per_frame):
    """Stub video pipeline: log the intended file and frame-sampling rate."""
    notice = f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame"
    st.write(notice)
69
 
70
-
71
  API_URL = "https://huggingface-inference-endpoint-placeholder"
72
  API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
73
 
74
-
75
@st.cache_resource
def InferenceLLM(prompt):
    """Mock LLM endpoint: return a canned response embedding the prompt.

    Cached via st.cache_resource so repeated prompts reuse the same stub.
    """
    canned_reply = f"[InferenceLLM placeholder response to prompt: {prompt}]"
    return canned_reply
79
 
 
 
 
80
 
81
- # =====================================================================================
82
- # 2) GLOSSARY & FILE UTILITY
83
- # =====================================================================================
84
  @st.cache_resource
85
  def display_glossary_entity(k):
86
  """
@@ -88,20 +85,19 @@ def display_glossary_entity(k):
88
  Each link might point to /?q=..., /?q=<prefix>..., or external sites.
89
  """
90
  search_urls = {
91
- "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
92
- "🃏Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
93
- "📚PyCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix2)}",
94
- "🔬JSCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix3)}",
95
- "📖": lambda x: f"https://en.wikipedia.org/wiki/{quote(x)}",
96
- "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
97
- "🔎": lambda x: f"https://www.bing.com/search?q={quote(x)}",
98
- "🎥": lambda x: f"https://www.youtube.com/results?search_query={quote(x)}",
99
- "🐦": lambda x: f"https://twitter.com/search?q={quote(x)}",
100
  }
101
  links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
102
  st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
103
 
104
-
105
  def display_content_or_image(query):
106
  """
107
  If 'query' is in transhuman_glossary or there's an image matching 'images/<query>.png',
@@ -120,15 +116,14 @@ def display_content_or_image(query):
120
  st.warning("No matching content or image found.")
121
  return False
122
 
123
-
124
  def clear_query_params():
125
- """For fully clearing, you'd do a redirect or st.experimental_set_query_params()."""
126
  st.warning("Define a redirect or link without query params if you want to truly clear them.")
127
 
 
 
 
128
 
129
- # =====================================================================================
130
- # 3) FILE-HANDLING (MD files, etc.)
131
- # =====================================================================================
132
  def load_file(file_path):
133
  """Load file contents as UTF-8 text, or return empty on error."""
134
  try:
@@ -137,7 +132,6 @@ def load_file(file_path):
137
  except:
138
  return ""
139
 
140
-
141
  @st.cache_resource
142
  def create_zip_of_files(files):
143
  """Combine multiple local files into a single .zip for user to download."""
@@ -147,7 +141,6 @@ def create_zip_of_files(files):
147
  zipf.write(file)
148
  return zip_name
149
 
150
-
151
  @st.cache_resource
152
  def get_zip_download_link(zip_file):
153
  """Return an <a> link to download the given zip_file (base64-encoded)."""
@@ -156,7 +149,6 @@ def get_zip_download_link(zip_file):
156
  b64 = base64.b64encode(data).decode()
157
  return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
158
 
159
-
160
  def get_table_download_link(file_path):
161
  """
162
  Creates a download link for a single file from your snippet.
@@ -169,36 +161,31 @@ def get_table_download_link(file_path):
169
  file_name = os.path.basename(file_path)
170
  ext = os.path.splitext(file_name)[1]
171
  mime_map = {
172
- '.txt': 'text/plain',
173
- '.py': 'text/plain',
174
  '.xlsx': 'text/plain',
175
- '.csv': 'text/plain',
176
- '.htm': 'text/html',
177
- '.md': 'text/markdown',
178
- '.wav': 'audio/wav'
179
  }
180
  mime_type = mime_map.get(ext, 'application/octet-stream')
181
  return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
182
  except:
183
  return ''
184
 
185
-
186
def get_file_size(file_path):
    """Return the size of *file_path* in bytes (delegates to os.path.getsize)."""
    size_in_bytes = os.path.getsize(file_path)
    return size_in_bytes
189
 
190
-
191
  def FileSidebar():
192
  """
193
  Renders .md files in the sidebar with open/view/run/delete logic.
194
  """
195
  all_files = glob.glob("*.md")
196
- # If you want to filter out short-named or special files:
197
  all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
198
- # sorting in place
199
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
200
 
201
- # Buttons for "Delete All" and "Download"
202
  Files1, Files2 = st.sidebar.columns(2)
203
  with Files1:
204
  if st.button("🗑 Delete All"):
@@ -214,11 +201,10 @@ def FileSidebar():
214
  file_name = ''
215
  next_action = ''
216
 
217
- # Each file row
218
  for file in all_files:
219
- col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])
220
  with col1:
221
- if st.button("🌐", key="md_" + file):
222
  file_contents = load_file(file)
223
  file_name = file
224
  next_action = 'md'
@@ -226,7 +212,7 @@ def FileSidebar():
226
  with col2:
227
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
228
  with col3:
229
- if st.button("📂", key="open_" + file):
230
  file_contents = load_file(file)
231
  file_name = file
232
  next_action = 'open'
@@ -235,13 +221,13 @@ def FileSidebar():
235
  st.session_state['filetext'] = file_contents
236
  st.session_state['next_action'] = next_action
237
  with col4:
238
- if st.button("▶️", key="read_" + file):
239
  file_contents = load_file(file)
240
  file_name = file
241
  next_action = 'search'
242
  st.session_state['next_action'] = next_action
243
  with col5:
244
- if st.button("🗑", key="delete_" + file):
245
  os.remove(file)
246
  st.rerun()
247
 
@@ -251,7 +237,6 @@ def FileSidebar():
251
  with open1:
252
  file_name_input = st.text_input('File Name:', file_name, key='file_name_input')
253
  file_content_area = st.text_area('File Contents:', file_contents, height=300, key='file_content_area')
254
-
255
  if st.button('💾 Save File'):
256
  with open(file_name_input, 'w', encoding='utf-8') as f:
257
  f.write(file_content_area)
@@ -270,18 +255,15 @@ def FileSidebar():
270
  if st.button("🔍Run"):
271
  st.write("Running GPT logic placeholder...")
272
 
273
-
274
- # =====================================================================================
275
- # 4) SCORING / GLOSSARIES
276
- # =====================================================================================
277
  score_dir = "scores"
278
  os.makedirs(score_dir, exist_ok=True)
279
 
280
-
281
def generate_key(label, header, idx):
    """Compose a deterministic Streamlit widget key from header, label and index."""
    widget_key = f"{header}_{label}_{idx}_key"
    return widget_key
283
 
284
-
285
  def update_score(key, increment=1):
286
  """Increment the 'score' for a glossary item in JSON storage."""
287
  score_file = os.path.join(score_dir, f"{key}.json")
@@ -296,7 +278,6 @@ def update_score(key, increment=1):
296
  json.dump(score_data, file)
297
  return score_data["score"]
298
 
299
-
300
  def load_score(key):
301
  """Load the stored score from .json if it exists, else 0."""
302
  file_path = os.path.join(score_dir, f"{key}.json")
@@ -306,31 +287,30 @@ def load_score(key):
306
  return score_data["score"]
307
  return 0
308
 
309
-
310
  def display_buttons_with_scores(num_columns_text):
311
  """
312
  Show glossary items as clickable buttons, each increments a 'score'.
313
  """
314
  game_emojis = {
315
  "Dungeons and Dragons": "🐉",
316
- "Call of Cthulhu": "🐙",
317
- "GURPS": "🎲",
318
- "Pathfinder": "🗺️",
319
- "Kindred of the East": "🌅",
320
- "Changeling": "🍃",
321
  }
322
  topic_emojis = {
323
- "Core Rulebooks": "📚",
324
- "Maps & Settings": "🗺️",
325
- "Game Mechanics & Tools": "⚙️",
326
- "Monsters & Adversaries": "👹",
327
- "Campaigns & Adventures": "📜",
328
- "Creatives & Assets": "🎨",
329
- "Game Master Resources": "🛠️",
330
- "Lore & Background": "📖",
331
- "Character Development": "🧍",
332
- "Homebrew Content": "🔧",
333
- "General Topics": "🌍",
334
  }
335
 
336
  for category, games in roleplaying_glossary.items():
@@ -345,10 +325,10 @@ def display_buttons_with_scores(num_columns_text):
345
  newscore = update_score(key.replace('?', ''))
346
  st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
347
 
 
 
 
348
 
349
- # =====================================================================================
350
- # 5) IMAGES & VIDEOS
351
- # =====================================================================================
352
  def display_images_and_wikipedia_summaries(num_columns=4):
353
  """Display .png images in a grid, referencing the name as a 'keyword'."""
354
  image_files = [f for f in os.listdir('.') if f.endswith('.png')]
@@ -359,7 +339,6 @@ def display_images_and_wikipedia_summaries(num_columns=4):
359
  image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
360
  cols = st.columns(num_columns)
361
  col_index = 0
362
-
363
  for image_file in image_files_sorted:
364
  with cols[col_index % num_columns]:
365
  try:
@@ -375,7 +354,6 @@ def display_images_and_wikipedia_summaries(num_columns=4):
375
  st.write(f"Could not open {image_file}")
376
  col_index += 1
377
 
378
-
379
  def display_videos_and_links(num_columns=4):
380
  """Displays all .mp4/.webm in a grid, plus text input for prompts."""
381
  video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
@@ -386,7 +364,6 @@ def display_videos_and_links(num_columns=4):
386
  video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
387
  cols = st.columns(num_columns)
388
  col_index = 0
389
-
390
  for video_file in video_files_sorted:
391
  with cols[col_index % num_columns]:
392
  k = video_file.split('.')[0]
@@ -401,10 +378,10 @@ def display_videos_and_links(num_columns=4):
401
  st.error("Invalid input for seconds per frame!")
402
  col_index += 1
403
 
 
 
 
404
 
405
- # =====================================================================================
406
- # 6) MERMAID & PARTIAL SUBGRAPH LOGIC
407
- # =====================================================================================
408
  def generate_mermaid_html(mermaid_code: str) -> str:
409
  """Embed mermaid_code in a minimal HTML snippet, centered."""
410
  return f"""
@@ -433,160 +410,82 @@ def generate_mermaid_html(mermaid_code: str) -> str:
433
  </html>
434
  """
435
 
436
-
437
def append_model_param(url: str, model_selected: bool) -> str:
    """Append the ``model=1`` query flag to *url* when the user enabled it.

    Uses ``?`` when the URL has no query string yet, ``&`` otherwise.
    """
    if not model_selected:
        return url
    separator = "?" if "?" not in url else "&"
    return f"{url}{separator}model=1"
443
 
444
-
445
def inject_base_url(url: str) -> str:
    """Make *url* absolute: prefix BASE_URL unless it already starts with 'http'."""
    return url if url.startswith("http") else f"{BASE_URL}{url}"
450
 
 
 
 
 
 
 
 
 
451
 
452
- # We'll keep the default mermaid that references /?q=...
453
  DEFAULT_MERMAID = r"""
454
  flowchart LR
455
- U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\nExtract Info]
456
- click U "/?q=U" "Open 'User 😎'" _blank
457
- click LLM "/?q=LLM%20Agent%20Extract%20Info" "Open LLM Agent" _blank
458
 
459
- LLM -- "Query 🔍" --> HS[Hybrid Search 🔎\nVector+NER+Lexical]
460
- click HS "/?q=Hybrid%20Search%20Vector+NER+Lexical" "Open Hybrid Search" _blank
461
 
462
- HS -- "Reason 🤔" --> RE[Reasoning Engine 🛠️\nNeuralNetwork+Medical]
463
- click RE "/?q=Reasoning%20Engine%20NeuralNetwork+Medical" "Open Reasoning" _blank
464
 
465
- RE -- "Link 📡" --> KG((Knowledge Graph 📚\nOntology+GAR+RAG))
466
- click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" "Open Knowledge Graph" _blank
467
  """
468
 
 
 
 
469
 
470
def parse_mermaid_edges(mermaid_text: str):
    """Build an adjacency map from Mermaid edge lines.

    Matches lines of the form ``A -- "Label" --> B`` and returns a dict
    ``{source: [(edge_label, target), ...]}`` in order of appearance.
    """
    pattern = re.compile(r'(\S+)\s*--\s*"([^"]*)"\s*-->\s*(\S+)')
    adjacency = {}
    for raw_line in mermaid_text.split('\n'):
        hit = pattern.search(raw_line.strip())
        if hit is None:
            continue
        src, edge_label, dst = hit.groups()
        adjacency.setdefault(src, []).append((edge_label, dst))
    return adjacency
487
-
488
-
489
def bfs_subgraph(adjacency, start_node, depth=1):
    """Collect edges reachable from *start_node* within *depth* hops (BFS).

    Returns a list of ``(parent, label, child)`` triples in discovery order.
    With depth=1 only the direct out-edges of the start node are returned.
    """
    from collections import deque

    seen = set()
    pending = deque([(start_node, 0)])
    collected = []
    while pending:
        node, level = pending.popleft()
        if node in seen:
            continue
        seen.add(node)
        # Stop expanding once we hit the depth limit or a leaf node.
        if level >= depth or node not in adjacency:
            continue
        for edge_label, child in adjacency[node]:
            collected.append((node, edge_label, child))
            pending.append((child, level + 1))
    return collected
512
-
513
-
514
def create_subgraph_mermaid(sub_edges, start_node):
    """Render a BFS edge list as a small Mermaid flowchart snippet.

    If *sub_edges* is empty, emits just the start node so the diagram
    still shows something meaningful.
    """
    parts = ["flowchart LR\n", f" %% SearchResult Subgraph for {start_node}\n"]
    if sub_edges:
        for src, edge_label, dst in sub_edges:
            parts.append(f' {src} -- "{edge_label}" --> {dst}\n')
    else:
        parts.append(f" {start_node}\n")
    parts.append(" %% End of partial subgraph\n")
    return "".join(parts)
530
-
531
-
532
- # =====================================================================================
533
- # 7) MAIN APP
534
- # =====================================================================================
535
  def main():
536
- st.set_page_config(page_title="Mermaid + BFS Subgraph + Full Logic", layout="wide")
537
 
538
  # 1) Query param parsing
539
  query_params = st.query_params
540
- query_list = (query_params.get('q') or query_params.get('query') or [''])
541
- q_or_query = query_list[0].strip() if len(query_list) > 0 else ""
542
-
543
- # If 'action' param is present
544
- if 'action' in query_params:
545
- action_list = query_params['action']
546
- if action_list:
547
- action = action_list[0]
548
- if action == 'show_message':
549
- st.success("Showing a message because 'action=show_message' was found in the URL.")
550
- elif action == 'clear':
551
- clear_query_params()
552
-
553
- # If there's a 'query=' param, display content or image
554
- if 'query' in query_params:
555
- query_val = query_params['query'][0]
556
- display_content_or_image(query_val)
557
 
558
  # 2) Let user pick ?model=1
559
  st.sidebar.write("## Diagram Link Settings")
560
  model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")
561
 
562
- # 3) We'll parse adjacency from DEFAULT_MERMAID, then do the injection for base URL
563
- # and possible model param. We'll store the final mermaid code in session.
564
  lines = DEFAULT_MERMAID.strip().split("\n")
565
  new_lines = []
566
  for line in lines:
567
- if "click " in line and '"/?' in line:
568
- # try to parse out the URL
569
- parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+"([^"]+)"\s+(\S+)', line)
570
- # For example:
571
- # 'click U "/?q=U" "Open 'User 😎'" _blank'
572
- # might become:
573
- # parts = [prefix, '/?q=U', "Open 'User 😎'", '_blank', '']
574
- if len(parts) >= 4:
575
- # Reassemble with base URL + optional model param
576
- old_url = parts[1]
577
- tooltip = parts[2]
578
- target = parts[3]
579
-
580
- # 1) base
581
  new_url = inject_base_url(old_url)
582
- # 2) model param
583
  new_url = append_model_param(new_url, model_selected)
584
-
585
- # Rebuild the line
586
- new_line = f"{parts[0]}\"{new_url}\" \"{tooltip}\" {target}"
587
- # If there's a remainder (parts[4]) it might be an empty string
588
- if len(parts) > 4:
589
- new_line += parts[4]
590
  new_lines.append(new_line)
591
  else:
592
  new_lines.append(line)
@@ -594,64 +493,25 @@ def main():
594
  new_lines.append(line)
595
 
596
  final_mermaid = "\n".join(new_lines)
597
- adjacency = parse_mermaid_edges(final_mermaid)
598
-
599
- # 4) If user clicked a shape => we show a partial subgraph as "SearchResult"
600
- partial_subgraph_html = ""
601
- if q_or_query:
602
- # Special-case if user clicked "User" => q=U => we know the node is "U((User 😎))"
603
- if q_or_query == "U":
604
- chosen_node = "U((User 😎))"
605
- st.info(f"process_text called with: {PromptPrefix}{q_or_query} => forcing node U((User 😎))")
606
- else:
607
- st.info(f"process_text called with: {PromptPrefix}{q_or_query}")
608
- # Attempt to find a node whose ID or label includes q_or_query:
609
- possible_keys = []
610
- for nodeKey in adjacency.keys():
611
- # e.g. nodeKey might be 'U((User 😎))'
612
- simplified_key = nodeKey.replace("\\n", " ").replace("[", "").replace("]", "").lower()
613
- simplified_query = q_or_query.lower().replace("%20", " ")
614
- if simplified_query in simplified_key:
615
- possible_keys.append(nodeKey)
616
-
617
- if possible_keys:
618
- chosen_node = possible_keys[0]
619
- else:
620
- chosen_node = None
621
- st.warning("No adjacency node matched the query param's text. Subgraph is empty.")
622
-
623
- if chosen_node:
624
- # BFS subgraph for chosen_node with depth=1
625
- sub_edges = bfs_subgraph(adjacency, chosen_node, depth=1)
626
- sub_mermaid = create_subgraph_mermaid(sub_edges, chosen_node)
627
- partial_subgraph_html = generate_mermaid_html(sub_mermaid)
628
-
629
- # 5) Show partial subgraph top-center if we have any
630
- if partial_subgraph_html:
631
- st.subheader("SearchResult Subgraph")
632
- components.html(partial_subgraph_html, height=300, scrolling=False)
633
-
634
- # 6) Render the top-centered *full* diagram
635
- st.title("Full Mermaid Diagram (with Base URL + model=1 logic)")
636
 
 
 
637
  diagram_html = generate_mermaid_html(final_mermaid)
638
  components.html(diagram_html, height=400, scrolling=True)
639
 
640
- # 7) Editor columns: Markdown & Mermaid
641
  left_col, right_col = st.columns(2)
642
 
643
  with left_col:
644
  st.subheader("Markdown Side 📝")
645
  if "markdown_text" not in st.session_state:
646
  st.session_state["markdown_text"] = "## Hello!\nYou can type some *Markdown* here.\n"
647
- markdown_text = st.text_area(
648
- "Edit Markdown:",
649
- value=st.session_state["markdown_text"],
650
- height=300
651
- )
652
  st.session_state["markdown_text"] = markdown_text
653
 
654
- # Buttons
655
  colA, colB = st.columns(2)
656
  with colA:
657
  if st.button("🔄 Refresh Markdown"):
@@ -667,15 +527,14 @@ def main():
667
 
668
  with right_col:
669
  st.subheader("Mermaid Side 🧜‍♂️")
 
670
  if "current_mermaid" not in st.session_state:
671
  st.session_state["current_mermaid"] = final_mermaid
672
 
673
- # Let user see the final code we built
674
- mermaid_input = st.text_area(
675
- "Edit Mermaid Code:",
676
- value=st.session_state["current_mermaid"],
677
- height=300
678
- )
679
  colC, colD = st.columns(2)
680
  with colC:
681
  if st.button("🎨 Refresh Diagram"):
@@ -691,27 +550,20 @@ def main():
691
  st.markdown("**Mermaid Source:**")
692
  st.code(mermaid_input, language="python", line_numbers=True)
693
 
694
- # 8) Show the galleries
695
  st.markdown("---")
696
  st.header("Media Galleries")
 
697
  num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
698
  display_images_and_wikipedia_summaries(num_columns_images)
699
 
700
  num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
701
  display_videos_and_links(num_columns_video)
702
 
703
- # 9) Possibly show extended text interface
704
- showExtendedTextInterface = False
705
- if showExtendedTextInterface:
706
- # e.g. display_glossary_grid(roleplaying_glossary)
707
- # num_columns_text = st.slider("Choose Number of Text Columns", 1, 15, 4)
708
- # display_buttons_with_scores(num_columns_text)
709
- pass
710
-
711
- # 10) Render the file sidebar
712
  FileSidebar()
713
 
714
- # 11) Random title at bottom
715
  titles = [
716
  "🧠🎭 Semantic Symphonies & Episodic Encores",
717
  "🌌🎼 AI Rhythms of Memory Lane",
@@ -724,6 +576,7 @@ def main():
724
  ]
725
  st.markdown(f"**{random.choice(titles)}**")
726
 
 
727
 
728
  if __name__ == "__main__":
729
  main()
 
15
  import streamlit as st
16
  import streamlit.components.v1 as components
17
 
18
+ # huggingface_hub usage if you do model inference
19
+ #from huggingface_hub import InferenceClient
20
 
21
+ ########################################################################################
22
+ # 1) GLOBAL CONFIG & PLACEHOLDERS
23
+ ########################################################################################
24
  BASE_URL = "https://huggingface.co/spaces/awacke1/MermaidMarkdownDiagramEditor"
25
 
26
  PromptPrefix = "AI-Search: "
 
42
  "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
43
  }
44
 
45
+ ########################################################################################
46
+ # 2) SIMPLE HELPER FUNCS
47
+ ########################################################################################
48
 
49
  def process_text(text):
50
  """🕵️ process_text: detective style—prints lines to Streamlit for debugging."""
51
  st.write(f"process_text called with: {text}")
52
 
 
53
  def search_arxiv(text):
54
  """🔭 search_arxiv: pretend to search ArXiv, just prints debug for now."""
55
  st.write(f"search_arxiv called with: {text}")
56
 
 
57
  def SpeechSynthesis(text):
58
+ """🗣 SpeechSynthesis: read lines out loud? For demo, we just log them."""
59
  st.write(f"SpeechSynthesis called with: {text}")
60
 
 
61
  def process_image(image_file, prompt):
62
  """📷 process_image: imagine an AI pipeline for images, here we just log."""
63
  return f"[process_image placeholder] {image_file} => {prompt}"
64
 
 
65
  def process_video(video_file, seconds_per_frame):
66
  """🎞 process_video: placeholder for video tasks, logs to Streamlit."""
67
  st.write(f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame")
68
 
 
69
  API_URL = "https://huggingface-inference-endpoint-placeholder"
70
  API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
71
 
 
72
  @st.cache_resource
73
  def InferenceLLM(prompt):
74
  """🔮 InferenceLLM: a stub returning a mock response for 'prompt'."""
75
  return f"[InferenceLLM placeholder response to prompt: {prompt}]"
76
 
77
+ ########################################################################################
78
+ # 3) GLOSSARY & FILE UTILITY
79
+ ########################################################################################
80
 
 
 
 
81
  @st.cache_resource
82
  def display_glossary_entity(k):
83
  """
 
85
  Each link might point to /?q=..., /?q=<prefix>..., or external sites.
86
  """
87
  search_urls = {
88
+ "🚀🌌ArXiv": lambda x: f"/?q={quote(x)}",
89
+ "🃏Analyst": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix)}",
90
+ "📚PyCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix2)}",
91
+ "🔬JSCoder": lambda x: f"/?q={quote(x)}-{quote(PromptPrefix3)}",
92
+ "📖": lambda x: f"https://en.wikipedia.org/wiki/{quote(x)}",
93
+ "🔍": lambda x: f"https://www.google.com/search?q={quote(x)}",
94
+ "🔎": lambda x: f"https://www.bing.com/search?q={quote(x)}",
95
+ "🎥": lambda x: f"https://www.youtube.com/results?search_query={quote(x)}",
96
+ "🐦": lambda x: f"https://twitter.com/search?q={quote(x)}",
97
  }
98
  links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
99
  st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
100
 
 
101
  def display_content_or_image(query):
102
  """
103
  If 'query' is in transhuman_glossary or there's an image matching 'images/<query>.png',
 
116
  st.warning("No matching content or image found.")
117
  return False
118
 
 
119
  def clear_query_params():
120
+ """If you want to truly remove them, do a redirect or st.experimental_set_query_params()."""
121
  st.warning("Define a redirect or link without query params if you want to truly clear them.")
122
 
123
+ ########################################################################################
124
+ # 4) FILE-HANDLING (MD files, etc.)
125
+ ########################################################################################
126
 
 
 
 
127
  def load_file(file_path):
128
  """Load file contents as UTF-8 text, or return empty on error."""
129
  try:
 
132
  except:
133
  return ""
134
 
 
135
  @st.cache_resource
136
  def create_zip_of_files(files):
137
  """Combine multiple local files into a single .zip for user to download."""
 
141
  zipf.write(file)
142
  return zip_name
143
 
 
144
  @st.cache_resource
145
  def get_zip_download_link(zip_file):
146
  """Return an <a> link to download the given zip_file (base64-encoded)."""
 
149
  b64 = base64.b64encode(data).decode()
150
  return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
151
 
 
152
  def get_table_download_link(file_path):
153
  """
154
  Creates a download link for a single file from your snippet.
 
161
  file_name = os.path.basename(file_path)
162
  ext = os.path.splitext(file_name)[1]
163
  mime_map = {
164
+ '.txt': 'text/plain',
165
+ '.py': 'text/plain',
166
  '.xlsx': 'text/plain',
167
+ '.csv': 'text/plain',
168
+ '.htm': 'text/html',
169
+ '.md': 'text/markdown',
170
+ '.wav': 'audio/wav'
171
  }
172
  mime_type = mime_map.get(ext, 'application/octet-stream')
173
  return f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
174
  except:
175
  return ''
176
 
 
177
  def get_file_size(file_path):
178
  """Get file size in bytes."""
179
  return os.path.getsize(file_path)
180
 
 
181
  def FileSidebar():
182
  """
183
  Renders .md files in the sidebar with open/view/run/delete logic.
184
  """
185
  all_files = glob.glob("*.md")
 
186
  all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
 
187
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
188
 
 
189
  Files1, Files2 = st.sidebar.columns(2)
190
  with Files1:
191
  if st.button("🗑 Delete All"):
 
201
  file_name = ''
202
  next_action = ''
203
 
 
204
  for file in all_files:
205
+ col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
206
  with col1:
207
+ if st.button("🌐", key="md_"+file):
208
  file_contents = load_file(file)
209
  file_name = file
210
  next_action = 'md'
 
212
  with col2:
213
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
214
  with col3:
215
+ if st.button("📂", key="open_"+file):
216
  file_contents = load_file(file)
217
  file_name = file
218
  next_action = 'open'
 
221
  st.session_state['filetext'] = file_contents
222
  st.session_state['next_action'] = next_action
223
  with col4:
224
+ if st.button("▶️", key="read_"+file):
225
  file_contents = load_file(file)
226
  file_name = file
227
  next_action = 'search'
228
  st.session_state['next_action'] = next_action
229
  with col5:
230
+ if st.button("🗑", key="delete_"+file):
231
  os.remove(file)
232
  st.rerun()
233
 
 
237
  with open1:
238
  file_name_input = st.text_input('File Name:', file_name, key='file_name_input')
239
  file_content_area = st.text_area('File Contents:', file_contents, height=300, key='file_content_area')
 
240
  if st.button('💾 Save File'):
241
  with open(file_name_input, 'w', encoding='utf-8') as f:
242
  f.write(file_content_area)
 
255
  if st.button("🔍Run"):
256
  st.write("Running GPT logic placeholder...")
257
 
258
+ ########################################################################################
259
+ # 5) SCORING / GLOSSARIES
260
+ ########################################################################################
 
261
  score_dir = "scores"
262
  os.makedirs(score_dir, exist_ok=True)
263
 
 
264
  def generate_key(label, header, idx):
265
  return f"{header}_{label}_{idx}_key"
266
 
 
267
  def update_score(key, increment=1):
268
  """Increment the 'score' for a glossary item in JSON storage."""
269
  score_file = os.path.join(score_dir, f"{key}.json")
 
278
  json.dump(score_data, file)
279
  return score_data["score"]
280
 
 
281
  def load_score(key):
282
  """Load the stored score from .json if it exists, else 0."""
283
  file_path = os.path.join(score_dir, f"{key}.json")
 
287
  return score_data["score"]
288
  return 0
289
 
 
290
  def display_buttons_with_scores(num_columns_text):
291
  """
292
  Show glossary items as clickable buttons, each increments a 'score'.
293
  """
294
  game_emojis = {
295
  "Dungeons and Dragons": "🐉",
296
+ "Call of Cthulhu": "🐙",
297
+ "GURPS": "🎲",
298
+ "Pathfinder": "🗺️",
299
+ "Kindred of the East": "🌅",
300
+ "Changeling": "🍃",
301
  }
302
  topic_emojis = {
303
+ "Core Rulebooks": "📚",
304
+ "Maps & Settings": "🗺️",
305
+ "Game Mechanics & Tools": "⚙️",
306
+ "Monsters & Adversaries": "👹",
307
+ "Campaigns & Adventures": "📜",
308
+ "Creatives & Assets": "🎨",
309
+ "Game Master Resources": "🛠️",
310
+ "Lore & Background": "📖",
311
+ "Character Development": "🧍",
312
+ "Homebrew Content": "🔧",
313
+ "General Topics": "🌍",
314
  }
315
 
316
  for category, games in roleplaying_glossary.items():
 
325
  newscore = update_score(key.replace('?', ''))
326
  st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
327
 
328
+ ########################################################################################
329
+ # 6) IMAGES & VIDEOS
330
+ ########################################################################################
331
 
 
 
 
332
  def display_images_and_wikipedia_summaries(num_columns=4):
333
  """Display .png images in a grid, referencing the name as a 'keyword'."""
334
  image_files = [f for f in os.listdir('.') if f.endswith('.png')]
 
339
  image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
340
  cols = st.columns(num_columns)
341
  col_index = 0
 
342
  for image_file in image_files_sorted:
343
  with cols[col_index % num_columns]:
344
  try:
 
354
  st.write(f"Could not open {image_file}")
355
  col_index += 1
356
 
 
357
  def display_videos_and_links(num_columns=4):
358
  """Displays all .mp4/.webm in a grid, plus text input for prompts."""
359
  video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
 
364
  video_files_sorted = sorted(video_files, key=lambda x: len(x.split('.')[0]))
365
  cols = st.columns(num_columns)
366
  col_index = 0
 
367
  for video_file in video_files_sorted:
368
  with cols[col_index % num_columns]:
369
  k = video_file.split('.')[0]
 
378
  st.error("Invalid input for seconds per frame!")
379
  col_index += 1
380
 
381
+ ########################################################################################
382
+ # 7) MERMAID
383
+ ########################################################################################
384
 
 
 
 
385
  def generate_mermaid_html(mermaid_code: str) -> str:
386
  """Embed mermaid_code in a minimal HTML snippet, centered."""
387
  return f"""
 
410
  </html>
411
  """
412
 
 
413
def append_model_param(url: str, model_selected: bool) -> str:
    """Append ``model=1`` as a query parameter to *url* when requested.

    Uses ``&`` as the delimiter if the URL already has a query string,
    ``?`` otherwise.  Idempotent: if ``model=1`` is already present the
    URL is returned unchanged — the original re-appended the parameter on
    every call, contradicting its own "if not present" docstring.

    Args:
        url: The link URL (absolute or relative).
        model_selected: Whether the user checked the "append model" box.

    Returns:
        The URL with ``model=1`` appended at most once.
    """
    if not model_selected:
        return url
    # Already tagged — don't duplicate the parameter on repeated calls.
    if "model=1" in url:
        return url
    delimiter = "&" if "?" in url else "?"
    return f"{url}{delimiter}model=1"
 
420
def inject_base_url(url: str) -> str:
    """Return *url* unchanged if it is already absolute; otherwise
    prefix it with the module-level BASE_URL so relative links resolve."""
    is_absolute = url.startswith("http")
    return url if is_absolute else f"{BASE_URL}{url}"
425
 
426
+ ########################################################################################
427
+ # 8) NEW MERMAID CODE - NO DOUBLE QUOTES, USE UPPER/LOWER MERGING
428
+ ########################################################################################
429
+
430
+ # The user wants no double quotes, plus a middle word like OpenUser
431
+ # Example usage:
432
+ # click U /?q=U OpenUser _blank
433
+ # which might cause syntax issues in older Mermaid, but we'll do it anyway.
434
 
 
435
  DEFAULT_MERMAID = r"""
436
  flowchart LR
437
+ U((User😎)) -- Talk🗣 --> LLM[LLMAgent🤖\nExtractInfo]
438
+ click U /?q=U OpenUser _blank
439
+ click LLM /?q=LLM%20Agent%20ExtractInfo OpenLLM _blank
440
 
441
+ LLM -- Query🔍 --> HS[HybridSearch🔎\nVectorNERLexical]
442
+ click HS /?q=HybridSearchVectorNERLexical LaunchSearch _blank
443
 
444
+ HS -- Reason🤔 --> RE[ReasoningEngine🛠️\nNeuralNetworkMedical]
445
+ click RE /?q=ReasoningEngineNeuralNetworkMedical BrainTool _blank
446
 
447
+ RE -- Link📡 --> KG((KnowledgeGraph📚\nOntologyGAR RAG))
448
+ click KG /?q=KnowledgeGraphOntologyGARRAG ShowKG _blank
449
  """
450
 
451
+ ########################################################################################
452
+ # 9) MAIN UI
453
+ ########################################################################################
454
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
455
  def main():
456
+ st.set_page_config(page_title="Mermaid with No Double Quotes", layout="wide")
457
 
458
  # 1) Query param parsing
459
  query_params = st.query_params
460
+ query = query_params.get('q', [""])[0].strip()
461
+ if query:
462
+ st.write("process_text called with:", PromptPrefix + query)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
463
 
464
  # 2) Let user pick ?model=1
465
  st.sidebar.write("## Diagram Link Settings")
466
  model_selected = st.sidebar.checkbox("Append ?model=1 to each link?")
467
 
468
+ # 3) We'll do a minimal injection: if line starts with click and has /?q=, we add base & model
 
469
  lines = DEFAULT_MERMAID.strip().split("\n")
470
  new_lines = []
471
  for line in lines:
472
+ # e.g.: click U /?q=U OpenUser _blank
473
+ if line.strip().startswith("click ") and "/?q=" in line:
474
+ parts = line.split()
475
+ # Example: ["click","U","/?q=U","OpenUser","_blank"]
476
+ if len(parts) >= 5:
477
+ node_name = parts[1] # e.g. U
478
+ old_url = parts[2] # e.g. /?q=U
479
+ middle = parts[3] # e.g. OpenUser
480
+ target = parts[4] # e.g. _blank
481
+
482
+ # inject base
 
 
 
483
  new_url = inject_base_url(old_url)
484
+ # append model if chosen
485
  new_url = append_model_param(new_url, model_selected)
486
+ # reassemble line
487
+ # e.g. "click U <BASEURL> OpenUser _blank"
488
+ new_line = f"click {node_name} {new_url} {middle} {target}"
 
 
 
489
  new_lines.append(new_line)
490
  else:
491
  new_lines.append(line)
 
493
  new_lines.append(line)
494
 
495
  final_mermaid = "\n".join(new_lines)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
496
 
497
+ # 4) Render the top-centered diagram
498
+ st.title("Mermaid Diagram - No Double Quotes & 'OpenUser'-style Links")
499
  diagram_html = generate_mermaid_html(final_mermaid)
500
  components.html(diagram_html, height=400, scrolling=True)
501
 
502
+ # 5) Two-column interface: Markdown & (final) Mermaid
503
  left_col, right_col = st.columns(2)
504
 
505
  with left_col:
506
  st.subheader("Markdown Side 📝")
507
  if "markdown_text" not in st.session_state:
508
  st.session_state["markdown_text"] = "## Hello!\nYou can type some *Markdown* here.\n"
509
+
510
+ markdown_text = st.text_area("Edit Markdown:",
511
+ value=st.session_state["markdown_text"],
512
+ height=300)
 
513
  st.session_state["markdown_text"] = markdown_text
514
 
 
515
  colA, colB = st.columns(2)
516
  with colA:
517
  if st.button("🔄 Refresh Markdown"):
 
527
 
528
  with right_col:
529
  st.subheader("Mermaid Side 🧜‍♂️")
530
+
531
  if "current_mermaid" not in st.session_state:
532
  st.session_state["current_mermaid"] = final_mermaid
533
 
534
+ mermaid_input = st.text_area("Edit Mermaid Code:",
535
+ value=st.session_state["current_mermaid"],
536
+ height=300)
537
+
 
 
538
  colC, colD = st.columns(2)
539
  with colC:
540
  if st.button("🎨 Refresh Diagram"):
 
550
  st.markdown("**Mermaid Source:**")
551
  st.code(mermaid_input, language="python", line_numbers=True)
552
 
553
+ # 6) Media Galleries
554
  st.markdown("---")
555
  st.header("Media Galleries")
556
+
557
  num_columns_images = st.slider("Choose Number of Image Columns", 1, 15, 5, key="num_columns_images")
558
  display_images_and_wikipedia_summaries(num_columns_images)
559
 
560
  num_columns_video = st.slider("Choose Number of Video Columns", 1, 15, 5, key="num_columns_video")
561
  display_videos_and_links(num_columns_video)
562
 
563
+ # 7) File Sidebar
 
 
 
 
 
 
 
 
564
  FileSidebar()
565
 
566
+ # 8) Random Title at bottom
567
  titles = [
568
  "🧠🎭 Semantic Symphonies & Episodic Encores",
569
  "🌌🎼 AI Rhythms of Memory Lane",
 
576
  ]
577
  st.markdown(f"**{random.choice(titles)}**")
578
 
579
+ # End of main
580
 
581
  if __name__ == "__main__":
582
  main()