awacke1 committed on
Commit
ad82dc6
·
verified ·
1 Parent(s): 5a8ea54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -40
app.py CHANGED
@@ -42,29 +42,36 @@ transhuman_glossary = {
42
  "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
43
  }
44
 
 
45
  def process_text(text):
46
  """🕵️ process_text: detective style—prints lines to Streamlit for debugging."""
47
  st.write(f"process_text called with: {text}")
48
 
 
49
  def search_arxiv(text):
50
  """🔭 search_arxiv: pretend to search ArXiv, just prints debug for now."""
51
  st.write(f"search_arxiv called with: {text}")
52
 
 
53
  def SpeechSynthesis(text):
54
  """🗣 SpeechSynthesis: read lines out loud? Here, we log them for demonstration."""
55
  st.write(f"SpeechSynthesis called with: {text}")
56
 
 
57
  def process_image(image_file, prompt):
58
  """📷 process_image: imagine an AI pipeline for images, here we just log."""
59
  return f"[process_image placeholder] {image_file} => {prompt}"
60
 
 
61
  def process_video(video_file, seconds_per_frame):
62
  """🎞 process_video: placeholder for video tasks, logs to Streamlit."""
63
  st.write(f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame")
64
 
 
65
  API_URL = "https://huggingface-inference-endpoint-placeholder"
66
  API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
67
 
 
68
  @st.cache_resource
69
  def InferenceLLM(prompt):
70
  """🔮 InferenceLLM: a stub returning a mock response for 'prompt'."""
@@ -77,7 +84,7 @@ def InferenceLLM(prompt):
77
  @st.cache_resource
78
  def display_glossary_entity(k):
79
  """
80
- Creates multiple link emojis for a single entity.
81
  Each link might point to /?q=..., /?q=<prefix>..., or external sites.
82
  """
83
  search_urls = {
@@ -94,6 +101,7 @@ def display_glossary_entity(k):
94
  links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
95
  st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
96
 
 
97
  def display_content_or_image(query):
98
  """
99
  If 'query' is in transhuman_glossary or there's an image matching 'images/<query>.png',
@@ -112,6 +120,7 @@ def display_content_or_image(query):
112
  st.warning("No matching content or image found.")
113
  return False
114
 
 
115
  def clear_query_params():
116
  """For fully clearing, you'd do a redirect or st.experimental_set_query_params()."""
117
  st.warning("Define a redirect or link without query params if you want to truly clear them.")
@@ -128,6 +137,7 @@ def load_file(file_path):
128
  except:
129
  return ""
130
 
 
131
  @st.cache_resource
132
  def create_zip_of_files(files):
133
  """Combine multiple local files into a single .zip for user to download."""
@@ -137,6 +147,7 @@ def create_zip_of_files(files):
137
  zipf.write(file)
138
  return zip_name
139
 
 
140
  @st.cache_resource
141
  def get_zip_download_link(zip_file):
142
  """Return an <a> link to download the given zip_file (base64-encoded)."""
@@ -145,6 +156,7 @@ def get_zip_download_link(zip_file):
145
  b64 = base64.b64encode(data).decode()
146
  return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
147
 
 
148
  def get_table_download_link(file_path):
149
  """
150
  Creates a download link for a single file from your snippet.
@@ -170,10 +182,12 @@ def get_table_download_link(file_path):
170
  except:
171
  return ''
172
 
 
173
  def get_file_size(file_path):
174
  """Get file size in bytes."""
175
  return os.path.getsize(file_path)
176
 
 
177
  def FileSidebar():
178
  """
179
  Renders .md files in the sidebar with open/view/run/delete logic.
@@ -181,6 +195,7 @@ def FileSidebar():
181
  all_files = glob.glob("*.md")
182
  # If you want to filter out short-named or special files:
183
  all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
 
184
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
185
 
186
  # Buttons for "Delete All" and "Download"
@@ -201,9 +216,9 @@ def FileSidebar():
201
 
202
  # Each file row
203
  for file in all_files:
204
- col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1])
205
  with col1:
206
- if st.button("🌐", key="md_"+file):
207
  file_contents = load_file(file)
208
  file_name = file
209
  next_action = 'md'
@@ -211,7 +226,7 @@ def FileSidebar():
211
  with col2:
212
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
213
  with col3:
214
- if st.button("📂", key="open_"+file):
215
  file_contents = load_file(file)
216
  file_name = file
217
  next_action = 'open'
@@ -220,13 +235,13 @@ def FileSidebar():
220
  st.session_state['filetext'] = file_contents
221
  st.session_state['next_action'] = next_action
222
  with col4:
223
- if st.button("▶️", key="read_"+file):
224
  file_contents = load_file(file)
225
  file_name = file
226
  next_action = 'search'
227
  st.session_state['next_action'] = next_action
228
  with col5:
229
- if st.button("🗑", key="delete_"+file):
230
  os.remove(file)
231
  st.rerun()
232
 
@@ -255,15 +270,18 @@ def FileSidebar():
255
  if st.button("🔍Run"):
256
  st.write("Running GPT logic placeholder...")
257
 
 
258
  # =====================================================================================
259
  # 4) SCORING / GLOSSARIES
260
  # =====================================================================================
261
  score_dir = "scores"
262
  os.makedirs(score_dir, exist_ok=True)
263
 
 
264
  def generate_key(label, header, idx):
265
  return f"{header}_{label}_{idx}_key"
266
 
 
267
  def update_score(key, increment=1):
268
  """Increment the 'score' for a glossary item in JSON storage."""
269
  score_file = os.path.join(score_dir, f"{key}.json")
@@ -278,6 +296,7 @@ def update_score(key, increment=1):
278
  json.dump(score_data, file)
279
  return score_data["score"]
280
 
 
281
  def load_score(key):
282
  """Load the stored score from .json if it exists, else 0."""
283
  file_path = os.path.join(score_dir, f"{key}.json")
@@ -287,6 +306,7 @@ def load_score(key):
287
  return score_data["score"]
288
  return 0
289
 
 
290
  def display_buttons_with_scores(num_columns_text):
291
  """
292
  Show glossary items as clickable buttons, each increments a 'score'.
@@ -325,6 +345,7 @@ def display_buttons_with_scores(num_columns_text):
325
  newscore = update_score(key.replace('?', ''))
326
  st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
327
 
 
328
  # =====================================================================================
329
  # 5) IMAGES & VIDEOS
330
  # =====================================================================================
@@ -354,6 +375,7 @@ def display_images_and_wikipedia_summaries(num_columns=4):
354
  st.write(f"Could not open {image_file}")
355
  col_index += 1
356
 
 
357
  def display_videos_and_links(num_columns=4):
358
  """Displays all .mp4/.webm in a grid, plus text input for prompts."""
359
  video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
@@ -379,6 +401,7 @@ def display_videos_and_links(num_columns=4):
379
  st.error("Invalid input for seconds per frame!")
380
  col_index += 1
381
 
 
382
  # =====================================================================================
383
  # 6) MERMAID & PARTIAL SUBGRAPH LOGIC
384
  # =====================================================================================
@@ -410,6 +433,7 @@ def generate_mermaid_html(mermaid_code: str) -> str:
410
  </html>
411
  """
412
 
 
413
  def append_model_param(url: str, model_selected: bool) -> str:
414
  """If user selects 'model=1', we append &model=1 or ?model=1 if not present."""
415
  if not model_selected:
@@ -417,17 +441,19 @@ def append_model_param(url: str, model_selected: bool) -> str:
417
  delimiter = "&" if "?" in url else "?"
418
  return f"{url}{delimiter}model=1"
419
 
 
420
  def inject_base_url(url: str) -> str:
421
  """If link doesn't start with 'http', prepend BASE_URL so it's absolute."""
422
  if url.startswith("http"):
423
  return url
424
  return f"{BASE_URL}{url}"
425
 
 
426
  # We'll keep the default mermaid that references /?q=...
427
  DEFAULT_MERMAID = r"""
428
  flowchart LR
429
  U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\nExtract Info]
430
- click U "/?q=User%20😎" "Open 'User 😎'" _blank
431
  click LLM "/?q=LLM%20Agent%20Extract%20Info" "Open LLM Agent" _blank
432
 
433
  LLM -- "Query 🔍" --> HS[Hybrid Search 🔎\nVector+NER+Lexical]
@@ -440,10 +466,7 @@ flowchart LR
440
  click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" "Open Knowledge Graph" _blank
441
  """
442
 
443
- # ------------------------------------------------------------------------------------
444
- # 🍁 Parsing and building partial subgraphs from lines like "A -- Label --> B"
445
- # We'll do BFS so we can gather multiple downstream levels if we want.
446
- # ------------------------------------------------------------------------------------
447
  def parse_mermaid_edges(mermaid_text: str):
448
  """
449
  🍿 parse_mermaid_edges:
@@ -462,12 +485,12 @@ def parse_mermaid_edges(mermaid_text: str):
462
  adjacency[nodeA].append((label, nodeB))
463
  return adjacency
464
 
 
465
  def bfs_subgraph(adjacency, start_node, depth=1):
466
  """
467
  🍎 bfs_subgraph:
468
  - Gather edges up to 'depth' levels from start_node
469
  - If depth=1, only direct edges from node
470
- - If depth=2, child and grandchild, etc.
471
  """
472
  from collections import deque
473
  visited = set()
@@ -487,13 +510,19 @@ def bfs_subgraph(adjacency, start_node, depth=1):
487
 
488
  return edges
489
 
 
490
  def create_subgraph_mermaid(sub_edges, start_node):
491
  """
492
  🍄 create_subgraph_mermaid:
493
  - build a smaller flowchart snippet with edges from BFS
494
  """
495
  sub_mermaid = "flowchart LR\n"
496
- sub_mermaid += f" %% SearchResult Subgraph starting at {start_node}\n"
 
 
 
 
 
497
  for (A, label, B) in sub_edges:
498
  sub_mermaid += f' {A} -- "{label}" --> {B}\n'
499
  sub_mermaid += " %% End of partial subgraph\n"
@@ -537,19 +566,27 @@ def main():
537
  for line in lines:
538
  if "click " in line and '"/?' in line:
539
  # try to parse out the URL
540
- parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+"([^"]+)"\s+"([^"]+)"', line)
541
- # For example: parts might be [prefix, '/?q=User%20😎', 'Open User 😎', '_blank', remainder?]
542
- if len(parts) == 5:
 
 
 
543
  # Reassemble with base URL + optional model param
544
  old_url = parts[1]
545
  tooltip = parts[2]
546
  target = parts[3]
 
547
  # 1) base
548
  new_url = inject_base_url(old_url)
549
  # 2) model param
550
  new_url = append_model_param(new_url, model_selected)
551
 
552
- new_line = f"{parts[0]}\"{new_url}\" \"{tooltip}\" \"{target}\"{parts[4]}"
 
 
 
 
553
  new_lines.append(new_line)
554
  else:
555
  new_lines.append(line)
@@ -559,32 +596,35 @@ def main():
559
  final_mermaid = "\n".join(new_lines)
560
  adjacency = parse_mermaid_edges(final_mermaid)
561
 
562
- # 4) If user clicked a shape -> we show a partial subgraph as "SearchResult"
563
- # We'll do BFS with depth=1 or 2 for demonstration:
564
  partial_subgraph_html = ""
565
  if q_or_query:
566
- st.info(f"process_text called with: {PromptPrefix}{q_or_query}")
567
- # Attempt to find a node whose ID or label includes q_or_query:
568
- # This may require advanced logic if your IDs differ from labels.
569
- # We'll do a naive approach: if q_or_query is substring of adjacency keys.
570
- possible_keys = []
571
- for nodeKey in adjacency.keys():
572
- # e.g. nodeKey might be: 'LLM[LLM Agent 🤖\nExtract Info]'
573
- # we'll check if q_or_query is substring ignoring spaces
574
- simplified_key = nodeKey.replace("\\n", " ").replace("[", "").replace("]", "").lower()
575
- simplified_query = q_or_query.lower().replace("%20", " ")
576
- if simplified_query in simplified_key:
577
- possible_keys.append(nodeKey)
578
-
579
- if possible_keys:
580
- chosen_node = possible_keys[0]
581
- st.info(f"Chosen node for subgraph: {chosen_node}")
582
- sub_edges = bfs_subgraph(adjacency, chosen_node, depth=1)
583
- if sub_edges:
584
- sub_mermaid = create_subgraph_mermaid(sub_edges, chosen_node)
585
- partial_subgraph_html = generate_mermaid_html(sub_mermaid)
586
  else:
587
- st.warning("No adjacency node matched the query param's text. Subgraph is empty.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
588
 
589
  # 5) Show partial subgraph top-center if we have any
590
  if partial_subgraph_html:
@@ -593,6 +633,7 @@ def main():
593
 
594
  # 6) Render the top-centered *full* diagram
595
  st.title("Full Mermaid Diagram (with Base URL + model=1 logic)")
 
596
  diagram_html = generate_mermaid_html(final_mermaid)
597
  components.html(diagram_html, height=400, scrolling=True)
598
 
 
42
  "Cybernetics": ["Robotic Limbs", "Augmented Eyes"],
43
  }
44
 
45
+
46
  def process_text(text):
47
  """🕵️ process_text: detective style—prints lines to Streamlit for debugging."""
48
  st.write(f"process_text called with: {text}")
49
 
50
+
51
  def search_arxiv(text):
52
  """🔭 search_arxiv: pretend to search ArXiv, just prints debug for now."""
53
  st.write(f"search_arxiv called with: {text}")
54
 
55
+
56
  def SpeechSynthesis(text):
57
  """🗣 SpeechSynthesis: read lines out loud? Here, we log them for demonstration."""
58
  st.write(f"SpeechSynthesis called with: {text}")
59
 
60
+
61
  def process_image(image_file, prompt):
62
  """📷 process_image: imagine an AI pipeline for images, here we just log."""
63
  return f"[process_image placeholder] {image_file} => {prompt}"
64
 
65
+
66
  def process_video(video_file, seconds_per_frame):
67
  """🎞 process_video: placeholder for video tasks, logs to Streamlit."""
68
  st.write(f"[process_video placeholder] {video_file}, {seconds_per_frame} sec/frame")
69
 
70
+
71
  API_URL = "https://huggingface-inference-endpoint-placeholder"
72
  API_KEY = "hf_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
73
 
74
+
75
  @st.cache_resource
76
  def InferenceLLM(prompt):
77
  """🔮 InferenceLLM: a stub returning a mock response for 'prompt'."""
 
84
  @st.cache_resource
85
  def display_glossary_entity(k):
86
  """
87
+ Creates multiple link emojis for a single entity.
88
  Each link might point to /?q=..., /?q=<prefix>..., or external sites.
89
  """
90
  search_urls = {
 
101
  links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
102
  st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
103
 
104
+
105
  def display_content_or_image(query):
106
  """
107
  If 'query' is in transhuman_glossary or there's an image matching 'images/<query>.png',
 
120
  st.warning("No matching content or image found.")
121
  return False
122
 
123
+
124
  def clear_query_params():
125
  """For fully clearing, you'd do a redirect or st.experimental_set_query_params()."""
126
  st.warning("Define a redirect or link without query params if you want to truly clear them.")
 
137
  except:
138
  return ""
139
 
140
+
141
  @st.cache_resource
142
  def create_zip_of_files(files):
143
  """Combine multiple local files into a single .zip for user to download."""
 
147
  zipf.write(file)
148
  return zip_name
149
 
150
+
151
  @st.cache_resource
152
  def get_zip_download_link(zip_file):
153
  """Return an <a> link to download the given zip_file (base64-encoded)."""
 
156
  b64 = base64.b64encode(data).decode()
157
  return f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
158
 
159
+
160
  def get_table_download_link(file_path):
161
  """
162
  Creates a download link for a single file from your snippet.
 
182
  except:
183
  return ''
184
 
185
+
186
  def get_file_size(file_path):
187
  """Get file size in bytes."""
188
  return os.path.getsize(file_path)
189
 
190
+
191
  def FileSidebar():
192
  """
193
  Renders .md files in the sidebar with open/view/run/delete logic.
 
195
  all_files = glob.glob("*.md")
196
  # If you want to filter out short-named or special files:
197
  all_files = [f for f in all_files if len(os.path.splitext(f)[0]) >= 5]
198
+ # sorting in place
199
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)
200
 
201
  # Buttons for "Delete All" and "Download"
 
216
 
217
  # Each file row
218
  for file in all_files:
219
+ col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])
220
  with col1:
221
+ if st.button("🌐", key="md_" + file):
222
  file_contents = load_file(file)
223
  file_name = file
224
  next_action = 'md'
 
226
  with col2:
227
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
228
  with col3:
229
+ if st.button("📂", key="open_" + file):
230
  file_contents = load_file(file)
231
  file_name = file
232
  next_action = 'open'
 
235
  st.session_state['filetext'] = file_contents
236
  st.session_state['next_action'] = next_action
237
  with col4:
238
+ if st.button("▶️", key="read_" + file):
239
  file_contents = load_file(file)
240
  file_name = file
241
  next_action = 'search'
242
  st.session_state['next_action'] = next_action
243
  with col5:
244
+ if st.button("🗑", key="delete_" + file):
245
  os.remove(file)
246
  st.rerun()
247
 
 
270
  if st.button("🔍Run"):
271
  st.write("Running GPT logic placeholder...")
272
 
273
+
274
  # =====================================================================================
275
  # 4) SCORING / GLOSSARIES
276
  # =====================================================================================
277
  score_dir = "scores"
278
  os.makedirs(score_dir, exist_ok=True)
279
 
280
+
281
  def generate_key(label, header, idx):
282
  return f"{header}_{label}_{idx}_key"
283
 
284
+
285
  def update_score(key, increment=1):
286
  """Increment the 'score' for a glossary item in JSON storage."""
287
  score_file = os.path.join(score_dir, f"{key}.json")
 
296
  json.dump(score_data, file)
297
  return score_data["score"]
298
 
299
+
300
  def load_score(key):
301
  """Load the stored score from .json if it exists, else 0."""
302
  file_path = os.path.join(score_dir, f"{key}.json")
 
306
  return score_data["score"]
307
  return 0
308
 
309
+
310
  def display_buttons_with_scores(num_columns_text):
311
  """
312
  Show glossary items as clickable buttons, each increments a 'score'.
 
345
  newscore = update_score(key.replace('?', ''))
346
  st.markdown(f"Scored **{category} - {game} - {term}** -> {newscore}")
347
 
348
+
349
  # =====================================================================================
350
  # 5) IMAGES & VIDEOS
351
  # =====================================================================================
 
375
  st.write(f"Could not open {image_file}")
376
  col_index += 1
377
 
378
+
379
  def display_videos_and_links(num_columns=4):
380
  """Displays all .mp4/.webm in a grid, plus text input for prompts."""
381
  video_files = [f for f in os.listdir('.') if f.endswith(('.mp4', '.webm'))]
 
401
  st.error("Invalid input for seconds per frame!")
402
  col_index += 1
403
 
404
+
405
  # =====================================================================================
406
  # 6) MERMAID & PARTIAL SUBGRAPH LOGIC
407
  # =====================================================================================
 
433
  </html>
434
  """
435
 
436
+
437
  def append_model_param(url: str, model_selected: bool) -> str:
438
  """If user selects 'model=1', we append &model=1 or ?model=1 if not present."""
439
  if not model_selected:
 
441
  delimiter = "&" if "?" in url else "?"
442
  return f"{url}{delimiter}model=1"
443
 
444
+
445
  def inject_base_url(url: str) -> str:
446
  """If link doesn't start with 'http', prepend BASE_URL so it's absolute."""
447
  if url.startswith("http"):
448
  return url
449
  return f"{BASE_URL}{url}"
450
 
451
+
452
  # We'll keep the default mermaid that references /?q=...
453
  DEFAULT_MERMAID = r"""
454
  flowchart LR
455
  U((User 😎)) -- "Talk 🗣️" --> LLM[LLM Agent 🤖\nExtract Info]
456
+ click U "/?q=U" "Open 'User 😎'" _blank
457
  click LLM "/?q=LLM%20Agent%20Extract%20Info" "Open LLM Agent" _blank
458
 
459
  LLM -- "Query 🔍" --> HS[Hybrid Search 🔎\nVector+NER+Lexical]
 
466
  click KG "/?q=Knowledge%20Graph%20Ontology+GAR+RAG" "Open Knowledge Graph" _blank
467
  """
468
 
469
+
 
 
 
470
  def parse_mermaid_edges(mermaid_text: str):
471
  """
472
  🍿 parse_mermaid_edges:
 
485
  adjacency[nodeA].append((label, nodeB))
486
  return adjacency
487
 
488
+
489
  def bfs_subgraph(adjacency, start_node, depth=1):
490
  """
491
  🍎 bfs_subgraph:
492
  - Gather edges up to 'depth' levels from start_node
493
  - If depth=1, only direct edges from node
 
494
  """
495
  from collections import deque
496
  visited = set()
 
510
 
511
  return edges
512
 
513
+
514
  def create_subgraph_mermaid(sub_edges, start_node):
515
  """
516
  🍄 create_subgraph_mermaid:
517
  - build a smaller flowchart snippet with edges from BFS
518
  """
519
  sub_mermaid = "flowchart LR\n"
520
+ sub_mermaid += f" %% SearchResult Subgraph for {start_node}\n"
521
+ if not sub_edges:
522
+ # If no edges, show just the node
523
+ sub_mermaid += f" {start_node}\n"
524
+ sub_mermaid += " %% End of partial subgraph\n"
525
+ return sub_mermaid
526
  for (A, label, B) in sub_edges:
527
  sub_mermaid += f' {A} -- "{label}" --> {B}\n'
528
  sub_mermaid += " %% End of partial subgraph\n"
 
566
  for line in lines:
567
  if "click " in line and '"/?' in line:
568
  # try to parse out the URL
569
+ parts = re.split(r'click\s+\S+\s+"([^"]+)"\s+"([^"]+)"\s+(\S+)', line)
570
+ # For example:
571
+ # 'click U "/?q=U" "Open 'User 😎'" _blank'
572
+ # might become:
573
+ # parts = [prefix, '/?q=U', "Open 'User 😎'", '_blank', '']
574
+ if len(parts) >= 4:
575
  # Reassemble with base URL + optional model param
576
  old_url = parts[1]
577
  tooltip = parts[2]
578
  target = parts[3]
579
+
580
  # 1) base
581
  new_url = inject_base_url(old_url)
582
  # 2) model param
583
  new_url = append_model_param(new_url, model_selected)
584
 
585
+ # Rebuild the line
586
+ new_line = f"{parts[0]}\"{new_url}\" \"{tooltip}\" {target}"
587
+ # If there's a remainder (parts[4]) it might be an empty string
588
+ if len(parts) > 4:
589
+ new_line += parts[4]
590
  new_lines.append(new_line)
591
  else:
592
  new_lines.append(line)
 
596
  final_mermaid = "\n".join(new_lines)
597
  adjacency = parse_mermaid_edges(final_mermaid)
598
 
599
+ # 4) If user clicked a shape => we show a partial subgraph as "SearchResult"
 
600
  partial_subgraph_html = ""
601
  if q_or_query:
602
+ # Special-case if user clicked "User" => q=U => we know the node is "U((User 😎))"
603
+ if q_or_query == "U":
604
+ chosen_node = "U((User 😎))"
605
+ st.info(f"process_text called with: {PromptPrefix}{q_or_query} => forcing node U((User 😎))")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
606
  else:
607
+ st.info(f"process_text called with: {PromptPrefix}{q_or_query}")
608
+ # Attempt to find a node whose ID or label includes q_or_query:
609
+ possible_keys = []
610
+ for nodeKey in adjacency.keys():
611
+ # e.g. nodeKey might be 'U((User 😎))'
612
+ simplified_key = nodeKey.replace("\\n", " ").replace("[", "").replace("]", "").lower()
613
+ simplified_query = q_or_query.lower().replace("%20", " ")
614
+ if simplified_query in simplified_key:
615
+ possible_keys.append(nodeKey)
616
+
617
+ if possible_keys:
618
+ chosen_node = possible_keys[0]
619
+ else:
620
+ chosen_node = None
621
+ st.warning("No adjacency node matched the query param's text. Subgraph is empty.")
622
+
623
+ if chosen_node:
624
+ # BFS subgraph for chosen_node with depth=1
625
+ sub_edges = bfs_subgraph(adjacency, chosen_node, depth=1)
626
+ sub_mermaid = create_subgraph_mermaid(sub_edges, chosen_node)
627
+ partial_subgraph_html = generate_mermaid_html(sub_mermaid)
628
 
629
  # 5) Show partial subgraph top-center if we have any
630
  if partial_subgraph_html:
 
633
 
634
  # 6) Render the top-centered *full* diagram
635
  st.title("Full Mermaid Diagram (with Base URL + model=1 logic)")
636
+
637
  diagram_html = generate_mermaid_html(final_mermaid)
638
  components.html(diagram_html, height=400, scrolling=True)
639