Meet Patel committed
Commit 9a6c98c · 1 Parent(s): a806ca2

Refactor app.py to enhance concept graph visualization with improved error handling and response parsing. Integrate synchronous wrapper for async loading of concept graphs, update Gradio interface for better user experience, and streamline concept details display. Update concept graph tools to support LLM-driven generation with fallback mechanisms for concept retrieval.
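For orientation, the "synchronous wrapper for async loading" mentioned above is the usual bridge between a synchronous UI callback and an async MCP client call. The sketch below illustrates the pattern only; the names `fetch_graph` and `fetch_graph_sync` are placeholders, and the real implementation (`sync_load_concept_graph` wrapping `load_concept_graph`) appears in the app.py diff further down.

```python
import asyncio

async def fetch_graph(concept_id: str) -> dict:
    # Placeholder for the real async MCP call made in app.py.
    return {"id": concept_id, "name": concept_id.title()}

def fetch_graph_sync(concept_id: str) -> dict:
    # Drive the coroutine to completion from synchronous code and turn any
    # failure into an error payload the UI can display.
    try:
        return asyncio.run(fetch_graph(concept_id))
    except Exception as e:
        return {"error": str(e)}

if __name__ == "__main__":
    print(fetch_graph_sync("machine_learning"))
```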

Files changed (2):
  1. app.py (+315 / -97)
  2. mcp_server/tools/concept_graph_tools.py (+262 / -27)
app.py CHANGED
Old version of the changed sections (removed lines are marked with "-"):

@@ -3,19 +3,25 @@ Gradio web interface for the TutorX MCP Server with SSE support
"""

import os
- import gradio as gr
- import numpy as np
import json
-
- from datetime import datetime
import asyncio
- import aiohttp
- import sseclient
import requests

- # Import MCP SSE client context managers
- from mcp import ClientSession
from mcp.client.sse import sse_client

# Server configuration
SERVER_URL = "http://localhost:8000/sse" # Ensure this is the SSE endpoint
@@ -23,88 +29,270 @@ SERVER_URL = "http://localhost:8000/sse" # Ensure this is the SSE endpoint
# Utility functions


- async def load_concept_graph(concept_id: str = None):
    """
    Load and visualize the concept graph for a given concept ID.
    If no concept_id is provided, returns the first available concept.
-     Uses call_resource for concept graph retrieval (not a tool).

    Returns:
        tuple: (figure, concept_details, related_concepts) or (None, error_dict, [])
    """
    try:
-         print(f"[DEBUG] Loading concept graph for concept_id: {concept_id}")
        async with sse_client(SERVER_URL) as (sse, write):
            async with ClientSession(sse, write) as session:
                await session.initialize()
-                 result = await session.call_tool("get_concept_graph_tool", {"concept_id": concept_id} if concept_id else {})
-                 print(f"[DEBUG] Server response: {result}")
-                 if not result or not isinstance(result, dict):
-                     error_msg = "Invalid server response"
                    print(f"[ERROR] {error_msg}")
                    return None, {"error": error_msg}, []
-                 if "error" in result:
-                     print(f"[ERROR] Server returned error: {result['error']}")
-                     return None, {"error": result["error"]}, []
-                 if "concepts" in result and not concept_id:
-                     if not result["concepts"]:
-                         error_msg = "No concepts available"
-                         print(f"[ERROR] {error_msg}")
-                         return None, {"error": error_msg}, []
-                     concept = result["concepts"][0]
-                     print(f"[DEBUG] Using first concept from list: {concept.get('id')}")
-                 else:
-                     concept = result.get("concept", result)
-                     print(f"[DEBUG] Using direct concept: {concept.get('id')}")
-                 if not isinstance(concept, dict) or not concept.get('id'):
-                     error_msg = "Invalid concept data structure"
-                     print(f"[ERROR] {error_msg}: {concept}")
                    return None, {"error": error_msg}, []
-                 import matplotlib.pyplot as plt
-                 import networkx as nx
                G = nx.DiGraph()
-                 G.add_node(concept["id"], label=concept["name"], type="concept")
-                 related_concepts = []
-                 if "related" in concept:
-                     for rel_id in concept["related"]:
-                         rel_result = await session.call_tool("get_concept_graph_tool", {"concept_id": rel_id})
-                         if "error" not in rel_result:
-                             rel_concept = rel_result.get("concept", {})
-                             G.add_node(rel_id, label=rel_concept.get("name", rel_id), type="related")
-                             G.add_edge(concept["id"], rel_id, relationship="related_to")
-                             related_concepts.append([rel_id, rel_concept.get("name", ""), rel_concept.get("description", "")])
-                 if "prerequisites" in concept:
-                     for prereq_id in concept["prerequisites"]:
-                         prereq_result = await session.call_tool("get_concept_graph_tool", {"concept_id": prereq_id})
-                         if "error" not in prereq_result:
-                             prereq_concept = prereq_result.get("concept", {})
-                             G.add_node(prereq_id, label=prereq_concept.get("name", prereq_id), type="prerequisite")
-                             G.add_edge(prereq_id, concept["id"], relationship="prerequisite_for")
-                 plt.figure(figsize=(10, 8))
-                 pos = nx.spring_layout(G)
                node_colors = []
-                 for node in G.nodes():
-                     if G.nodes[node].get("type") == "concept":
-                         node_colors.append("lightblue")
-                     elif G.nodes[node].get("type") == "prerequisite":
-                         node_colors.append("lightcoral")
-                     else:
-                         node_colors.append("lightgreen")
-                 nx.draw_networkx_nodes(G, pos, node_size=2000, node_color=node_colors, alpha=0.8)
-                 nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
-                 labels = {node: G.nodes[node].get("label", node) for node in G.nodes()}
-                 nx.draw_networkx_labels(G, pos, labels, font_size=10, font_weight="bold")
-                 edge_labels = {(u, v): d["relationship"] for u, v, d in G.edges(data=True)}
-                 nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)
-                 plt.title(f"Concept Graph: {concept.get('name', concept_id)}")
-                 plt.axis("off")
-                 concept_details = concept
-                 return plt.gcf(), concept_details, related_concepts
    except Exception as e:
        import traceback
-         traceback.print_exc()
        return None, {"error": f"Failed to load concept graph: {str(e)}"}, []
-
# Create Gradio interface
with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📚 TutorX Educational AI Platform")
@@ -122,43 +310,73 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
    with gr.Tab("Core Features"):
        with gr.Blocks() as concept_graph_tab:
            gr.Markdown("## Concept Graph Visualization")
            with gr.Row():
                with gr.Column(scale=3):
-                     # Change from dropdown to textbox for concept input
-                     concept_input_box = gr.Textbox(
-                         label="Enter Concept Name",
-                         placeholder="e.g., python, functions, oop, data_structures",
-                         lines=1,
-                         interactive=True
-                     )
-                     load_concept_btn = gr.Button("Load Concept Graph", variant="primary")

                    # Concept details
-                     concept_details = gr.JSON(label="Concept Details")

-                     # Related concepts
-                     related_concepts = gr.Dataframe(
-                         headers=["ID", "Name", "Description"],
-                         datatype=["str", "str", "str"],
-                         label="Related Concepts"
-                     )

                # Graph visualization
                with gr.Column(scale=7):
-                     graph_output = gr.Plot(label="Concept Graph")

-             # Button click handler
-             load_concept_btn.click(
-                 fn=load_concept_graph,
-                 inputs=[concept_input_box],
-                 outputs=[graph_output, concept_details, related_concepts]
            )

-             # Load default concept on tab click
-             concept_graph_tab.load(
-                 fn=load_concept_graph,
-                 inputs=[concept_input_box],
-                 outputs=[graph_output, concept_details, related_concepts]
            )

        gr.Markdown("## Assessment Generation")
 
New version of the changed sections (added lines are marked with "+"):

@@ -3,19 +3,25 @@ Gradio web interface for the TutorX MCP Server with SSE support
"""

import os
import json
import asyncio
+ import gradio as gr
+ from typing import Optional, Dict, Any, List, Union, Tuple
import requests
+ import tempfile
+ import base64
+ import re
+ import networkx as nx
+ import matplotlib
+ import matplotlib.pyplot as plt
+
+ # Set matplotlib to use 'Agg' backend to avoid GUI issues in Gradio
+ matplotlib.use('Agg')

+ # Import MCP client components
from mcp.client.sse import sse_client
+ from mcp.client.session import ClientSession
+ from mcp.types import TextContent, CallToolResult

# Server configuration
SERVER_URL = "http://localhost:8000/sse" # Ensure this is the SSE endpoint
 
@@ -23,88 +29,270 @@ SERVER_URL = "http://localhost:8000/sse" # Ensure this is the SSE endpoint
# Utility functions


+ async def load_concept_graph(concept_id: str = None) -> Tuple[Optional[plt.Figure], Dict, List]:
    """
    Load and visualize the concept graph for a given concept ID.
    If no concept_id is provided, returns the first available concept.

+     Args:
+         concept_id: The ID or name of the concept to load
+
    Returns:
        tuple: (figure, concept_details, related_concepts) or (None, error_dict, [])
    """
+     print(f"[DEBUG] Loading concept graph for concept_id: {concept_id}")
+
    try:
        async with sse_client(SERVER_URL) as (sse, write):
            async with ClientSession(sse, write) as session:
                await session.initialize()
+
+                 # Call the concept graph tool
+                 result = await session.call_tool(
+                     "get_concept_graph_tool",
+                     {"concept_id": concept_id} if concept_id else {}
+                 )
+                 print(f"[DEBUG] Raw tool response type: {type(result)}")
+
+                 # Extract content if it's a TextContent object
+                 if hasattr(result, 'content') and isinstance(result.content, list):
+                     for item in result.content:
+                         if hasattr(item, 'text') and item.text:
+                             try:
+                                 result = json.loads(item.text)
+                                 print("[DEBUG] Successfully parsed JSON from TextContent")
+                                 break
+                             except json.JSONDecodeError as e:
+                                 print(f"[ERROR] Failed to parse JSON from TextContent: {e}")
+
+                 # If result is a string, try to parse it as JSON
+                 if isinstance(result, str):
+                     try:
+                         result = json.loads(result)
+                     except json.JSONDecodeError as e:
+                         print(f"[ERROR] Failed to parse result as JSON: {e}")
+                         return None, {"error": f"Failed to parse concept graph data: {str(e)}"}, []
+
+                 # Debug print for the raw backend response
+                 print(f"[DEBUG] Raw backend response: {result}")
+
+                 # Handle backend error response
+                 if isinstance(result, dict) and "error" in result:
+                     error_msg = f"Backend error: {result['error']}"
                    print(f"[ERROR] {error_msg}")
                    return None, {"error": error_msg}, []
+
+                 concept = None
+
+                 # Handle different response formats
+                 if isinstance(result, dict):
+                     # Case 1: Direct concept object
+                     if "id" in result or "name" in result:
+                         concept = result
+                     # Case 2: Response with 'concepts' list
+                     elif "concepts" in result:
+                         if result["concepts"]:
+                             concept = result["concepts"][0] if not concept_id else None
+                             # Try to find the requested concept by ID or name
+                             if concept_id:
+                                 for c in result["concepts"]:
+                                     if (isinstance(c, dict) and
+                                             (c.get("id") == concept_id or
+                                              str(c.get("name", "")).lower() == concept_id.lower())):
+                                         concept = c
+                                         break
+                                 if not concept:
+                                     error_msg = f"Concept '{concept_id}' not found in the concept graph"
+                                     print(f"[ERROR] {error_msg}")
+                                     return None, {"error": error_msg}, []
+                         else:
+                             error_msg = "No concepts found in the concept graph"
+                             print(f"[ERROR] {error_msg}")
+                             return None, {"error": error_msg}, []
+
+                 # If we still don't have a valid concept
+                 if not concept or not isinstance(concept, dict):
+                     error_msg = "Could not extract valid concept data from response"
+                     print(f"[ERROR] {error_msg}")
                    return None, {"error": error_msg}, []
+
+                 # Ensure required fields exist with defaults
+                 concept.setdefault('related_concepts', [])
+                 concept.setdefault('prerequisites', [])
+
+                 print(f"[DEBUG] Final concept data: {concept}")
+
+                 # Create a new directed graph
                G = nx.DiGraph()
+
+                 # Add the main concept node
+                 main_node_id = concept["id"]
+                 G.add_node(main_node_id,
+                            label=concept["name"],
+                            type="main",
+                            description=concept["description"])
+
+                 # Add related concepts and edges
+                 all_related = []
+
+                 # Process related concepts
+                 for rel in concept.get('related_concepts', []):
+                     if isinstance(rel, dict):
+                         rel_id = rel.get('id', str(hash(str(rel.get('name', '')))))
+                         rel_name = rel.get('name', 'Unnamed')
+                         rel_desc = rel.get('description', 'Related concept')
+
+                         G.add_node(rel_id,
+                                    label=rel_name,
+                                    type="related",
+                                    description=rel_desc)
+                         G.add_edge(main_node_id, rel_id, type="related_to")
+
+                         all_related.append(["Related", rel_name, rel_desc])
+
+                 # Process prerequisites
+                 for prereq in concept.get('prerequisites', []):
+                     if isinstance(prereq, dict):
+                         prereq_id = prereq.get('id', str(hash(str(prereq.get('name', '')))))
+                         prereq_name = f"[Prerequisite] {prereq.get('name', 'Unnamed')}"
+                         prereq_desc = prereq.get('description', 'Prerequisite concept')
+
+                         G.add_node(prereq_id,
+                                    label=prereq_name,
+                                    type="prerequisite",
+                                    description=prereq_desc)
+                         G.add_edge(prereq_id, main_node_id, type="prerequisite_for")
+
+                         all_related.append(["Prerequisite", prereq_name, prereq_desc])
+
+                 # Create the plot
+                 plt.figure(figsize=(14, 10))
+
+                 # Calculate node positions using spring layout
+                 pos = nx.spring_layout(G, k=0.5, iterations=50, seed=42)
+
+                 # Define node colors and sizes based on type
                node_colors = []
+                 node_sizes = []
+                 for node, data in G.nodes(data=True):
+                     if data.get('type') == 'main':
+                         node_colors.append('#4e79a7') # Blue for main concept
+                         node_sizes.append(1500)
+                     elif data.get('type') == 'prerequisite':
+                         node_colors.append('#59a14f') # Green for prerequisites
+                         node_sizes.append(1000)
+                     else: # related
+                         node_colors.append('#e15759') # Red for related concepts
+                         node_sizes.append(1000)
+
+                 # Draw nodes
+                 nx.draw_networkx_nodes(
+                     G, pos,
+                     node_color=node_colors,
+                     node_size=node_sizes,
+                     alpha=0.9,
+                     edgecolors='white',
+                     linewidths=2
+                 )
+
+                 # Draw edges with different styles for different relationships
+                 related_edges = [(u, v) for u, v, d in G.edges(data=True)
+                                  if d.get('type') == 'related_to']
+                 prereq_edges = [(u, v) for u, v, d in G.edges(data=True)
+                                 if d.get('type') == 'prerequisite_for']
+
+                 # Draw related edges
+                 nx.draw_networkx_edges(
+                     G, pos,
+                     edgelist=related_edges,
+                     width=1.5,
+                     alpha=0.7,
+                     edge_color="#e15759",
+                     style="solid",
+                     arrowsize=15,
+                     arrowstyle='-|>',
+                     connectionstyle='arc3,rad=0.1'
+                 )
+
+                 # Draw prerequisite edges
+                 nx.draw_networkx_edges(
+                     G, pos,
+                     edgelist=prereq_edges,
+                     width=1.5,
+                     alpha=0.7,
+                     edge_color="#59a14f",
+                     style="dashed",
+                     arrowsize=15,
+                     arrowstyle='-|>',
+                     connectionstyle='arc3,rad=0.1'
+                 )
+
+                 # Draw node labels with white background for better readability
+                 node_labels = {node: data["label"]
+                                for node, data in G.nodes(data=True)
+                                if "label" in data}
+
+                 nx.draw_networkx_labels(
+                     G, pos,
+                     labels=node_labels,
+                     font_size=10,
+                     font_weight="bold",
+                     font_family="sans-serif",
+                     bbox=dict(
+                         facecolor="white",
+                         edgecolor='none',
+                         alpha=0.8,
+                         boxstyle='round,pad=0.3',
+                         linewidth=0
+                     )
+                 )
+
+                 # Add a legend
+                 import matplotlib.patches as mpatches
+                 legend_elements = [
+                     mpatches.Patch(facecolor='#4e79a7', label='Main Concept', alpha=0.9),
+                     mpatches.Patch(facecolor='#e15759', label='Related Concept', alpha=0.9),
+                     mpatches.Patch(facecolor='#59a14f', label='Prerequisite', alpha=0.9)
+                 ]
+
+                 plt.legend(
+                     handles=legend_elements,
+                     loc='upper right',
+                     bbox_to_anchor=(1.0, 1.0),
+                     frameon=True,
+                     framealpha=0.9
+                 )
+
+                 plt.axis('off')
+                 plt.tight_layout()
+
+                 # Create concept details dictionary
+                 concept_details = {
+                     'name': concept['name'],
+                     'id': concept['id'],
+                     'description': concept['description']
+                 }
+
+                 # Return the figure, concept details, and related concepts
+                 return plt.gcf(), concept_details, all_related
+
    except Exception as e:
        import traceback
+         error_msg = f"Error in load_concept_graph: {str(e)}\n\n{traceback.format_exc()}"
+         print(f"[ERROR] {error_msg}")
        return None, {"error": f"Failed to load concept graph: {str(e)}"}, []
+
+ def sync_load_concept_graph(concept_id):
+     """Synchronous wrapper for async load_concept_graph, always returns 3 outputs."""
+     try:
+         result = asyncio.run(load_concept_graph(concept_id))
+         if result and len(result) == 3:
+             return result
+         else:
+             return None, {"error": "Unexpected result format"}, []
+     except Exception as e:
+         return None, {"error": str(e)}, []
+
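One caveat about `sync_load_concept_graph` as committed: `asyncio.run()` raises a `RuntimeError` when it is called from a thread that already has a running event loop, and in that case the wrapper will surface the error through its error dict rather than render a graph. A hedged alternative (not part of this commit; `run_async` is an illustrative helper name) is to detect a running loop and fall back to a worker thread:

```python
import asyncio
import concurrent.futures

def run_async(coro):
    # Illustrative helper, not from the commit: run a coroutine from sync code.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No event loop is running in this thread, so asyncio.run() is safe.
        return asyncio.run(coro)
    # A loop is already running here; execute the coroutine in a worker thread
    # that owns its own event loop instead of nesting asyncio.run().
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, coro).result()
```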
# Create Gradio interface
with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📚 TutorX Educational AI Platform")
@@ -122,43 +310,73 @@ with gr.Blocks(title="TutorX Educational AI", theme=gr.themes.Soft()) as demo:
    with gr.Tab("Core Features"):
        with gr.Blocks() as concept_graph_tab:
            gr.Markdown("## Concept Graph Visualization")
+             gr.Markdown("Explore relationships between educational concepts through an interactive graph visualization.")
+
            with gr.Row():
+                 # Left panel for controls and details
                with gr.Column(scale=3):
+                     with gr.Row():
+                         concept_input = gr.Textbox(
+                             label="Enter Concept",
+                             placeholder="e.g., machine_learning, calculus, quantum_physics",
+                             value="machine_learning",
+                             scale=4
+                         )
+                         load_btn = gr.Button("Load Graph", variant="primary", scale=1)

                    # Concept details
+                     with gr.Accordion("Concept Details", open=True):
+                         concept_details = gr.JSON(
+                             label=None,
+                             show_label=False
+                         )

+                     # Related concepts and prerequisites
+                     with gr.Accordion("Related Concepts & Prerequisites", open=True):
+                         related_concepts = gr.Dataframe(
+                             headers=["Type", "Name", "Description"],
+                             datatype=["str", "str", "str"],
+                             interactive=False,
+                             wrap=True,
+                             # max_height=300, # Fixed height with scroll in Gradio 5.x
+                             # overflow_row_behaviour="paginate"
+                         )

                # Graph visualization
                with gr.Column(scale=7):
+                     graph_plot = gr.Plot(
+                         label="Concept Graph",
+                         show_label=True,
+                         container=True
+                     )

+             # Event handlers
+             load_btn.click(
+                 fn=sync_load_concept_graph,
+                 inputs=[concept_input],
+                 outputs=[graph_plot, concept_details, related_concepts]
            )

+             # Load initial graph
+             demo.load(
+                 fn=lambda: sync_load_concept_graph("machine_learning"),
+                 outputs=[graph_plot, concept_details, related_concepts]
+             )
+             # Help text and examples
+             with gr.Row():
+                 gr.Markdown("""
+                 **Examples to try:**
+                 - `machine_learning`
+                 - `neural_networks`
+                 - `calculus`
+                 - `quantum_physics`
+                 """)
+
+             # Error display (leave in UI, but not wired up)
+             error_output = gr.Textbox(
+                 label="Error Messages",
+                 visible=False,
+                 interactive=False
            )

        gr.Markdown("## Assessment Generation")
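The three outputs wired to `load_btn.click` and `demo.load` line up positionally with the tuple returned by `sync_load_concept_graph`: a matplotlib figure for `gr.Plot`, a dict for `gr.JSON`, and a list of `[Type, Name, Description]` rows for `gr.Dataframe`. A minimal, self-contained sketch of the same wiring (the `demo_loader` function and component names here are illustrative, not from the commit):

```python
import gradio as gr
import matplotlib
matplotlib.use("Agg")  # headless backend, as in app.py
import matplotlib.pyplot as plt

def demo_loader(concept: str):
    # Stand-in for sync_load_concept_graph: returns (figure, details, rows).
    fig = plt.figure()
    plt.title(f"Concept Graph: {concept}")
    details = {"id": concept, "name": concept.replace("_", " ").title()}
    rows = [["Related", "Example Concept", "Illustrative row only"]]
    return fig, details, rows

with gr.Blocks() as sketch:
    box = gr.Textbox(label="Concept", value="machine_learning")
    btn = gr.Button("Load")
    plot = gr.Plot(label="Concept Graph")
    details = gr.JSON(label="Concept Details")
    table = gr.Dataframe(headers=["Type", "Name", "Description"])
    btn.click(fn=demo_loader, inputs=[box], outputs=[plot, details, table])

if __name__ == "__main__":
    sketch.launch()
```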
mcp_server/tools/concept_graph_tools.py CHANGED
Old version of the changed sections (removed lines are marked with "-"):

@@ -6,15 +6,7 @@ import sys
import os
from pathlib import Path
import json
-
- # Add the parent directory to the Python path
- current_dir = Path(__file__).parent
- parent_dir = current_dir.parent.parent
- sys.path.insert(0, str(parent_dir))
-
- import sys
- import os
- from pathlib import Path

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
@@ -30,25 +22,268 @@ from mcp_server.model.gemini_flash import GeminiFlash

MODEL = GeminiFlash()

@mcp.tool()
- async def get_concept_graph_tool(concept_id: Optional[str] = None) -> dict:
    """
-     Get the complete concept graph or a specific concept, fully LLM-driven.
-     For a specific concept, use Gemini to generate a JSON object with explanation, related concepts, prerequisites, and summary.
-     For the full graph, use Gemini to generate a JSON object with a list of all concepts and their relationships.
    """
-     if concept_id:
-         prompt = (
-             f"Provide a JSON object for the concept '{concept_id}' with fields: explanation (string), related_concepts (list of strings), prerequisites (list of strings), and summary (string)."
-         )
-     else:
-         prompt = (
-             "Provide a JSON object with a list of all concepts in a knowledge graph. "
-             "Each concept should have fields: id, name, description, related_concepts (list), prerequisites (list)."
-         )
-     llm_response = await MODEL.generate_text(prompt)
    try:
-         data = json.loads(llm_response)
-     except Exception:
-         data = {"llm_raw": llm_response, "error": "Failed to parse LLM output as JSON"}
-     return data
 
New version of the changed sections (added lines are marked with "+"):

@@ -6,15 +6,7 @@ import sys
import os
from pathlib import Path
import json
+ import re

# Add the parent directory to the Python path
current_dir = Path(__file__).parent
 
@@ -30,25 +22,268 @@ from mcp_server.model.gemini_flash import GeminiFlash

MODEL = GeminiFlash()

+
+ USER_PROMPT_TEMPLATE = """You are an expert educational content creator and knowledge graph expert that helps create detailed concept graphs for educational purposes.
+ Your task is to generate a comprehensive concept graph for a given topic, including related concepts and prerequisites.
+
+ IMPORTANT: Output only valid JSON. Do not include any explanatory text before or after the JSON. Do not include comments. Do not include trailing commas. Double-check that your output is valid JSON and can be parsed by Python's json.loads().
+
+ Output Format (JSON):
+ {{
+ "concepts": [
+ {{
+ "id": "unique_concept_identifier",
+ "name": "Concept Name",
+ "description": "Clear and concise description of the concept",
+ "related_concepts": [
+ {{
+ "id": "related_concept_id",
+ "name": "Related Concept Name",
+ "description": "Brief description of the relationship"
+ }}
+ ],
+ "prerequisites": [
+ {{
+ "id": "prerequisite_id",
+ "name": "Prerequisite Concept Name",
+ "description": "Why this is a prerequisite"
+ }}
+ ]
+ }}
+ ]
+ }}
+
+ Guidelines:
+ 1. Keep concept IDs lowercase with underscores (snake_case)
+ 2. Include 1 related concepts and 1 prerequisites per concept
+ 3. Ensure descriptions are educational and concise
+ 4. Maintain consistency in the knowledge domain
+ 5. Include fundamental concepts even if not directly mentioned
+
+ Generate a detailed concept graph for: {concept}
+
+ Focus on {domain} concepts and provide a comprehensive graph with related concepts and prerequisites.
+ Include both broad and specific concepts relevant to this topic.
+
+ Remember: Return only valid JSON, no additional text. Do not include trailing commas. Do not include comments. Double-check your output is valid JSON."""
+
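A note on the doubled braces in `USER_PROMPT_TEMPLATE`: the template is later passed through `str.format()` (`USER_PROMPT_TEMPLATE.format(concept=..., domain=...)` inside `get_concept_graph_tool`), so literal `{` and `}` in the JSON example have to be written as `{{` and `}}`, while `{concept}` and `{domain}` remain substitution fields. A small illustration of the same mechanism:

```python
# str.format() treats {{ and }} as escaped literal braces; only the named
# fields are substituted, which is how USER_PROMPT_TEMPLATE is used.
template = 'Return JSON like {{"id": "...", "name": "..."}} for: {concept} ({domain})'
print(template.format(concept="calculus", domain="mathematics"))
# -> Return JSON like {"id": "...", "name": "..."} for: calculus (mathematics)
```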
+ # Sample concept graph as fallback
+ SAMPLE_CONCEPT_GRAPH = {
+     "concepts": [
+         {
+             "id": "machine_learning",
+             "name": "Machine Learning",
+             "description": "A branch of artificial intelligence that focuses on algorithms that can learn from and make predictions on data",
+             "related_concepts": [
+                 {
+                     "id": "artificial_intelligence",
+                     "name": "Artificial Intelligence",
+                     "description": "The broader field that encompasses machine learning"
+                 },
+                 {
+                     "id": "deep_learning",
+                     "name": "Deep Learning",
+                     "description": "A subset of machine learning using neural networks"
+                 }
+             ],
+             "prerequisites": [
+                 {
+                     "id": "statistics",
+                     "name": "Statistics",
+                     "description": "Understanding of statistical concepts is fundamental"
+                 }
+             ]
+         }
+     ]
+ }
+
+ def clean_json_trailing_commas(json_text: str) -> str:
+     # Remove trailing commas before } or ]
+     return re.sub(r',([ \t\r\n]*[}}\]])', r'\1', json_text)
+
+ def extract_json_from_text(text: str) -> Optional[dict]:
+     if not text or not isinstance(text, str):
+         return None
+
+     try:
+         # Remove all code fences (``` or ```json) at the start/end, with optional whitespace
+         text = re.sub(r'^\s*```(?:json)?\s*', '', text, flags=re.IGNORECASE)
+         text = re.sub(r'\s*```\s*$', '', text, flags=re.IGNORECASE)
+         text = text.strip()
+
+         print(f"[DEBUG] LLM output ends with: {text[-500:]}")
+
+         # Remove trailing commas
+         cleaned = clean_json_trailing_commas(text)
+
+         # Parse JSON
+         return json.loads(cleaned)
+     except Exception as e:
+         print(f"[DEBUG] Failed JSON extraction: {e}")
+         return None
+
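To make the intent of the two helpers above concrete, here is a small usage sketch. It is meant to run in the same module (so `extract_json_from_text` is in scope), and the fenced, trailing-comma reply string is invented purely for illustration:

```python
# A made-up LLM reply: wrapped in a Markdown code fence and containing a
# trailing comma, both of which the helpers above are designed to tolerate.
fence = "`" * 3  # three backticks, built programmatically for this example
raw_reply = fence + "json\n" + """{
    "concepts": [
        {"id": "calculus", "name": "Calculus", "related_concepts": [], "prerequisites": [],}
    ]
}
""" + fence

parsed = extract_json_from_text(raw_reply)
print(parsed["concepts"][0]["name"])  # -> Calculus
```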
+
+ async def generate_text(prompt: str, temperature: float = 0.7):
+     """Generate text using the configured model."""
+     try:
+         print(f"[DEBUG] Calling MODEL.generate_text with prompt length: {len(prompt)}")
+         print(f"[DEBUG] MODEL type: {type(MODEL)}")
+
+         # Check if the model has the expected method
+         if not hasattr(MODEL, 'generate_text'):
+             print(f"[DEBUG] MODEL does not have generate_text method. Available methods: {dir(MODEL)}")
+             raise AttributeError("MODEL does not have generate_text method")
+
+         # This should call your actual model generation method
+         # Adjust this based on your GeminiFlash implementation
+         response = await MODEL.generate_text(
+             prompt=prompt,
+             temperature=temperature
+         )
+         print(f"[DEBUG] generate_text response type: {type(response)}")
+         return response
+     except Exception as e:
+         print(f"[DEBUG] Error in generate_text: {e}")
+         print(f"[DEBUG] Error type: {type(e)}")
+         raise
+
+
@mcp.tool()
+ async def get_concept_graph_tool(concept_id: Optional[str] = None, domain: str = "computer science") -> dict:
    """
+     Generate or retrieve a concept graph for a given concept ID or name.
+
+     Args:
+         concept_id: The ID or name of the concept to retrieve
+         domain: The knowledge domain (e.g., 'computer science', 'mathematics')
+
+     Returns:
+         dict: A single concept dictionary with keys: id, name, description, related_concepts, prerequisites
    """
+     print(f"[DEBUG] get_concept_graph_tool called with concept_id: {concept_id}, domain: {domain}")
+
+     if not concept_id:
+         print(f"[DEBUG] No concept_id provided, returning sample concept")
+         return SAMPLE_CONCEPT_GRAPH["concepts"][0]
+
+     # Create a fallback custom concept based on the requested concept_id
+     fallback_concept = {
+         "id": concept_id.lower().replace(" ", "_"),
+         "name": concept_id.title(),
+         "description": f"A {domain} concept related to {concept_id}",
+         "related_concepts": [
+             {
+                 "id": "related_concept_1",
+                 "name": "Related Concept 1",
+                 "description": f"A concept related to {concept_id}"
+             },
+             {
+                 "id": "related_concept_2",
+                 "name": "Related Concept 2",
+                 "description": f"Another concept related to {concept_id}"
+             }
+         ],
+         "prerequisites": [
+             {
+                 "id": "basic_prerequisite",
+                 "name": "Basic Prerequisite",
+                 "description": f"Basic knowledge required for understanding {concept_id}"
+             }
+         ]
+     }
+
+     # Try LLM generation first, fallback to custom concept if it fails
    try:
+         print(f"[DEBUG] Attempting LLM generation for: {concept_id} in domain: {domain}")
+
+         # Generate the concept graph using LLM
+         prompt = USER_PROMPT_TEMPLATE.format(concept=concept_id, domain=domain)
+         print(f"[DEBUG] Prompt created, length: {len(prompt)}")
+
+         try:
+             # Call the LLM to generate the concept graph
+             print(f"[DEBUG] About to call generate_text...")
+             response = await generate_text(
+                 prompt=prompt,
+                 temperature=0.7
+             )
+             print(f"[DEBUG] generate_text completed successfully")
+
+         except Exception as gen_error:
+             print(f"[DEBUG] Error in generate_text call: {gen_error}")
+             print(f"[DEBUG] Returning fallback concept due to generation error")
+             return fallback_concept
+
+         # Extract and validate the JSON response
+         print(f"[DEBUG] Full LLM response object type: {type(response)}")
+
+         # Handle different response formats
+         response_text = None
+         try:
+             if hasattr(response, 'content'):
+                 if isinstance(response.content, list) and response.content:
+                     if hasattr(response.content[0], 'text'):
+                         response_text = response.content[0].text
+                     else:
+                         response_text = str(response.content[0])
+                 elif isinstance(response.content, str):
+                     response_text = response.content
+             elif hasattr(response, 'text'):
+                 response_text = response.text
+             elif isinstance(response, str):
+                 response_text = response
+             else:
+                 response_text = str(response)
+
+             print(f"[DEBUG] Extracted response_text type: {type(response_text)}")
+             print(f"[DEBUG] Response text length: {len(response_text) if response_text else 0}")
+
+         except Exception as extract_error:
+             print(f"[DEBUG] Error extracting response text: {extract_error}")
+             print(f"[DEBUG] Returning fallback concept due to extraction error")
+             return fallback_concept
+
+         if not response_text:
+             print(f"[DEBUG] LLM response is empty, returning fallback concept")
+             return fallback_concept
+
+         print(f"[DEBUG] LLM raw response text (first 200 chars): {response_text}...")
+
+         try:
+             result = extract_json_from_text(response_text)
+             print(f"[DEBUG] JSON extraction result: {result is not None}")
+             if result:
+                 print(f"[DEBUG] Extracted JSON keys: {result.keys() if isinstance(result, dict) else 'Not a dict'}")
+         except Exception as json_error:
+             print(f"[DEBUG] Error in extract_json_from_text: {json_error}")
+             print(f"[DEBUG] Returning fallback concept due to JSON extraction error")
+             return fallback_concept
+
+         if not result:
+             print(f"[DEBUG] No valid JSON extracted, returning fallback concept")
+             return fallback_concept
+
+         if "concepts" in result and isinstance(result["concepts"], list) and result["concepts"]:
+             print(f"[DEBUG] Found {len(result['concepts'])} concepts in LLM response")
+             # Find the requested concept or return the first
+             for concept in result["concepts"]:
+                 if (concept.get("id") == concept_id or
+                         concept.get("name", "").lower() == concept_id.lower()):
+                     print(f"[DEBUG] Found matching LLM concept: {concept.get('name')}")
+                     return concept
+             # If not found, return the first concept
+             first_concept = result["concepts"][0]
+             print(f"[DEBUG] Concept not found, returning first LLM concept: {first_concept.get('name')}")
+             return first_concept
+         else:
+             print(f"[DEBUG] LLM JSON does not contain valid 'concepts' list, returning fallback")
+             return fallback_concept
+
+     except Exception as e:
+         import traceback
+         error_msg = f"Error generating concept graph: {str(e)}"
+         print(f"[DEBUG] Exception in get_concept_graph_tool: {error_msg}")
+         print(f"[DEBUG] Full traceback: {traceback.format_exc()}")
+         # Return fallback concept instead of error
+         print(f"[DEBUG] Returning fallback concept due to exception")
+         return fallback_concept
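Because `get_concept_graph_tool` is an async function underneath the `@mcp.tool()` decorator, it can also be exercised directly for a quick check of the LLM path and the fallback path, without going through the SSE transport. This is a sketch under two assumptions: it is run from the repository root so that `mcp_server` is importable, and the decorator returns the underlying function (as FastMCP's `tool()` decorator does); if either assumption does not hold in this repo, adjust accordingly.

```python
import asyncio

from mcp_server.tools.concept_graph_tools import get_concept_graph_tool

async def main() -> None:
    # LLM-backed path; on any failure the tool returns the hand-built
    # fallback concept instead of raising.
    concept = await get_concept_graph_tool("calculus", domain="mathematics")
    print(concept["name"], len(concept.get("related_concepts", [])))

    # With no concept_id the tool returns the first entry of SAMPLE_CONCEPT_GRAPH.
    sample = await get_concept_graph_tool()
    print(sample["id"])  # -> machine_learning

if __name__ == "__main__":
    asyncio.run(main())
```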