openfree commited on
Commit
8e56712
·
verified ·
1 Parent(s): 89fc81b

Upload 9 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ NanumGothic-Regular[[:space:]](1).ttf filter=lfs diff=lfs merge=lfs -text
NanumGothic-Regular (1).ttf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf050025dcce823de644153981ff8b171d5b78d7d0ddd6e3c9f39e814fad3564
3
+ size 2053328
concept_map_generator.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
+ def generate_concept_map(json_input: str, output_format: str) -> str:
8
+ """
9
+ Generates a concept map from JSON input.
10
+
11
+ Args:
12
+ json_input (str): A JSON string describing the concept map structure.
13
+ It must follow the Expected JSON Format Example below.
14
+
15
+ Expected JSON Format Example:
16
+ {
17
+ "central_node": "Artificial Intelligence (AI)",
18
+ "nodes": [
19
+ {
20
+ "id": "ml_fundamental",
21
+ "label": "Machine Learning",
22
+ "relationship": "is essential for",
23
+ "subnodes": [
24
+ {
25
+ "id": "dl_branch",
26
+ "label": "Deep Learning",
27
+ "relationship": "for example",
28
+ "subnodes": [
29
+ {
30
+ "id": "cnn_example",
31
+ "label": "CNNs",
32
+ "relationship": "for example"
33
+ },
34
+ {
35
+ "id": "rnn_example",
36
+ "label": "RNNs",
37
+ "relationship": "for example"
38
+ }
39
+ ]
40
+ },
41
+ {
42
+ "id": "rl_branch",
43
+ "label": "Reinforcement Learning",
44
+ "relationship": "for example",
45
+ "subnodes": [
46
+ {
47
+ "id": "qlearning_example",
48
+ "label": "Q-Learning",
49
+ "relationship": "example"
50
+ },
51
+ {
52
+ "id": "pg_example",
53
+ "label": "Policy Gradients",
54
+ "relationship": "example"
55
+ }
56
+ ]
57
+ }
58
+ ]
59
+ },
60
+ {
61
+ "id": "ai_types",
62
+ "label": "Types",
63
+ "relationship": "formed by",
64
+ "subnodes": [
65
+ {
66
+ "id": "agi_type",
67
+ "label": "AGI",
68
+ "relationship": "this is",
69
+ "subnodes": [
70
+ {
71
+ "id": "strong_ai",
72
+ "label": "Strong AI",
73
+ "relationship": "provoked by",
74
+ "subnodes": [
75
+ {
76
+ "id": "human_intel",
77
+ "label": "Human-level Intel.",
78
+ "relationship": "of"
79
+ }
80
+ ]
81
+ }
82
+ ]
83
+ },
84
+ {
85
+ "id": "ani_type",
86
+ "label": "ANI",
87
+ "relationship": "this is",
88
+ "subnodes": [
89
+ {
90
+ "id": "weak_ai",
91
+ "label": "Weak AI",
92
+ "relationship": "provoked by",
93
+ "subnodes": [
94
+ {
95
+ "id": "narrow_tasks",
96
+ "label": "Narrow Tasks",
97
+ "relationship": "of"
98
+ }
99
+ ]
100
+ }
101
+ ]
102
+ }
103
+ ]
104
+ },
105
+ {
106
+ "id": "ai_capabilities",
107
+ "label": "Capabilities",
108
+ "relationship": "change",
109
+ "subnodes": [
110
+ {
111
+ "id": "data_proc",
112
+ "label": "Data Processing",
113
+ "relationship": "can",
114
+ "subnodes": [
115
+ {
116
+ "id": "big_data",
117
+ "label": "Big Data",
118
+ "relationship": "as",
119
+ "subnodes": [
120
+ {
121
+ "id": "analysis_example",
122
+ "label": "Data Analysis",
123
+ "relationship": "example"
124
+ },
125
+ {
126
+ "id": "prediction_example",
127
+ "label": "Prediction",
128
+ "relationship": "example"
129
+ }
130
+ ]
131
+ }
132
+ ]
133
+ },
134
+ {
135
+ "id": "decision_making",
136
+ "label": "Decision Making",
137
+ "relationship": "can be",
138
+ "subnodes": [
139
+ {
140
+ "id": "automation",
141
+ "label": "Automation",
142
+ "relationship": "as",
143
+ "subnodes": [
144
+ {
145
+ "id": "robotics_example",
146
+ "label": "Robotics",
147
+ "relationship": "Example"},
148
+ {
149
+ "id": "autonomous_example",
150
+ "label": "Autonomous Vehicles",
151
+ "relationship": "of one"
152
+ }
153
+ ]
154
+ }
155
+ ]
156
+ },
157
+ {
158
+ "id": "problem_solving",
159
+ "label": "Problem Solving",
160
+ "relationship": "can",
161
+ "subnodes": [
162
+ {
163
+ "id": "optimization",
164
+ "label": "Optimization",
165
+ "relationship": "as is",
166
+ "subnodes": [
167
+ {
168
+ "id": "algorithms_example",
169
+ "label": "Algorithms",
170
+ "relationship": "for example"
171
+ }
172
+ ]
173
+ }
174
+ ]
175
+ }
176
+ ]
177
+ }
178
+ ]
179
+ }
180
+
181
+ Returns:
182
+ str: The filepath to the generated PNG image file.
183
+ """
184
+ try:
185
+ if not json_input.strip():
186
+ return "Error: Empty input"
187
+
188
+ data = json.loads(json_input)
189
+
190
+ if 'central_node' not in data or 'nodes' not in data:
191
+ raise ValueError("Missing required fields: central_node or nodes")
192
+
193
+ # ํ•œ๊ธ€ ํฐํŠธ ์„ค์ •
194
+ # ํ™˜๊ฒฝ ๋ณ€์ˆ˜์—์„œ ํฐํŠธ ๊ฒฝ๋กœ ๊ฐ€์ ธ์˜ค๊ธฐ
195
+ font_path = os.environ.get('KOREAN_FONT_PATH', '')
196
+
197
+ # Graphviz๋Š” ์‹œ์Šคํ…œ ํฐํŠธ๋ฅผ ์‚ฌ์šฉํ•˜๋ฏ€๋กœ ํฐํŠธ ์ด๋ฆ„์œผ๋กœ ์ง€์ •
198
+ # NanumGothic์ด ์‹œ์Šคํ…œ์— ์„ค์น˜๋˜์–ด ์žˆ์–ด์•ผ ํ•จ
199
+ korean_font = 'NanumGothic'
200
+
201
+ dot = graphviz.Digraph(
202
+ name='ConceptMap',
203
+ format='png',
204
+ graph_attr={
205
+ 'rankdir': 'TB', # Top-to-Bottom layout (vertical hierarchy)
206
+ 'splines': 'ortho', # Straight lines
207
+ 'bgcolor': 'white', # White background
208
+ 'pad': '0.5', # Padding around the graph
209
+ 'fontname': korean_font, # ๊ทธ๋ž˜ํ”„ ์ „์ฒด ํฐํŠธ ์„ค์ •
210
+ 'charset': 'UTF-8' # UTF-8 ์ธ์ฝ”๋”ฉ
211
+ },
212
+ node_attr={
213
+ 'fontname': korean_font # ๋ชจ๋“  ๋…ธ๋“œ์˜ ๊ธฐ๋ณธ ํฐํŠธ
214
+ },
215
+ edge_attr={
216
+ 'fontname': korean_font # ๋ชจ๋“  ์—ฃ์ง€์˜ ๊ธฐ๋ณธ ํฐํŠธ
217
+ }
218
+ )
219
+
220
+ base_color = '#19191a' # Hardcoded base color
221
+
222
+ # Central node styling (rounded box, dark color)
223
+ dot.node(
224
+ 'central',
225
+ data['central_node'],
226
+ shape='box', # Rectangular shape
227
+ style='filled,rounded', # Filled and rounded corners
228
+ fillcolor=base_color, # Darkest color
229
+ fontcolor='white', # White text for dark background
230
+ fontsize='16', # Larger font for central node
231
+ fontname=korean_font # ํ•œ๊ธ€ ํฐํŠธ ๋ช…์‹œ์  ์ง€์ •
232
+ )
233
+
234
+ # Add child nodes and edges recursively starting from depth 1
235
+ add_nodes_and_edges(dot, 'central', data.get('nodes', []), current_depth=1, base_color=base_color)
236
+
237
+ with NamedTemporaryFile(delete=False, suffix=f'.{output_format}') as tmp:
238
+ dot.render(tmp.name, format=output_format, cleanup=True)
239
+ return f"{tmp.name}.{output_format}"
240
+
241
+ except json.JSONDecodeError:
242
+ return "Error: Invalid JSON format"
243
+ except Exception as e:
244
+ return f"Error: {str(e)}"
graph_generator_utils.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import os
3
+
4
def add_nodes_and_edges(dot: graphviz.Digraph, parent_id: str, nodes_list: list, current_depth: int, base_color: str):
    """
    Recursively add nodes and edges to a Graphviz Digraph, applying a
    depth-based colour gradient and consistent styling.

    Args:
        dot (graphviz.Digraph): The Digraph to mutate in place.
        parent_id (str): ID of the parent node for this level.
        nodes_list (list): Dicts with 'id', 'label', 'relationship' and an
            optional 'subnodes' list of the same shape.
        current_depth (int): Depth in the hierarchy (1 for direct children
            of the central node).
        base_color (str): '#rrggbb' base colour of the darkest (top) level.

    Raises:
        ValueError: If a node is missing (or has a falsy) 'id', 'label'
            or 'relationship'.
    """
    # Font for node/edge labels; resolved by name, so it must be installed.
    # NOTE(review): other modules use 'NanumGothic-Regular' — confirm which
    # name matches the installed font.
    korean_font = 'NanumGothic'

    # How strongly the fill colour lightens per depth level (blend to white).
    lightening_factor = 0.12

    # Fall back to the default dark colour if base_color is not '#rrggbb'.
    if not isinstance(base_color, str) or not base_color.startswith('#') or len(base_color) != 7:
        base_color = '#19191a'
    base_r = int(base_color[1:3], 16)
    base_g = int(base_color[3:5], 16)
    base_b = int(base_color[5:7], 16)

    # Blend each channel towards 255 (white) in proportion to depth,
    # clamping so very deep hierarchies stay inside the valid RGB range.
    current_r = min(255, base_r + int((255 - base_r) * current_depth * lightening_factor))
    current_g = min(255, base_g + int((255 - base_g) * current_depth * lightening_factor))
    current_b = min(255, base_b + int((255 - base_b) * current_depth * lightening_factor))

    node_fill_color = f'#{current_r:02x}{current_g:02x}{current_b:02x}'

    # White text on dark fills; switch to black once the fill gets light.
    font_color = 'white' if current_depth * lightening_factor < 0.6 else 'black'

    # Dark gray connector lines; label fonts shrink with depth, with minimums.
    edge_color = '#4a4a4a'
    font_size = max(9, 14 - (current_depth * 2))
    edge_font_size = max(7, 10 - (current_depth * 1))

    for node in nodes_list:
        node_id = node.get('id')
        label = node.get('label')
        relationship = node.get('relationship')

        # All three fields are required and must be non-empty.
        if not all([node_id, label, relationship]):
            raise ValueError(f"Invalid node: {node}")

        dot.node(
            node_id,
            label,
            shape='box',                 # all nodes are rectangular
            style='filled,rounded',
            fillcolor=node_fill_color,
            fontcolor=font_color,
            fontsize=str(font_size),
            fontname=korean_font
        )

        dot.edge(
            parent_id,
            node_id,
            label=relationship,
            color=edge_color,
            fontcolor=edge_color,        # edge label matches the line colour
            fontsize=str(edge_font_size),
            fontname=korean_font
        )

        # Recurse into children. Guard tolerates an explicit null/empty
        # 'subnodes' value (the previous `if 'subnodes' in node` crashed
        # with a TypeError on {"subnodes": null}).
        subnodes = node.get('subnodes')
        if subnodes:
            add_nodes_and_edges(dot, node_id, subnodes, current_depth + 1, base_color)
packages (6).txt ADDED
@@ -0,0 +1 @@
 
 
1
+ graphviz
process_flow_generator.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+
6
def generate_process_flow_diagram(json_input: str, output_format: str) -> str:
    """
    Generate a Process Flow Diagram (flowchart) from a JSON description.

    Args:
        json_input (str): A JSON string describing the process flow structure.
            It must follow the Expected JSON Format Example below.
        output_format (str): Graphviz output format for the rendered file
            (e.g. 'png', 'svg').

    Expected JSON Format Example:
    {
      "start_node": "Start Inference Request",
      "nodes": [
        {"id": "user_input", "label": "Receive User Input (Data)", "type": "io"},
        {"id": "preprocess_data", "label": "Preprocess Data", "type": "process"},
        {"id": "validate_data", "label": "Validate Data Format/Type", "type": "decision"},
        {"id": "data_valid_yes", "label": "Data Valid?", "type": "decision"},
        {"id": "load_model", "label": "Load AI Model (if not cached)", "type": "process"},
        {"id": "run_inference", "label": "Run AI Model Inference", "type": "process"},
        {"id": "postprocess_output", "label": "Postprocess Model Output", "type": "process"},
        {"id": "send_response", "label": "Send Response to User", "type": "io"},
        {"id": "log_error", "label": "Log Error & Notify User", "type": "process"},
        {"id": "end_inference_process", "label": "End Inference Process", "type": "end"}
      ],
      "connections": [
        {"from": "start_node", "to": "user_input", "label": "Request"},
        {"from": "user_input", "to": "preprocess_data", "label": "Data Received"},
        {"from": "preprocess_data", "to": "validate_data", "label": "Cleaned"},
        {"from": "validate_data", "to": "data_valid_yes", "label": "Check"},
        {"from": "data_valid_yes", "to": "load_model", "label": "Yes"},
        {"from": "data_valid_yes", "to": "log_error", "label": "No"},
        {"from": "load_model", "to": "run_inference", "label": "Model Ready"},
        {"from": "run_inference", "to": "postprocess_output", "label": "Output Generated"},
        {"from": "postprocess_output", "to": "send_response", "label": "Ready"},
        {"from": "send_response", "to": "end_inference_process", "label": "Response Sent"},
        {"from": "log_error", "to": "end_inference_process", "label": "Error Handled"}
      ]
    }

    Returns:
        str: The filepath to the generated image file (extension matches
        ``output_format``), or a string starting with "Error:" on failure.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        # Validate required top-level keys for a flowchart.
        if 'start_node' not in data or 'nodes' not in data or 'connections' not in data:
            raise ValueError("Missing required fields: 'start_node', 'nodes', or 'connections'")

        # Map flowchart node types to Graphviz shapes.
        node_shapes = {
            "process": "box",            # rectangle for processes
            "decision": "diamond",       # diamond for decisions
            "start": "oval",             # oval for start
            "end": "oval",               # oval for end
            "io": "parallelogram",       # input/output
            "document": "note",          # document symbol
            "default": "box"             # fallback
        }

        # Korean-capable font name. When GDFONTPATH is set, Graphviz can
        # resolve this as a font *file* name without extension.
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='ProcessFlowDiagram',
            format='png',
            graph_attr={
                'rankdir': 'TB',          # top-to-bottom flow is common for flowcharts
                'splines': 'ortho',       # straight lines with 90-degree bends
                'bgcolor': 'white',
                'pad': '0.5',
                'nodesep': '0.6',         # spacing between nodes on the same rank
                'ranksep': '0.8',         # spacing between ranks
                'fontname': korean_font,  # graph-wide default font
                'charset': 'UTF-8'        # needed for non-ASCII labels
            },
            node_attr={
                'fontname': korean_font   # default font for all nodes
            },
            edge_attr={
                'fontname': korean_font   # default font for all edge labels
            }
        )

        base_color = '#19191a'  # fill for regular (non start/end) nodes

        fill_color_for_nodes = base_color
        # White text on the dark default fills, black otherwise.
        # (Simplified from the previous duplicated check of '#19191a'.)
        font_color_for_nodes = 'white' if base_color.lower() in ('#19191a', '#000000') else 'black'

        # Index nodes by ID for easy lookup.
        all_defined_nodes = {node['id']: node for node in data['nodes']}

        # Add the start node explicitly; its label is the ID itself.
        start_node_id = data['start_node']
        dot.node(
            start_node_id,
            start_node_id,
            shape=node_shapes['start'],
            style='filled,rounded',
            fillcolor='#2196F3',  # distinct blue for Start
            fontcolor='white',
            fontsize='14',
            fontname=korean_font
        )

        # Add all other nodes (process, decision, io, end, ...).
        for node_id, node_info in all_defined_nodes.items():
            if node_id == start_node_id:  # already added above
                continue

            node_type = node_info.get("type", "default")
            shape = node_shapes.get(node_type, "box")

            node_label = node_info['label']

            if node_type == 'end':
                dot.node(
                    node_id,
                    node_label,
                    shape=shape,
                    style='filled,rounded',
                    fillcolor='#F44336',  # distinct red for End
                    fontcolor='white',
                    fontsize='14',
                    fontname=korean_font
                )
            else:  # regular nodes use the base colour
                dot.node(
                    node_id,
                    node_label,
                    shape=shape,
                    style='filled,rounded',
                    fillcolor=fill_color_for_nodes,
                    fontcolor=font_color_for_nodes,
                    fontsize='14',
                    fontname=korean_font
                )

        # Add connections. NOTE: Graphviz silently auto-creates any endpoint
        # that was not declared in 'nodes' (default styling) — kept as-is
        # since callers may rely on that leniency.
        for connection in data['connections']:
            dot.edge(
                connection['from'],
                connection['to'],
                label=connection.get('label', ''),
                color='#4a4a4a',       # dark gray lines
                fontcolor='#4a4a4a',
                fontsize='10',
                fontname=korean_font
            )

        # render() writes the DOT source to tmp.name and the image to
        # tmp.name + '.' + output_format; cleanup=True removes the source file.
        with NamedTemporaryFile(delete=False, suffix=f'.{output_format}') as tmp:
            dot.render(tmp.name, format=output_format, cleanup=True)
            return f"{tmp.name}.{output_format}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
radial_diagram_generator.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
def generate_radial_diagram(json_input: str, output_format: str) -> str:
    """
    Generate a radial (center-expanded) diagram from a JSON description.

    Args:
        json_input (str): A JSON string describing the radial diagram structure.
            It must follow the Expected JSON Format Example below.
        output_format (str): Graphviz output format for the rendered file
            (e.g. 'png', 'svg').

    Expected JSON Format Example:
    {
      "central_node": "AI Core Concepts & Domains",
      "nodes": [
        {
          "id": "foundational_ml", "label": "Foundational ML", "relationship": "builds on",
          "subnodes": [
            {"id": "supervised_l", "label": "Supervised Learning", "relationship": "e.g."},
            {"id": "unsupervised_l", "label": "Unsupervised Learning", "relationship": "e.g."}
          ]
        },
        {
          "id": "dl_architectures", "label": "Deep Learning Arch.", "relationship": "evolved from",
          "subnodes": [
            {"id": "cnns_rad", "label": "CNNs", "relationship": "e.g."},
            {"id": "rnns_rad", "label": "RNNs", "relationship": "e.g."}
          ]
        },
        {
          "id": "major_applications", "label": "Major AI Applications", "relationship": "applied in",
          "subnodes": [
            {"id": "nlp_rad", "label": "Natural Language Processing", "relationship": "e.g."},
            {"id": "cv_rad", "label": "Computer Vision", "relationship": "e.g."}
          ]
        },
        {
          "id": "ethical_concerns", "label": "Ethical AI Concerns", "relationship": "addresses",
          "subnodes": [
            {"id": "fairness_rad", "label": "Fairness & Bias", "relationship": "e.g."},
            {"id": "explainability", "label": "Explainability (XAI)", "relationship": "e.g."}
          ]
        },
        {
          "id": "future_trends", "label": "Future AI Trends", "relationship": "looking at",
          "subnodes": [
            {"id": "agi_future", "label": "AGI Development", "relationship": "e.g."},
            {"id": "quantum_ai", "label": "Quantum AI", "relationship": "e.g."}
          ]
        }
      ]
    }

    Returns:
        str: The filepath to the generated image file (extension matches
        ``output_format``), or a string starting with "Error:" on failure.
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'central_node' not in data or 'nodes' not in data:
            raise ValueError("Missing required fields: central_node or nodes")

        # Korean-capable font name. When GDFONTPATH is set, Graphviz can
        # resolve this as a font *file* name without extension.
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='RadialDiagram',
            format='png',
            engine='neato',  # 'neato'/'fdp' give a radial, force-directed layout
            graph_attr={
                'overlap': 'false',       # prevent node overlap
                'splines': 'true',        # smooth splines for edges
                'bgcolor': 'white',
                'pad': '0.5',
                'layout': 'neato',        # redundant with engine=, kept for consistency
                'fontname': korean_font,  # graph-wide default font
                'charset': 'UTF-8'        # needed for non-ASCII labels
            },
            node_attr={
                'fixedsize': 'false',     # let nodes resize to their content
                'fontname': korean_font   # default font for all nodes
            },
            edge_attr={
                'fontname': korean_font   # default font for all edge labels
            }
        )

        base_color = '#19191a'  # darkest fill; child levels are lightened from this

        # Central node: rounded dark box with larger white text.
        dot.node(
            'central',
            data['central_node'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',
            fontsize='16',
            fontname=korean_font
        )

        # Recursively add child nodes and edges, starting at depth 1.
        add_nodes_and_edges(dot, 'central', data.get('nodes', []), current_depth=1, base_color=base_color)

        # render() writes the DOT source to tmp.name and the image to
        # tmp.name + '.' + output_format; cleanup=True removes the source file.
        with NamedTemporaryFile(delete=False, suffix=f'.{output_format}') as tmp:
            dot.render(tmp.name, format=output_format, cleanup=True)
            return f"{tmp.name}.{output_format}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
sample_data.py ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Sample payloads for the diagram generators in this Space.
# Each constant is a JSON *string* (not a dict): the generator functions
# call json.loads() on it themselves.

# Concept map sample (4 levels deep) — consumed by generate_concept_map().
CONCEPT_MAP_JSON = """
{
    "central_node": "Artificial Intelligence (AI)",
    "nodes": [
        {
            "id": "ml_fundamental",
            "label": "Machine Learning",
            "relationship": "is essential for",
            "subnodes": [
                {
                    "id": "dl_branch",
                    "label": "Deep Learning",
                    "relationship": "for example",
                    "subnodes": [
                        {
                            "id": "cnn_example",
                            "label": "CNNs",
                            "relationship": "for example"
                        },
                        {
                            "id": "rnn_example",
                            "label": "RNNs",
                            "relationship": "for example"
                        }
                    ]
                },
                {
                    "id": "rl_branch",
                    "label": "Reinforcement Learning",
                    "relationship": "for example",
                    "subnodes": [
                        {
                            "id": "qlearning_example",
                            "label": "Q-Learning",
                            "relationship": "example"
                        },
                        {
                            "id": "pg_example",
                            "label": "Policy Gradients",
                            "relationship": "example"
                        }
                    ]
                }
            ]
        },
        {
            "id": "ai_types",
            "label": "Types",
            "relationship": "formed by",
            "subnodes": [
                {
                    "id": "agi_type",
                    "label": "AGI",
                    "relationship": "this is",
                    "subnodes": [
                        {
                            "id": "strong_ai",
                            "label": "Strong AI",
                            "relationship": "provoked by",
                            "subnodes": [
                                {
                                    "id": "human_intel",
                                    "label": "Human-level Intel.",
                                    "relationship": "of"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "ani_type",
                    "label": "ANI",
                    "relationship": "this is",
                    "subnodes": [
                        {
                            "id": "weak_ai",
                            "label": "Weak AI",
                            "relationship": "provoked by",
                            "subnodes": [
                                {
                                    "id": "narrow_tasks",
                                    "label": "Narrow Tasks",
                                    "relationship": "of"
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "ai_capabilities",
            "label": "Capabilities",
            "relationship": "change",
            "subnodes": [
                {
                    "id": "data_proc",
                    "label": "Data Processing",
                    "relationship": "can",
                    "subnodes": [
                        {
                            "id": "big_data",
                            "label": "Big Data",
                            "relationship": "as",
                            "subnodes": [
                                {
                                    "id": "analysis_example",
                                    "label": "Data Analysis",
                                    "relationship": "example"
                                },
                                {
                                    "id": "prediction_example",
                                    "label": "Prediction",
                                    "relationship": "example"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "decision_making",
                    "label": "Decision Making",
                    "relationship": "can be",
                    "subnodes": [
                        {
                            "id": "automation",
                            "label": "Automation",
                            "relationship": "as",
                            "subnodes": [
                                {
                                    "id": "robotics_example",
                                    "label": "Robotics",
                                    "relationship": "Example"},
                                {
                                    "id": "autonomous_example",
                                    "label": "Autonomous Vehicles",
                                    "relationship": "of one"
                                }
                            ]
                        }
                    ]
                },
                {
                    "id": "problem_solving",
                    "label": "Problem Solving",
                    "relationship": "can",
                    "subnodes": [
                        {
                            "id": "optimization",
                            "label": "Optimization",
                            "relationship": "as is",
                            "subnodes": [
                                {
                                    "id": "algorithms_example",
                                    "label": "Algorithms",
                                    "relationship": "for example"
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ]
}
"""

# JSON for Synoptic Chart (horizontal hierarchy) - AI related, 4 levels.
SYNOPTIC_CHART_JSON = """
{
    "central_node": "AI Project Lifecycle",
    "nodes": [
        {
            "id": "phase1",
            "label": "I. Problem Definition & Data Acquisition",
            "relationship": "Starts with",
            "subnodes": [
                {
                    "id": "sub1_1",
                    "label": "1. Problem Formulation",
                    "relationship": "Involves",
                    "subnodes": [
                        {"id": "sub1_1_1", "label": "1.1. Identify Business Need", "relationship": "e.g."},
                        {"id": "sub1_1_2", "label": "1.2. Define KPIs", "relationship": "e.g."}
                    ]
                },
                {
                    "id": "sub1_2",
                    "label": "2. Data Collection",
                    "relationship": "Followed by",
                    "subnodes": [
                        {"id": "sub1_2_1", "label": "2.1. Source Data", "relationship": "from"},
                        {"id": "sub1_2_2", "label": "2.2. Data Cleaning", "relationship": "includes"}
                    ]
                }
            ]
        },
        {
            "id": "phase2",
            "label": "II. Model Development",
            "relationship": "Proceeds to",
            "subnodes": [
                {
                    "id": "sub2_1",
                    "label": "1. Feature Engineering",
                    "relationship": "Comprises",
                    "subnodes": [
                        {"id": "sub2_1_1", "label": "1.1. Feature Selection", "relationship": "e.g."},
                        {"id": "sub2_1_2", "label": "1.2. Feature Transformation", "relationship": "e.g."}
                    ]
                },
                {
                    "id": "sub2_2",
                    "label": "2. Model Training",
                    "relationship": "Involves",
                    "subnodes": [
                        {"id": "sub2_2_1", "label": "2.1. Algorithm Selection", "relationship": "uses"},
                        {"id": "sub2_2_2", "label": "2.2. Hyperparameter Tuning", "relationship": "optimizes"}
                    ]
                }
            ]
        },
        {
            "id": "phase3",
            "label": "III. Evaluation & Deployment",
            "relationship": "Culminates in",
            "subnodes": [
                {
                    "id": "sub3_1",
                    "label": "1. Model Evaluation",
                    "relationship": "Includes",
                    "subnodes": [
                        {"id": "sub3_1_1", "label": "1.1. Performance Metrics", "relationship": "measures"},
                        {"id": "sub3_1_2", "label": "1.2. Bias & Fairness Audits", "relationship": "ensures"}
                    ]
                },
                {
                    "id": "sub3_2",
                    "label": "2. Deployment & Monitoring",
                    "relationship": "Requires",
                    "subnodes": [
                        {"id": "sub3_2_1", "label": "2.1. API/Integration Development", "relationship": "for"},
                        {"id": "sub3_2_2", "label": "2.2. Continuous Monitoring", "relationship": "ensures"}
                    ]
                }
            ]
        }
    ]
}
"""

# JSON for Radial Diagram (central expansion) - AI related, 3 levels with
# a 5-branch by 2-leaf structure.
RADIAL_DIAGRAM_JSON = """
{
    "central_node": "AI Core Concepts & Domains",
    "nodes": [
        {
            "id": "foundational_ml",
            "label": "Foundational ML",
            "relationship": "builds on",
            "subnodes": [
                {"id": "supervised_l", "label": "Supervised Learning", "relationship": "e.g."},
                {"id": "unsupervised_l", "label": "Unsupervised Learning", "relationship": "e.g."}
            ]
        },
        {
            "id": "dl_architectures",
            "label": "Deep Learning Arch.",
            "relationship": "evolved from",
            "subnodes": [
                {"id": "cnns_rad", "label": "CNNs", "relationship": "e.g."},
                {"id": "rnns_rad", "label": "RNNs", "relationship": "e.g."}
            ]
        },
        {
            "id": "major_applications",
            "label": "Major AI Applications",
            "relationship": "applied in",
            "subnodes": [
                {"id": "nlp_rad", "label": "Natural Language Processing", "relationship": "e.g."},
                {"id": "cv_rad", "label": "Computer Vision", "relationship": "e.g."}
            ]
        },
        {
            "id": "ethical_concerns",
            "label": "Ethical AI Concerns",
            "relationship": "addresses",
            "subnodes": [
                {"id": "fairness_rad", "label": "Fairness & Bias", "relationship": "e.g."},
                {"id": "explainability", "label": "Explainability (XAI)", "relationship": "e.g."}
            ]
        },
        {
            "id": "future_trends",
            "label": "Future AI Trends",
            "relationship": "looking at",
            "subnodes": [
                {"id": "agi_future", "label": "AGI Development", "relationship": "e.g."},
                {"id": "quantum_ai", "label": "Quantum AI", "relationship": "e.g."}
            ]
        }
    ]
}
"""

# Flowchart sample — consumed by generate_process_flow_diagram().
PROCESS_FLOW_JSON = """
{
    "start_node": "Start Inference Request",
    "nodes": [
        {
            "id": "user_input",
            "label": "Receive User Input (Data)",
            "type": "io"
        },
        {
            "id": "preprocess_data",
            "label": "Preprocess Data",
            "type": "process"
        },
        {
            "id": "validate_data",
            "label": "Validate Data Format/Type",
            "type": "decision"
        },
        {
            "id": "data_valid_yes",
            "label": "Data Valid?",
            "type": "decision"
        },
        {
            "id": "load_model",
            "label": "Load AI Model (if not cached)",
            "type": "process"
        },
        {
            "id": "run_inference",
            "label": "Run AI Model Inference",
            "type": "process"
        },
        {
            "id": "postprocess_output",
            "label": "Postprocess Model Output",
            "type": "process"
        },
        {
            "id": "send_response",
            "label": "Send Response to User",
            "type": "io"
        },
        {
            "id": "log_error",
            "label": "Log Error & Notify User",
            "type": "process"
        },
        {
            "id": "end_inference_process",
            "label": "End Inference Process",
            "type": "end"
        }
    ],
    "connections": [
        {"from": "start_node", "to": "user_input", "label": "Request"},
        {"from": "user_input", "to": "preprocess_data", "label": "Data Received"},
        {"from": "preprocess_data", "to": "validate_data", "label": "Cleaned"},
        {"from": "validate_data", "to": "data_valid_yes", "label": "Check"},
        {"from": "data_valid_yes", "to": "load_model", "label": "Yes"},
        {"from": "data_valid_yes", "to": "log_error", "label": "No"},
        {"from": "load_model", "to": "run_inference", "label": "Model Ready"},
        {"from": "run_inference", "to": "postprocess_output", "label": "Output Generated"},
        {"from": "postprocess_output", "to": "send_response", "label": "Ready"},
        {"from": "send_response", "to": "end_inference_process", "label": "Response Sent"},
        {"from": "log_error", "to": "end_inference_process", "label": "Error Handled"}
    ]
}
"""

# New JSON for Work Breakdown Structure (WBS) Diagram - similar to the
# reference image, but not identical. Six nesting levels.
# NOTE(review): several labels do not match their ids (e.g.
# task_3_1_change_mgmt is labelled "Data Preprocessing",
# ss_task_3_1_1_1_beta_testing is labelled "Model Training") — presumably
# intentional sample data; confirm before relying on the ids.
WBS_DIAGRAM_JSON = """
{
    "project_title": "AI Model Development Project",
    "phases": [
        {
            "id": "phase_prep",
            "label": "Preparation",
            "tasks": [
                {
                    "id": "task_1_1_vision",
                    "label": "Identify Vision",
                    "subtasks": [
                        {
                            "id": "subtask_1_1_1_design_staff",
                            "label": "Design & Staffing",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_1_1_1_1_env_setup",
                                    "label": "Environment Setup",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_1_1_1_1_1_lib_install",
                                            "label": "Install Libraries",
                                            "final_level_tasks": [
                                                {"id": "ft_1_1_1_1_1_1_data_access", "label": "Grant Data Access"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "phase_plan",
            "label": "Planning",
            "tasks": [
                {
                    "id": "task_2_1_cost_analysis",
                    "label": "Cost Analysis",
                    "subtasks": [
                        {
                            "id": "subtask_2_1_1_benefit_analysis",
                            "label": "Benefit Analysis",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_2_1_1_1_risk_assess",
                                    "label": "AI Risk Assessment",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_2_1_1_1_1_model_selection",
                                            "label": "Model Selection",
                                            "final_level_tasks": [
                                                {"id": "ft_2_1_1_1_1_1_data_strategy", "label": "Data Strategy"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        {
            "id": "phase_dev",
            "label": "Development",
            "tasks": [
                {
                    "id": "task_3_1_change_mgmt",
                    "label": "Data Preprocessing",
                    "subtasks": [
                        {
                            "id": "subtask_3_1_1_implementation",
                            "label": "Feature Engineering",
                            "sub_subtasks": [
                                {
                                    "id": "ss_task_3_1_1_1_beta_testing",
                                    "label": "Model Training",
                                    "sub_sub_subtasks": [
                                        {
                                            "id": "sss_task_3_1_1_1_1_other_task",
                                            "label": "Model Evaluation",
                                            "final_level_tasks": [
                                                {"id": "ft_3_1_1_1_1_1_hyperparam_tune", "label": "Hyperparameter Tuning"}
                                            ]
                                        }
                                    ]
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ]
}

"""
synoptic_chart_generator.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+ from graph_generator_utils import add_nodes_and_edges
6
+
7
def generate_synoptic_chart(json_input: str, output_format: str) -> str:
    """
    Generate a synoptic chart (horizontal flowchart) from JSON input.

    The JSON must contain a 'central_node' string and a 'nodes' list; each
    node may carry 'id', 'label', 'relationship' and arbitrarily nested
    'subnodes' lists. Nodes/edges are drawn by the shared helper
    add_nodes_and_edges, with colors lightened per depth from a dark base.

    Args:
        json_input (str): JSON string describing the synoptic chart structure.
        output_format (str): Graphviz output format, e.g. 'png' or 'svg'.

    Returns:
        str: Filepath of the rendered image, or a string starting with
        "Error:" describing what went wrong (invalid JSON, missing fields,
        or a Graphviz rendering failure).
    """
    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'central_node' not in data or 'nodes' not in data:
            raise ValueError("Missing required fields: central_node or nodes")

        # Korean-capable font. With GDFONTPATH set, Graphviz resolves the
        # bundled font file by its name without the .ttf extension.
        korean_font = 'NanumGothic-Regular'

        # NOTE: the output format is NOT fixed here; the caller's
        # output_format is passed to render() below. (A hard-coded
        # format='png' in the constructor would be misleading, since
        # render() always overrides it.)
        dot = graphviz.Digraph(
            name='SynopticChart',
            graph_attr={
                'rankdir': 'LR',        # left-to-right layout (horizontal hierarchy)
                'splines': 'ortho',     # straight orthogonal edges
                'bgcolor': 'white',
                'pad': '0.5',           # padding around the whole graph
                'ranksep': '0.7',       # horizontal separation between ranks (columns)
                'nodesep': '0.3',       # vertical separation within a rank
                'fontname': korean_font,
                'charset': 'UTF-8',
            },
            node_attr={'fontname': korean_font},   # default font for all nodes
            edge_attr={'fontname': korean_font},   # default font for all edges
        )

        base_color = '#19191a'  # darkest shade; deeper levels get lighter tints

        dot.node(
            'central',
            data['central_node'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',      # white text on the dark central node
            fontsize='16',          # larger font for the central node
            fontname=korean_font,
        )

        add_nodes_and_edges(dot, 'central', data.get('nodes', []),
                            current_depth=1, base_color=base_color)

        # Render into a unique temp path. Close the temp file handle first
        # (rendering while it is open breaks on Windows file locking).
        # render(path) writes the DOT source to `path` and the image to
        # `path.<format>`; cleanup=True removes the source afterwards.
        try:
            with NamedTemporaryFile(delete=False, suffix=f'.{output_format}') as tmp:
                source_path = tmp.name
            output_path = dot.render(source_path, format=output_format, cleanup=True)
            return output_path
        except Exception as render_error:
            # Keep the message short; Graphviz errors can be multi-part.
            return f"Error: Failed to render diagram - {str(render_error).split(';')[0]}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        return f"Error: {str(e)}"
wbs_diagram_generator.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import graphviz
2
+ import json
3
+ from tempfile import NamedTemporaryFile
4
+ import os
5
+
6
def generate_wbs_diagram(json_input: str, output_format: str) -> str:
    """
    Generate a Work Breakdown Structure (WBS) diagram from JSON input.

    The JSON must contain a 'project_title' string and a 'phases' list.
    Each phase has 'id', 'label' and an optional 'tasks' list; tasks nest
    further via the first matching key among 'tasks', 'subtasks',
    'sub_subtasks', 'sub_sub_subtasks' and 'final_level_tasks'.

    Args:
        json_input (str): JSON string describing the WBS structure.
        output_format (str): Graphviz output format, e.g. 'png' or 'svg'.

    Returns:
        str: Filepath of the rendered image, or a string starting with
        "Error:" describing what went wrong (invalid JSON, missing fields,
        malformed task data, or a Graphviz rendering failure).
    """
    import re  # used by normalize_id; hoisted so it is not re-imported per node

    try:
        if not json_input.strip():
            return "Error: Empty input"

        data = json.loads(json_input)

        if 'project_title' not in data or 'phases' not in data:
            raise ValueError("Missing required fields: project_title or phases")

        # Korean-capable font. With GDFONTPATH set, Graphviz resolves the
        # bundled font file by its name without the .ttf extension.
        korean_font = 'NanumGothic-Regular'

        dot = graphviz.Digraph(
            name='WBSDiagram',
            graph_attr={
                'rankdir': 'TB',         # top-to-bottom hierarchy
                'splines': 'polyline',   # polyline instead of ortho (more robust)
                'bgcolor': 'white',
                'pad': '0.5',
                'ranksep': '0.6',        # vertical separation between ranks
                'nodesep': '0.5',        # horizontal separation within a rank
                'fontname': korean_font,
                'charset': 'UTF-8',
            },
            node_attr={'fontname': korean_font},   # default font for all nodes
            edge_attr={'fontname': korean_font},   # default font for all edges
        )

        base_color = '#19191a'  # darkest shade; deeper levels get lighter tints

        # raw id -> safe Graphviz id. Besides sanitising non-ASCII ids
        # (e.g. Korean), the map guarantees that two DIFFERENT raw ids can
        # never collapse into the same node id (plain sanitisation could
        # silently merge such nodes).
        id_map = {}
        used_ids = set()

        def normalize_id(id_str):
            """Return a stable, unique, Graphviz-safe id for id_str."""
            raw = str(id_str)
            if raw in id_map:
                return id_map[raw]
            # Allow only letters, digits and underscores.
            safe_id = re.sub(r'[^a-zA-Z0-9_]', '_', raw)
            # Prefix ids that start with a digit.
            if safe_id and safe_id[0].isdigit():
                safe_id = 'n_' + safe_id
            # Fall back to a hash-based name for empty results.
            if not safe_id:
                safe_id = 'node_' + str(hash(raw))
            # Disambiguate collisions between distinct raw ids.
            candidate = safe_id
            counter = 1
            while candidate in used_ids:
                candidate = f"{safe_id}_{counter}"
                counter += 1
            id_map[raw] = candidate
            used_ids.add(candidate)
            return candidate

        # Project title node (root of the hierarchy).
        dot.node(
            'project_root',
            data['project_title'],
            shape='box',
            style='filled,rounded',
            fillcolor=base_color,
            fontcolor='white',
            fontsize='18',
            fontname=korean_font,
        )

        def get_gradient_color(depth, base_hex_color, lightening_factor=0.12):
            """Lighten base_hex_color toward white proportionally to depth."""
            base_r = int(base_hex_color[1:3], 16)
            base_g = int(base_hex_color[3:5], 16)
            base_b = int(base_hex_color[5:7], 16)

            current_r = base_r + int((255 - base_r) * depth * lightening_factor)
            current_g = base_g + int((255 - base_g) * depth * lightening_factor)
            current_b = base_b + int((255 - base_b) * depth * lightening_factor)

            return f'#{min(255, current_r):02x}{min(255, current_g):02x}{min(255, current_b):02x}'

        def get_font_color_for_background(depth, base_hex_color, lightening_factor=0.12):
            """Pick white or black text based on the lightened fill's luminance."""
            base_r = int(base_hex_color[1:3], 16)
            base_g = int(base_hex_color[3:5], 16)
            base_b = int(base_hex_color[5:7], 16)
            current_r = base_r + (255 - base_r) * depth * lightening_factor
            current_g = base_g + (255 - base_g) * depth * lightening_factor
            current_b = base_b + (255 - base_b) * depth * lightening_factor

            # Relative luminance (Rec. 709 coefficients), normalised to 0..1.
            luminance = (0.2126 * current_r + 0.7152 * current_g + 0.0722 * current_b) / 255
            return 'white' if luminance < 0.5 else 'black'

        def _add_wbs_nodes_recursive(parent_id, current_level_tasks, current_depth):
            """Add one level of tasks under parent_id, then recurse into children."""
            for task_data in current_level_tasks:
                task_id = task_data.get('id')
                task_label = task_data.get('label')

                if not all([task_id, task_label]):
                    raise ValueError(f"Invalid task data at depth {current_depth}: {task_data}")

                safe_task_id = normalize_id(task_id)

                node_fill_color = get_gradient_color(current_depth, base_color)
                node_font_color = get_font_color_for_background(current_depth, base_color)
                # Shrink text with depth, but never below 9pt.
                font_size = max(9, 14 - (current_depth * 2))

                dot.node(
                    safe_task_id,
                    task_label,
                    shape='box',
                    style='filled,rounded',
                    fillcolor=node_fill_color,
                    fontcolor=node_font_color,
                    fontsize=str(font_size),
                    fontname=korean_font,
                )
                dot.edge(parent_id, safe_task_id, color='#4a4a4a',
                         arrowhead='none', fontname=korean_font)

                # Recurse into the first child-list key present; the schema
                # uses a different key name at each nesting level.
                next_level_keys = ['tasks', 'subtasks', 'sub_subtasks',
                                   'sub_sub_subtasks', 'final_level_tasks']
                for key in next_level_keys:
                    if key in task_data and isinstance(task_data[key], list):
                        _add_wbs_nodes_recursive(safe_task_id, task_data[key], current_depth + 1)
                        break  # only the first matching sub-level key is processed

        # Phases sit at depth 1, directly under the project root.
        phase_depth = 1
        for phase in data['phases']:
            phase_id = phase.get('id')
            phase_label = phase.get('label')

            if not all([phase_id, phase_label]):
                raise ValueError(f"Invalid phase data: {phase}")

            safe_phase_id = normalize_id(phase_id)

            phase_fill_color = get_gradient_color(phase_depth, base_color)
            phase_font_color = get_font_color_for_background(phase_depth, base_color)
            font_size_phase = max(9, 14 - (phase_depth * 2))

            dot.node(
                safe_phase_id,
                phase_label,
                shape='box',
                style='filled,rounded',
                fillcolor=phase_fill_color,
                fontcolor=phase_font_color,
                fontsize=str(font_size_phase),
                fontname=korean_font,
            )
            dot.edge('project_root', safe_phase_id, color='#4a4a4a',
                     arrowhead='none', fontname=korean_font)

            if 'tasks' in phase and isinstance(phase['tasks'], list):
                _add_wbs_nodes_recursive(safe_phase_id, phase['tasks'], phase_depth + 1)

        # Render. render(path) writes the DOT source to `path` and the image
        # to `path.<format>`; cleanup=True removes the source afterwards.
        try:
            with NamedTemporaryFile(delete=False, suffix='.gv', prefix='wbs_') as tmp:
                # Strip the '.gv' suffix robustly so render() recreates it
                # as the source path without doubling extensions.
                output_filename = os.path.splitext(tmp.name)[0]
            output_path = dot.render(output_filename, format=output_format, cleanup=True)
            return output_path
        except Exception as render_error:
            # Keep the message short; Graphviz errors can be multi-part.
            return f"Error: Failed to render diagram - {str(render_error).split(';')[0]}"

    except json.JSONDecodeError:
        return "Error: Invalid JSON format"
    except Exception as e:
        # Keep the error message compact: first line only, capped at 100 chars.
        error_msg = str(e).split('\n')[0]
        if len(error_msg) > 100:
            error_msg = error_msg[:100] + "..."
        return f"Error: {error_msg}"