"""Graph analytics executor and data types."""

import inspect
import os
from lynxkite.core import ops, workspace
import dataclasses
import functools
import networkx as nx
import pandas as pd
import polars as pl
import traceback
import typing


ENV = "LynxKite Graph Analytics"


@dataclasses.dataclass
class RelationDefinition:
    """Defines a set of edges."""

    df: str  # The DataFrame that contains the edges.
    source_column: str  # The column in the edge DataFrame that contains the source node ID.
    target_column: str  # The column in the edge DataFrame that contains the target node ID.
    source_table: str  # The DataFrame that contains the source nodes.
    target_table: str  # The DataFrame that contains the target nodes.
    source_key: str  # The column in the source table that contains the node ID.
    target_key: str  # The column in the target table that contains the node ID.
    name: str | None = None  # Descriptive name for the relation.
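
# For example (a sketch mirroring what Bundle.from_nx() below produces): an
# "edges" DataFrame whose "source" and "target" columns reference the "id"
# column of a "nodes" DataFrame would be described as:
#
#   RelationDefinition(
#       df="edges",
#       source_column="source",
#       target_column="target",
#       source_table="nodes",
#       target_table="nodes",
#       source_key="id",
#       target_key="id",
#   )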


@dataclasses.dataclass
class Bundle:
    """A collection of DataFrames and other data.

    Can efficiently represent a knowledge graph (homogeneous or heterogeneous) or tabular data.
    It can also carry other data, such as a trained model.
    """

    dfs: dict[str, pd.DataFrame] = dataclasses.field(default_factory=dict)
    relations: list[RelationDefinition] = dataclasses.field(default_factory=list)
    other: dict[str, typing.Any] = dataclasses.field(default_factory=dict)

    @classmethod
    def from_nx(cls, graph: nx.Graph):
        edges = nx.to_pandas_edgelist(graph)
        d = dict(graph.nodes(data=True))
        nodes = pd.DataFrame(d.values(), index=d.keys())
        nodes["id"] = nodes.index
        if "index" in nodes.columns:
            nodes.drop(columns=["index"], inplace=True)
        return cls(
            dfs={"edges": edges, "nodes": nodes},
            relations=[
                RelationDefinition(
                    df="edges",
                    source_column="source",
                    target_column="target",
                    source_table="nodes",
                    target_table="nodes",
                    source_key="id",
                    target_key="id",
                )
            ],
        )

    @classmethod
    def from_df(cls, df: pd.DataFrame):
        return cls(dfs={"df": df})

    def to_nx(self):
        # TODO: Use relations.
        graph = nx.DiGraph()
        if "nodes" in self.dfs:
            df = self.dfs["nodes"]
            if df.index.name != "id":
                df = df.set_index("id")
            graph.add_nodes_from(df.to_dict("index").items())
        if "edges" in self.dfs:
            edges = self.dfs["edges"]
            graph.add_edges_from(
                [
                    (
                        e["source"],
                        e["target"],
                        {k: e[k] for k in edges.columns if k not in ["source", "target"]},
                    )
                    for e in edges.to_records()
                ]
            )
        return graph

    def copy(self):
        """Returns a medium depth copy of the bundle. The Bundle is completely new, but the DataFrames and RelationDefinitions are shared."""
        return Bundle(
            dfs=dict(self.dfs),
            relations=list(self.relations),
            other=dict(self.other),
        )

    def to_dict(self, limit: int = 100):
        """JSON-serializable representation of the bundle, including some data."""
        return {
            "dataframes": {
                name: {
                    "columns": [str(c) for c in df.columns],
                    "data": df_for_frontend(df, limit).values.tolist(),
                }
                for name, df in self.dfs.items()
            },
            "relations": [dataclasses.asdict(relation) for relation in self.relations],
            "other": {k: str(v) for k, v in self.other.items()},
        }

    def metadata(self):
        """JSON-serializable information about the bundle, metadata only."""
        return {
            "dataframes": {
                name: {
                    "columns": sorted(str(c) for c in df.columns),
                }
                for name, df in self.dfs.items()
            },
            "relations": [dataclasses.asdict(relation) for relation in self.relations],
            "other": {k: getattr(v, "metadata", lambda: {})() for k, v in self.other.items()},
        }
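
# Round-trip usage sketch (karate_club_graph is a standard NetworkX example graph):
#
#   g = nx.karate_club_graph()
#   bundle = Bundle.from_nx(g)           # bundle.dfs has "nodes" and "edges" tables
#   sorted(bundle.dfs["nodes"].columns)  # ["club", "id"]
#   g2 = bundle.to_nx()                  # a DiGraph rebuilt from the two tables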


def nx_node_attribute_func(name):
    """Decorator for wrapping a function that adds a NetworkX node attribute."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(graph: nx.Graph, **kwargs):
            graph = graph.copy()
            attr = func(graph, **kwargs)
            nx.set_node_attributes(graph, attr, name)
            return graph

        return wrapper

    return decorator
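
# Usage sketch (hypothetical op): nx.pagerank() returns a {node: score} dict,
# which is exactly the shape the wrapper expects.
#
#   @nx_node_attribute_func("pagerank")
#   def compute_pagerank(graph: nx.Graph, damping: float = 0.85):
#       return nx.pagerank(graph, alpha=damping)
#
# compute_pagerank(graph) then returns a copy of the graph with the scores
# attached as a "pagerank" node attribute, instead of the raw dict.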


def disambiguate_edges(ws: workspace.Workspace):
    """If an input plug is connected to multiple edges, keep only the last edge."""
    catalog = ops.CATALOGS[ws.env]
    nodes = {node.id: node for node in ws.nodes}
    seen = set()
    # Iterate in reverse so that the most recently added edge is the one we keep.
    for edge in reversed(ws.edges):
        dst_node = nodes[edge.target]
        op = catalog.get(dst_node.data.title)
        if op is None:
            # Unknown operation. _execute_node() will report the error to the user.
            continue
        if op.get_input(edge.targetHandle).type == list[Bundle]:
            # This input takes multiple bundles, so there is nothing to disambiguate.
            continue
        if (edge.target, edge.targetHandle) in seen:
            i = ws.edges.index(edge)
            del ws.edges[i]
            if hasattr(ws, "_crdt"):
                del ws._crdt["edges"][i]
        seen.add((edge.target, edge.targetHandle))


# Outputs are tracked by node ID and output ID.
Outputs = dict[tuple[str, str], typing.Any]


@ops.register_executor(ENV)
async def execute(ws: workspace.Workspace):
    catalog = ops.CATALOGS[ws.env]
    disambiguate_edges(ws)
    outputs: Outputs = {}
    nodes = {node.id: node for node in ws.nodes}
    todo = set(nodes.keys())
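    # Fixed-point scheduling: keep sweeping the remaining nodes, executing every
    # node whose inputs are all computed, until a full sweep makes no progress.
    # Nodes that never become ready (e.g. those downstream of a failed node) are
    # left unexecuted.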
    progress = True
    while progress:
        progress = False
        for id in list(todo):
            node = nodes[id]
            inputs_done = [
                (edge.source, edge.sourceHandle) in outputs
                for edge in ws.edges
                if edge.target == id
            ]
            if all(inputs_done):
                # All inputs for this node are ready, we can compute the output.
                todo.remove(id)
                progress = True
                await _execute_node(node, ws, catalog, outputs)
    return outputs


async def await_if_needed(obj):
    if inspect.isawaitable(obj):
        obj = await obj
    return obj


async def _execute_node(
    node: workspace.WorkspaceNode, ws: workspace.Workspace, catalog: ops.Catalog, outputs: Outputs
):
    params = {**node.data.params}
    op = catalog.get(node.data.title)
    if not op:
        node.publish_error("Operation not found in catalog")
        return
    node.publish_started()
    # TODO: Handle multi-inputs.
    input_map = {
        edge.targetHandle: outputs[edge.source, edge.sourceHandle]
        for edge in ws.edges
        if edge.target == node.id
    }
    # Convert input types to match the operation signature.
    try:
        inputs = []
        missing = []
        for p in op.inputs:
            if p.name not in input_map:
                opt_type = ops.get_optional_type(p.type)
                if opt_type is not None:
                    inputs.append(None)
                else:
                    missing.append(p.name)
                continue
            x = input_map[p.name]
            if p.type == nx.Graph and isinstance(x, Bundle):
                x = x.to_nx()
            elif p.type == Bundle and isinstance(x, nx.Graph):
                x = Bundle.from_nx(x)
            elif p.type == Bundle and isinstance(x, pd.DataFrame):
                x = Bundle.from_df(x)
            inputs.append(x)
    except Exception as e:
        if not os.environ.get("LYNXKITE_SUPPRESS_OP_ERRORS"):
            traceback.print_exc()
        node.publish_error(e)
        return
    if missing:
        node.publish_error(f"Missing input: {', '.join(missing)}")
        return
    # Execute op.
    try:
        result = op(*inputs, **params)
        result.output = await await_if_needed(result.output)
        result.display = await await_if_needed(result.display)
    except Exception as e:
        if not os.environ.get("LYNXKITE_SUPPRESS_OP_ERRORS"):
            traceback.print_exc()
        result = ops.Result(error=str(e))
    result.input_metadata = [_get_metadata(i) for i in inputs]
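    # A dict output is fanned out to the matching output plugs; any other value
    # is published on the op's single output.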
    if isinstance(result.output, dict):
        for k, v in result.output.items():
            outputs[node.id, k] = v
    elif result.output is not None:
        [k] = op.outputs
        outputs[node.id, k.name] = result.output
    node.publish_result(result)


def _get_metadata(x):
    if hasattr(x, "metadata"):
        return x.metadata()
    return {}


def df_for_frontend(df: pd.DataFrame, limit: int) -> pd.DataFrame:
    """Returns a DataFrame with values that are safe to send to the frontend."""
    if isinstance(df, pl.LazyFrame):
        df = df.limit(limit).collect()
    if isinstance(df, pl.DataFrame):
        df = df.to_pandas()
    # Copy, so the conversions below don't modify the caller's DataFrame.
    df = df[:limit].copy()
    # Convert non-numeric columns to strings.
    for c in df.columns:
        if not pd.api.types.is_numeric_dtype(df[c]):
            df[c] = df[c].astype(str)
    return df
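
# Usage sketch (illustrative data): non-numeric columns are stringified, so the
# frontend only ever receives numbers and strings.
#
#   df = pd.DataFrame({"n": [1, 2], "ts": pd.to_datetime(["2024-01-01", "2024-01-02"])})
#   df_for_frontend(df, limit=100)["ts"]  # dtype becomes object (strings)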