Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
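The train/test split is produced by the script below. Schema files are grouped with a union-find structure so that files from the same repository, and optionally near-duplicate files found with a BK-tree over normalized Levenshtein distance, always land on the same side of the split; scikit-learn's GroupShuffleSplit then partitions the groups. It depends on numpy, pybktree, scikit-learn, tqdm, unionfind, and Levenshtein.
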
import argparse
import json
from pathlib import Path

import Levenshtein
import numpy as np
import pybktree
import tqdm
import unionfind
from sklearn.model_selection import GroupShuffleSplit


def files_list():
    # Collect every JSON file under the valid_data directory
    data_path = Path("valid_data")
    return [f for f in data_path.rglob("*.json") if f.is_file()]


def main(similarity, split, seed):
    files = files_list()

    if similarity:
        # Items stored in the tree are (path, contents) tuples, so the
        # metric compares the contents element; max(..., 1) guards
        # against dividing by zero on empty files
        tree = pybktree.BKTree(
            lambda a, b: Levenshtein.distance(a[1], b[1])
            / max(len(a[1]), len(b[1]), 1)
        )

    # Group schemas from the same repository so they always land in the
    # same split
    uf = unionfind.UnionFind()
    repo_map = {}
    for schema_file in tqdm.tqdm(files):
        path_str = str(schema_file)
        # Repository key taken from the path components under valid_data
        repo = schema_file.parts[1:4]
        uf.add(path_str)
        if repo not in repo_map:
            repo_map[repo] = path_str
        else:
            uf.union(repo_map[repo], path_str)

        if similarity:
            tree.add((path_str, schema_file.read_text().strip()))

    del repo_map

    # Optionally merge groups whose contents fall within the similarity
    # threshold; pybktree yields (distance, item) pairs, where each item
    # is a (path, contents) tuple stored above
    if similarity:
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = schema_file.read_text().strip()
            for _, (other_path, _) in tree.find((path_str, data), similarity):
                uf.union(path_str, other_path)

    # Flatten the union-find components into parallel arrays of paths and
    # group labels
    all_schemas = []
    schema_groups = []
    for group, schemas in enumerate(uf.components()):
        all_schemas.extend(schemas)
        schema_groups.extend([group] * len(schemas))

    # Split so that no group straddles the train/test boundary
    all_schemas = np.array(all_schemas)
    schema_groups = np.array(schema_groups)
    gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
    train_indexes, test_indexes = next(gss.split(all_schemas, groups=schema_groups))

    Path("train_schemas.json").write_text(json.dumps(all_schemas[train_indexes].tolist()))
    Path("test_schemas.json").write_text(json.dumps(all_schemas[test_indexes].tolist()))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--similarity",
        default=None,
        type=float,
        help="normalized edit-distance threshold for grouping near-duplicate files",
    )
    parser.add_argument("--seed", default=15, type=int, help="random seed for the split")
    parser.add_argument("--split", default=0.8, type=float, help="fraction of groups assigned to train")
    args = parser.parse_args()
    main(args.similarity, args.split, args.seed)
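
Running the script with, e.g., --similarity 0.3 additionally merges groups whose contents differ by at most 30% of the longer file's length; with no --similarity flag, only the repository grouping applies. A minimal sanity check of the outputs (a sketch; the file names come from the script above, everything else is illustrative):

import json
from pathlib import Path

train = set(json.loads(Path("train_schemas.json").read_text()))
test = set(json.loads(Path("test_schemas.json").read_text()))

# The group-aware split keeps the two sides disjoint
assert train.isdisjoint(test)
print(f"{len(train)} train schemas, {len(test)} test schemas")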