import argparse
import json
from pathlib import Path

import Levenshtein
import numpy as np
import pybktree
import tqdm
import unionfind
from sklearn.model_selection import GroupShuffleSplit


def files_list():
    data_path = Path("valid_data")
    files = [f for f in data_path.rglob("*.json") if f.is_file()]
    return files


def main(similarity, split, seed):
    files = files_list()

    if similarity:
        # Items in the tree are (path, contents) tuples, so the metric compares
        # the contents and normalizes the edit distance into [0, 1]. The extra
        # 1 in max() guards against division by zero on two empty documents.
        tree = pybktree.BKTree(
            lambda a, b: Levenshtein.distance(a[1], b[1])
            / max(len(a[1]), len(b[1]), 1)
        )

    # Group schemas by repository so that one repository never spans both
    # sides of the train/test split
    uf = unionfind.UnionFind()
    repo_map = {}
    for schema_file in tqdm.tqdm(files):
        path_str = str(schema_file)
        # Path components identifying the repository the schema came from
        repo = schema_file.parts[1:4]
        uf.add(path_str)
        if repo not in repo_map:
            repo_map[repo] = path_str
        else:
            uf.union(repo_map[repo], path_str)

        if similarity:
            tree.add((path_str, schema_file.read_text().strip()))
    # Only the union-find structure is needed from here on
    del repo_map

    # Optionally group together similar files
    if similarity:
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = schema_file.read_text().strip()
            # BKTree.find returns (distance, item) pairs for every item within
            # the given distance threshold; unpack the (path, contents) item
            # and merge its group with this file's group
            for _, (other_path, _) in tree.find((path_str, data), similarity):
                uf.union(path_str, other_path)

    # Flatten the union-find components into parallel arrays of schema paths
    # and group labels for GroupShuffleSplit
    all_schemas = []
    schema_groups = []
    for group, schemas in enumerate(uf.components()):
        all_schemas.extend(schemas)
        schema_groups.extend([group] * len(schemas))
    all_schemas = np.array(all_schemas)
    schema_groups = np.array(schema_groups)

    # Split so that every schema in a group lands on the same side
    gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
    (train_indexes, test_indexes) = next(gss.split(all_schemas, groups=schema_groups))

    Path("train_schemas.json").write_text(
        json.dumps(all_schemas[train_indexes].tolist())
    )
    Path("test_schemas.json").write_text(
        json.dumps(all_schemas[test_indexes].tolist())
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--similarity", default=None, type=float)
    parser.add_argument("--seed", default=15, type=int)
    parser.add_argument("--split", default=0.8, type=float)
    args = parser.parse_args()

    main(args.similarity, args.split, args.seed)
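
# Example invocation (a sketch; the filename split.py and the layout of
# valid_data/ are assumptions, not stated by the original script):
#
#   python split.py --split 0.8 --seed 15
#   python split.py --similarity 0.3
#
# Without --similarity, only schemas from the same repository are grouped.
# With --similarity set, any pair of schemas whose normalized edit distance
# is at most the threshold (here 0.3) is also merged into one group, so
# near-duplicate schemas never straddle the train/test boundary.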