import collections
import functools
import json
import multiprocessing
import os


def extract_domains(filename):
    """Collect the distinct ``"domain"`` values from a JSON-lines file.

    Returns ``(filename, domains)`` so results coming back from a
    multiprocessing pool can be matched to their input file.
    """
    domains = set()
    with open(filename) as f:
        for line in f:
            record = json.loads(line.strip())
            domains.add(record["domain"])
    return filename, list(domains)


def valid_train_split(filename, mapping=None):
    """Split one JSON-lines file into train and validation question lists.

    Lines are grouped by *consecutive* ``"domain"`` value (the file is
    assumed to be grouped/sorted by domain).  A whole domain group goes to
    the validation set only when ALL of the following hold, otherwise it
    goes to train:
      * the domain appears in exactly one language (``len(mapping[domain]) == 1``),
      * the validation set holds at most 2000 questions so far,
      * the domain has a single page (one distinct ``"domain_index"``),
      * the domain has at least 15 questions.

    Args:
        filename: path to the JSON-lines input file.
        mapping: dict mapping domain -> list of languages it appears in.
            Required in practice; the ``None`` default only exists so the
            function can be wrapped with ``functools.partial``.

    Returns:
        ``(train_lines, valid_lines, filename)`` where the line lists hold
        the original stripped JSON strings.
    """
    previous_domain = ""
    train = []
    valid = []
    domain_data = {"questions": [], "pages": set()}
    counter = 0
    with open(filename) as f:
        for line_txt in f:
            counter += 1
            line = json.loads(line_txt.strip())
            domain = line["domain"]
            if domain != previous_domain and previous_domain != "":
                # Domain boundary reached: flush the previous domain's
                # questions into train or valid per the criteria above.
                if len(mapping[previous_domain]) > 1:
                    train.extend(domain_data["questions"])
                elif len(valid) > 2000:
                    train.extend(domain_data["questions"])
                elif len(domain_data["pages"]) > 1:
                    train.extend(domain_data["questions"])
                elif len(domain_data["questions"]) < 15:
                    train.extend(domain_data["questions"])
                else:
                    valid.extend(domain_data["questions"])
                domain_data = {"questions": [], "pages": set()}
            domain_data["questions"].append(line_txt.strip())
            domain_data["pages"].add(line["domain_index"])
            previous_domain = domain
    # NOTE(review): the final domain is always sent to train and never
    # evaluated against the valid criteria — presumably intentional (keeps
    # valid bounded), but confirm.
    train.extend(domain_data["questions"])
    assert len(train) + len(valid) == counter
    return train, valid, filename


def main():
    """Two-pass pipeline: count languages per domain, then split each file."""
    # Deferred third-party import so importing this module does not require
    # tqdm (and so spawn-based workers import the module cheaply).
    import tqdm

    data = [f"data/{e}" for e in os.listdir("data") if e.endswith(".json")]

    # Pass 1: for every domain, record each language it appears in.
    with multiprocessing.Pool(os.cpu_count()) as p:
        domain_count = collections.defaultdict(list)
        for filename, domains in tqdm.tqdm(p.imap_unordered(extract_domains, data)):
            # assumes filenames look like "data/<name>.<lang>.json" so the
            # second dot-separated field is the language — TODO confirm.
            language = filename.split(".")[1]
            for domain in domains:
                domain_count[domain].append(language)

    # Pass 2: split every file and write the two halves.  Create the output
    # directories up front; the original assumed they already existed.
    os.makedirs("data/train", exist_ok=True)
    os.makedirs("data/valid", exist_ok=True)
    with multiprocessing.Pool(os.cpu_count()) as p:
        fn = functools.partial(valid_train_split, mapping=domain_count)
        for train, valid, filename in tqdm.tqdm(p.imap_unordered(fn, data)):
            train_filename = filename.replace("data/", "data/train/")
            with open(train_filename, "w") as f:
                f.write("\n".join(train))
            valid_filename = filename.replace("data/", "data/valid/")
            with open(valid_filename, "w") as f:
                f.write("\n".join(valid))


if __name__ == "__main__":
    # Guard is required: multiprocessing's spawn start method re-imports this
    # module in every worker; unguarded top-level Pool code would recurse.
    main()