import os
import json
from datasets import load_from_disk, load_dataset, DatasetDict
from util import PARTITIONING_CATS
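
# PARTITIONING_CATS (imported from the repo's util module) is assumed here to
# be the list of top-level category names, which double as the Hub config names.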


def download_dir(repo_name: str = "persius/hicric", output_dir: str = "./arrow_data"):
    """Download the dataset from the HF Hub, without cloning the repo, and save it locally."""
    ds_dict = DatasetDict()
    for split in PARTITIONING_CATS:
        # Each config on the Hub exposes a single "train" split; requesting it
        # directly yields a Dataset rather than a nested DatasetDict.
        ds_dict[split] = load_dataset(repo_name, name=split, split="train")
    ds_dict.save_to_disk(output_dir)
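
# Illustrative on-disk layout produced by save_to_disk (one sub-dataset per
# category; exact shard file names vary by `datasets` version):
#   ./arrow_data/
#       dataset_dict.json
#       <category>/
#           dataset_info.json
#           state.json
#           data-00000-of-0000N.arrow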


def repopulate_dir(
    hf_data_dir: str = "./arrow_data", rehydrate_target_dir: str = "./data/processed"
):
    """Rehydrate the HICRIC processed data dir from the HF Dataset.

    This hydrates the data in the same format in which it was/is originally produced in
    the HICRIC repository's code.
    """

    for split in PARTITIONING_CATS:
        dataset = load_from_disk(os.path.join(hf_data_dir, split))
        # Get individual lines
        for instance in dataset:
            # Extract the output file/directory associated with line
            rel_path = instance["relative_path"]
            output_file_path = os.path.join(rehydrate_target_dir, rel_path)
            output_directory = os.path.join(
                rehydrate_target_dir, os.path.dirname(rel_path)
            )
            os.makedirs(output_directory, exist_ok=True)

            # Append mode: re-running without first clearing the target
            # directory will duplicate lines in the rehydrated files.
            with open(output_file_path, "a", encoding="utf-8") as writer:
                writer.write(json.dumps(instance) + "\n")

    print(f"Repopulated data saved to {rehydrate_target_dir}")
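
# Illustrative rehydrated record (one JSON object per line; field names other
# than "relative_path" are assumptions, not the dataset's actual schema):
#   {"relative_path": "<category>/<source>/<file>.jsonl", "text": "..."}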


if __name__ == "__main__":
    download_dir()
    repopulate_dir()