Dataset: LADaS
Modalities: Image, Text
Formats: parquet
Libraries: Datasets, Dask
License:
File size: 1,659 bytes
Commit: 6a59d9d
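
This file is the script that generates the parquet shards for the dataset: it loads each split through the LADaS loading script, embeds externally stored files (such as images) into the Arrow tables so the shards are self-contained, and writes them under data/ in shards of at most roughly 500 MB.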
"""Export the LADaS dataset splits to self-contained parquet shards under data/."""

import os

from datasets import config, load_dataset
from datasets.table import embed_table_storage
from datasets.utils.py_utils import convert_file_size_to_int
from tqdm import tqdm


def build_parquet(split):
    # Adapted from: https://discuss.huggingface.co/t/how-to-save-audio-dataset-with-parquet-format-on-disk/66179
    dataset = load_dataset("./src/LADaS.py", split=split, trust_remote_code=True)
    max_shard_size = "500MB"

    # Estimate the in-memory size of the split and derive the number of
    # shards needed to keep each one under max_shard_size.
    dataset_nbytes = dataset._estimate_nbytes()
    max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
    num_shards = max(int(dataset_nbytes / max_shard_size) + 1, 1)
    shards = (dataset.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))

    def shards_with_embedded_external_files(shards):
        # Embed externally stored files (e.g. images) directly into each
        # shard's Arrow table so the parquet output is self-contained.
        for shard in shards:
            original_format = shard.format
            shard = shard.with_format("arrow")
            shard = shard.map(
                embed_table_storage,
                batched=True,
                batch_size=1000,
                keep_in_memory=True,
            )
            shard = shard.with_format(**original_format)
            yield shard

    shards = shards_with_embedded_external_files(shards)

    os.makedirs("data", exist_ok=True)

    # Write each shard as data/{split}-{index:05d}-of-{num_shards:05d}.parquet.
    for index, shard in tqdm(
        enumerate(shards),
        desc=f"Saving {split} shards",
        total=num_shards,
    ):
        shard_path = f"data/{split}-{index:05d}-of-{num_shards:05d}.parquet"
        shard.to_parquet(shard_path)


if __name__ == "__main__":
    build_parquet("train")
    build_parquet("validation")
    build_parquet("test")
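
Once the shards are written, they can be loaded back without the original loading script by using the generic "parquet" builder. A minimal sketch; the glob patterns are assumptions that match the file names produced above:

from datasets import load_dataset

# Load the exported shards directly; each glob expands to the
# {split}-{index:05d}-of-{num_shards:05d}.parquet files written by the script.
dataset = load_dataset(
    "parquet",
    data_files={
        "train": "data/train-*.parquet",
        "validation": "data/validation-*.parquet",
        "test": "data/test-*.parquet",
    },
)
print(dataset)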