Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
File size: 697 Bytes
523bcc3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
import os
import shutil
from fire import Fire
from datasets import load_dataset


def normalize_example(example):
    """Rename a raw SCROLLS record's fields to the common source/target schema.

    Maps ``input`` -> ``source`` and ``output`` -> ``target`` while carrying
    ``id`` and ``pid`` through unchanged. Keys the record lacks raise KeyError.
    """
    field_map = {"source": "input", "target": "output", "id": "id", "pid": "pid"}
    return {new_key: example[old_key] for new_key, old_key in field_map.items()}


def main(dataset_name, num_proc=5):
    """Export one SCROLLS sub-dataset as normalized JSONL files plus a zip.

    Downloads ``tau/scrolls``/*dataset_name*, remaps every example via
    :func:`normalize_example`, writes one ``<split>.jsonl`` per split under
    ``../data/<dataset_name>/``, then zips that directory in place.

    Args:
        dataset_name: SCROLLS configuration name (e.g. ``"qasper"``).
        num_proc: worker processes handed to ``datasets.Dataset.map``.
    """
    scrolls = load_dataset("tau/scrolls", dataset_name)
    scrolls = scrolls.map(normalize_example, num_proc=num_proc)

    out_dir = f"../data/{dataset_name}"
    os.makedirs(out_dir, exist_ok=True)

    # One JSONL file per split (train/validation/test, whatever exists).
    for split_name in scrolls:
        target_path = os.path.join(out_dir, f"{split_name}.jsonl")
        scrolls[split_name].to_json(target_path)

    # Archive lands next to the directory as ../data/<dataset_name>.zip.
    shutil.make_archive(out_dir, 'zip', out_dir)


if __name__ == '__main__':
    Fire(main)