Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
fs / normalize_raw_data /normalize_scrolls.py
yuvalkirstain's picture
Script to normalize SCROLLS data.
523bcc3
raw
history blame
697 Bytes
import os
import shutil
from fire import Fire
from datasets import load_dataset
def normalize_example(example):
    """Map one raw SCROLLS record onto the normalized field names.

    Renames ``"input"`` -> ``"source"`` and ``"output"`` -> ``"target"``
    while carrying the ``"id"`` and ``"pid"`` identifiers through
    unchanged. Any other keys present in *example* are dropped from the
    returned mapping.
    """
    # new-name -> raw-name lookup keeps the renaming in one obvious place.
    field_map = {"source": "input", "target": "output", "id": "id", "pid": "pid"}
    return {new_key: example[raw_key] for new_key, raw_key in field_map.items()}
def main(dataset_name, num_proc=5):
    """Fetch one SCROLLS subset, normalize it, and export it as zipped JSONL.

    Downloads the ``dataset_name`` configuration of ``tau/scrolls``,
    applies :func:`normalize_example` to every record (``num_proc``
    parallel workers), writes each split to ``../data/<dataset_name>/<split>.jsonl``,
    and finally zips that directory into ``../data/<dataset_name>.zip``.
    """
    normalized = load_dataset("tau/scrolls", dataset_name).map(
        normalize_example, num_proc=num_proc
    )
    out_dir = f"../data/{dataset_name}"
    os.makedirs(out_dir, exist_ok=True)
    # One JSONL file per split (train/validation/test), named after the split.
    for split_name in normalized:
        split_path = os.path.join(out_dir, f"{split_name}.jsonl")
        normalized[split_name].to_json(split_path)
    # Archive the whole directory alongside it as <out_dir>.zip.
    shutil.make_archive(out_dir, 'zip', out_dir)
if __name__ == '__main__':
    # python-fire turns `main` into a CLI, e.g.:
    #   python normalize_scrolls.py <dataset_name> [--num_proc=N]
    Fire(main)