Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
yuvalkirstain committed on
Commit
e7bb7b0
1 Parent(s): 868b3d1

pass tokenizer

Browse files
normalize_raw_data/normalize_scrolls.py CHANGED
def main(dataset_name, num_proc=5, data_dir="../data/"):
    """Download a tau/scrolls subset, normalize it, and package it as a zip.

    Loads the requested SCROLLS configuration, maps ``normalize_example``
    over every record (dropping the raw ``input``/``output`` columns),
    writes each split to ``<data_dir>/<dataset_name>/<split>.jsonl``, zips
    that directory, then deletes the unzipped copy so only the archive
    remains.

    Args:
        dataset_name: Name of the tau/scrolls configuration to process.
        num_proc: Worker processes for the ``map`` call (default 5).
        data_dir: Directory under which ``<dataset_name>.zip`` is created.
    """
    dataset = load_dataset("tau/scrolls", dataset_name)
    dataset = dataset.map(
        normalize_example,
        num_proc=num_proc,
        remove_columns=["input", "output"],
    )
    dir_name = os.path.join(data_dir, dataset_name)
    os.makedirs(dir_name, exist_ok=True)
    for split in dataset:
        dataset[split].to_json(os.path.join(dir_name, f"{split}.jsonl"))
    # root_dir=dir_name makes archive members relative to the dataset
    # directory itself (the commit's fix — previously data_dir was used,
    # which embedded the parent directory inside the zip).
    shutil.make_archive(base_name=dir_name, format='zip', root_dir=dir_name)
    # Remove the unzipped working directory; the .zip is the deliverable.
    shutil.rmtree(dir_name)