Dataset: tau/fs

Commit 802326d (parent: 76632e2), committed by yuvalkirstain

Files changed (2):
  1. fs.py +4 -3
  2. normalize_raw_data/normalize_scrolls.py +6 -3
fs.py CHANGED
@@ -5,6 +5,7 @@
 import json
 import os
 import datasets
+from datasets import load_dataset

 _FS_CITATION = """
 TBD
@@ -120,8 +121,6 @@ class Fs(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         dl_dir = dl_manager.download_and_extract(self.config.data_url)
-        task_name = _get_task_name_from_data_url(self.config.data_url)
-        dl_dir = os.path.join(dl_dir, task_name)

         data_files = {} if self.config.data_files is not None else None
         if data_files is not None:
@@ -163,7 +162,9 @@ def _get_task_name_from_data_url(data_url):
     return data_url.split("/")[-1].split(".")[0]


-#if __name__ == '__main__':
+if __name__ == '__main__':
+    dataset = load_dataset("tau/fs", "summ_screen_fd")
+    x = 5
 # builder = Scrolls("scrolls", "summ_screen_fd")
 # builder.download_and_prepare()
 # dataset = builder.as_dataset("validation")
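
The new __main__ block doubles as a quick smoke test for the loader. Below is a minimal sketch of the same check from the user side, assuming the tau/fs repo and its summ_screen_fd config are live on the Hub; the "source"/"target" field names come from normalize_scrolls.py (next file), not from this diff:

# Hypothetical smoke test mirroring the new __main__ block in fs.py.
from datasets import load_dataset

dataset = load_dataset("tau/fs", "summ_screen_fd")
print(dataset)                     # DatasetDict keyed by split
# assumes a "train" split exists and that normalize_scrolls.py produced
# {"source": ..., "target": ...} records:
print(dataset["train"][0].keys())  # expected: dict_keys(['source', 'target'])

The x = 5 line in the diff appears to be a convenience statement to hang a debugger breakpoint on; it has no other effect.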
normalize_raw_data/normalize_scrolls.py CHANGED
@@ -8,15 +8,18 @@ def normalize_example(example):
     return {"source": example["input"], "target": example["output"]}


-def main(dataset_name, num_proc=5):
+def main(dataset_name, num_proc=5, data_dir="../data/"):
     dataset = load_dataset("tau/scrolls", dataset_name)
     dataset = dataset.map(normalize_example, num_proc=num_proc, remove_columns=["input", "output"])
     ic(dataset_name, dataset["train"][0])
-    dir_name = f"../data/{dataset_name}"
+    dir_name = os.path.join(data_dir, dataset_name)
     os.makedirs(dir_name, exist_ok=True)
     for split in dataset:
         dataset[split].to_json(os.path.join(dir_name, f"{split}.jsonl"))
-    shutil.make_archive(dir_name, 'zip', dir_name)
+    shutil.make_archive(base_name=dir_name,
+                        format='zip',
+                        root_dir=data_dir)
+    shutil.rmtree(dir_name)


 if __name__ == '__main__':
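
The rewritten make_archive call spells out its keywords, which makes the semantics easier to check. Here is a self-contained sketch of the same pattern under hypothetical paths; the base_dir keyword is an addition in this sketch, not part of the commit, used to limit the archive walk to the dataset folder:

import os
import shutil

data_dir = "/tmp/data"       # hypothetical stand-in for "../data/"
dataset_name = "demo"        # hypothetical stand-in for a real config name
dir_name = os.path.join(data_dir, dataset_name)

os.makedirs(dir_name, exist_ok=True)
with open(os.path.join(dir_name, "train.jsonl"), "w") as f:
    f.write('{"source": "s", "target": "t"}\n')

# base_name: output path without the ".zip" suffix -> /tmp/data/demo.zip
# root_dir:  directory the stored paths are relative to, so entries read
#            "demo/train.jsonl" rather than a bare "train.jsonl"
# base_dir:  (our addition) archive only this subtree instead of all of root_dir
archive = shutil.make_archive(base_name=dir_name, format="zip",
                              root_dir=data_dir, base_dir=dataset_name)
shutil.rmtree(dir_name)      # as in the commit: keep only the zip on disk
print(archive)               # /tmp/data/demo.zip

Note that the commit's call omits base_dir, so make_archive walks all of root_dir; anything else already sitting in data_dir would be swept into the zip as well. The trailing shutil.rmtree then removes the uncompressed folder, leaving only the compressed artifact.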