varora committed on
Commit 62d52d8 · verified · 1 Parent(s): 96bd861

Update hit.py

Files changed (1)
  1. hit.py +2 -0
hit.py CHANGED
@@ -111,6 +111,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         rel_path = _PATHS[self.config.name]
+        print(os.path.join(rel_path, "train", "*.gz"))
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -144,6 +145,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # List all files in the path .gz
         files = glob(filepath)
+        print(files)
         for subject_path in files:
             with gzip.open(subject_path, 'rb') as f:
                 data = pickle.load(f)
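
For context, below is a minimal sketch of how the two debug prints sit inside the usual GeneratorBasedBuilder flow of a loading script like hit.py. It is an assumption-based reconstruction, not the real file: the _PATHS placeholder, the minimal _info(), the gen_kwargs={"filepath": ...} wiring, and the yield key, data structure are inferred from the diff (the printed glob pattern and glob(filepath)), not copied from the original script.

import gzip
import os
import pickle
from glob import glob

import datasets

# Placeholder: in the real hit.py, _PATHS maps each config name to its data directory.
_PATHS = {"default": "data"}


class NewDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Minimal info; the real script defines the features of the dataset.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        rel_path = _PATHS[self.config.name]
        # Debug print added by this commit: the glob pattern for the train split.
        print(os.path.join(rel_path, "train", "*.gz"))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Assumption: the printed pattern is what _generate_examples receives.
                gen_kwargs={"filepath": os.path.join(rel_path, "train", "*.gz")},
            ),
        ]

    def _generate_examples(self, filepath):
        # List all files matching the .gz pattern.
        files = glob(filepath)
        # Debug print added by this commit: the files the pattern actually matched.
        print(files)
        for key, subject_path in enumerate(files):
            with gzip.open(subject_path, "rb") as f:
                data = pickle.load(f)
            # Assumption: each pickled object is yielded as one example;
            # the key only needs to be unique per example.
            yield key, data

Loading the dataset through datasets.load_dataset with this script (which may require trust_remote_code=True on recent datasets versions) would trigger both prints: the first during split generation, the second while examples are produced.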