Datasets:

ArXiv:
License:
kylewhy committed on
Commit
a6afd6d
·
1 Parent(s): fc037b7

update urls

Browse files
Files changed (2) hide show
  1. CEED.py +15 -30
  2. example.py +1 -0
CEED.py CHANGED
@@ -123,25 +123,11 @@ _FILES_SC = [
123
  ]
124
 
125
  _URLS = {
126
- "station": [f"{_REPO_NC}/{x}" for x in _FILES_NC] + [f"{_REPO_SC}/{x}" for x in _FILES_SC],
127
- "event": [f"{_REPO_NC}/{x}" for x in _FILES_NC] + [f"{_REPO_SC}/{x}" for x in _FILES_SC],
128
- "station_train": [f"{_REPO_NC}/{x}" for x in _FILES_NC[:-1]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[:-1]],
129
- "event_train": [f"{_REPO_NC}/{x}" for x in _FILES_NC[:-1]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[:-1]],
130
- "station_test": [f"{_REPO_NC}/{x}" for x in _FILES_NC[-1:]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[-1:]],
131
- "event_test": [f"{_REPO_NC}/{x}" for x in _FILES_NC[-1:]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[-1:]],
132
  }
133
 
134
 
135
- class BatchBuilderConfig(datasets.BuilderConfig):
136
- """
137
- yield a batch of event-based sample, so the number of sample stations can vary among batches
138
- Batch Config for CEED
139
- """
140
-
141
- def __init__(self, **kwargs):
142
- super().__init__(**kwargs)
143
-
144
-
145
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
146
  class CEED(datasets.GeneratorBasedBuilder):
147
  """CEED: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
@@ -254,7 +240,15 @@ class CEED(datasets.GeneratorBasedBuilder):
254
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
255
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
256
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
257
- urls = _URLS[self.config.name]
 
 
 
 
 
 
 
 
258
  # files = dl_manager.download(urls)
259
  files = dl_manager.download_and_extract(urls)
260
  # files = ["waveform_h5/1989.h5", "waveform_h5/1990.h5"]
@@ -266,13 +260,13 @@ class CEED(datasets.GeneratorBasedBuilder):
266
  name=datasets.Split.TRAIN,
267
  # These kwargs will be passed to _generate_examples
268
  gen_kwargs={
269
- "filepath": files[:-1],
270
  "split": "train",
271
  },
272
  ),
273
  datasets.SplitGenerator(
274
  name=datasets.Split.TEST,
275
- gen_kwargs={"filepath": files[-1:], "split": "test"},
276
  ),
277
  ]
278
  elif self.config.name == "station_train" or self.config.name == "event_train":
@@ -319,11 +313,7 @@ class CEED(datasets.GeneratorBasedBuilder):
319
  station_ids = list(event.keys())
320
  if len(station_ids) == 0:
321
  continue
322
- if (
323
- (self.config.name == "station")
324
- or (self.config.name == "station_train")
325
- or (self.config.name == "station_test")
326
- ):
327
  waveforms = np.zeros([3, self.nt], dtype="float32")
328
 
329
  for i, sta_id in enumerate(station_ids):
@@ -349,12 +339,7 @@ class CEED(datasets.GeneratorBasedBuilder):
349
  "station_location": station_location,
350
  }
351
 
352
- elif (
353
- (self.config.name == "event")
354
- or (self.config.name == "event_train")
355
- or (self.config.name == "event_test")
356
- ):
357
-
358
  waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
359
  phase_type = []
360
  phase_time = []
 
123
  ]
124
 
125
  _URLS = {
126
+ "train": [f"{_REPO_NC}/{x}" for x in _FILES_NC[:-1]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[:-1]],
127
+ "test": [f"{_REPO_NC}/{x}" for x in _FILES_NC[-1:]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[-1:]],
 
 
 
 
128
  }
129
 
130
 
 
 
 
 
 
 
 
 
 
 
131
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
132
  class CEED(datasets.GeneratorBasedBuilder):
133
  """CEED: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
 
240
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
241
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
242
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
243
+ if self.config.name in ["station", "event"]:
244
+ urls = _URLS["train"] + _URLS["test"]
245
+ elif self.config.name in ["station_train", "event_train"]:
246
+ urls = _URLS["train"]
247
+ elif self.config.name in ["station_test", "event_test"]:
248
+ urls = _URLS["test"]
249
+ else:
250
+ raise ValueError("config.name is not in BUILDER_CONFIGS")
251
+
252
  # files = dl_manager.download(urls)
253
  files = dl_manager.download_and_extract(urls)
254
  # files = ["waveform_h5/1989.h5", "waveform_h5/1990.h5"]
 
260
  name=datasets.Split.TRAIN,
261
  # These kwargs will be passed to _generate_examples
262
  gen_kwargs={
263
+ "filepath": files[:-2],
264
  "split": "train",
265
  },
266
  ),
267
  datasets.SplitGenerator(
268
  name=datasets.Split.TEST,
269
+ gen_kwargs={"filepath": files[-2:], "split": "test"},
270
  ),
271
  ]
272
  elif self.config.name == "station_train" or self.config.name == "event_train":
 
313
  station_ids = list(event.keys())
314
  if len(station_ids) == 0:
315
  continue
316
+ if ("station" in self.config.name):
 
 
 
 
317
  waveforms = np.zeros([3, self.nt], dtype="float32")
318
 
319
  for i, sta_id in enumerate(station_ids):
 
339
  "station_location": station_location,
340
  }
341
 
342
+ elif ("event" in self.config.name):
 
 
 
 
 
343
  waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
344
  phase_type = []
345
  phase_time = []
example.py CHANGED
@@ -10,6 +10,7 @@ ceed = load_dataset(
10
  # name="event_test",
11
  split="test",
12
  download_mode="force_redownload",
 
13
  )
14
 
15
  # print the first sample of the iterable dataset
 
10
  # name="event_test",
11
  split="test",
12
  download_mode="force_redownload",
13
+ trust_remote_code=True,
14
  )
15
 
16
  # print the first sample of the iterable dataset