Commit 7e5ff96 (parent e0ff225), committed by zhuwq0

add config files
Files changed (2):
  1. README.md +2 -0
  2. ceed.py +387 -0
README.md CHANGED
@@ -1,3 +1,5 @@
 ---
 license: mit
 ---
+
+## CEED: *C*alifornia *E*arthquak*E* *D*ataset for Machine Learning and Cloud Computing
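The added README line only names the dataset; a minimal loading sketch for the script introduced below could look like the following. The repo id `AI4EPS/CEED` is an assumption, since this commit does not state where the dataset is published.

```python
from datasets import load_dataset

# "station_test" is the script's default configuration; see ceed.py below.
ds = load_dataset(
    "AI4EPS/CEED",           # hypothetical repo id
    name="station_test",
    split="test",
    trust_remote_code=True,  # the dataset is defined by a loading script
)
print(ds)
```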
ceed.py ADDED
@@ -0,0 +1,387 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """CEED: California Earthquake Dataset for Machine Learning and Cloud Computing"""
+
+
+ import datasets
+ import fsspec
+ import h5py
+ import numpy as np
+
+ # TODO: Replace this placeholder BibTeX entry with the official citation.
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {CEED: California Earthquake Dataset for Machine Learning and Cloud Computing},
+ author={Zhu et al.},
+ year={2025}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the license for the dataset here if you can find it
+ _LICENSE = ""
+
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ _REPO_NC = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_h5"
+ # Northern California: one HDF5 file per year, 1987-2023.
+ _FILES_NC = [f"{year}.h5" for year in range(1987, 2024)]
+ _REPO_SC = "https://huggingface.co/datasets/AI4EPS/quakeflow_sc/resolve/main/waveform_h5"
+ _FILES_SC = [
+     "1999.h5",
+     "2000.h5",
+     "2001.h5",
+     "2002.h5",
+     "2003.h5",
+     "2004.h5",
+     "2005.h5",
+     "2006.h5",
+     "2007.h5",
+     "2008.h5",
+     "2009.h5",
+     "2010.h5",
+     "2011.h5",
+     "2012.h5",
+     "2013.h5",
+     "2014.h5",
+     "2015.h5",
+     "2016.h5",
+     "2017.h5",
+     "2018.h5",
+     "2019_0.h5",
+     "2019_1.h5",
+     "2019_2.h5",
+     "2020_0.h5",
+     "2020_1.h5",
+     "2021.h5",
+     "2022.h5",
+     "2023.h5",
+ ]
+
+ # The "station"/"event" configs cover every file; the *_train configs drop the last
+ # file of each region (2023.h5) and the *_test configs keep only those last files.
+ _URLS = {
+     "station": [f"{_REPO_NC}/{x}" for x in _FILES_NC] + [f"{_REPO_SC}/{x}" for x in _FILES_SC],
+     "event": [f"{_REPO_NC}/{x}" for x in _FILES_NC] + [f"{_REPO_SC}/{x}" for x in _FILES_SC],
+     "station_train": [f"{_REPO_NC}/{x}" for x in _FILES_NC[:-1]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[:-1]],
+     "event_train": [f"{_REPO_NC}/{x}" for x in _FILES_NC[:-1]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[:-1]],
+     "station_test": [f"{_REPO_NC}/{x}" for x in _FILES_NC[-1:]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[-1:]],
+     "event_test": [f"{_REPO_NC}/{x}" for x in _FILES_NC[-1:]] + [f"{_REPO_SC}/{x}" for x in _FILES_SC[-1:]],
+ }
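+ # For illustration, each entry expands to a direct HTTPS URL, e.g. _URLS["station_test"] is
+ # ["https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_h5/2023.h5",
+ #  "https://huggingface.co/datasets/AI4EPS/quakeflow_sc/resolve/main/waveform_h5/2023.h5"].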
+
+
+ class BatchBuilderConfig(datasets.BuilderConfig):
+     """
+     Batch config for CEED: yields batches of event-based samples, so the number of
+     stations per sample can vary between batches.
+     """
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
+ class CEED(datasets.GeneratorBasedBuilder):
+     """CEED: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # Number of time samples kept per waveform trace.
+     nt = 8192
+
+     # Multiple configurations: the "station"/"event" prefix selects the sample unit,
+     # and an optional "_train"/"_test" suffix restricts which files are used (see _URLS).
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="station", version=VERSION, description="yield station-based samples one by one from the whole dataset"
+         ),
+         datasets.BuilderConfig(
+             name="event", version=VERSION, description="yield event-based samples one by one from the whole dataset"
+         ),
+         datasets.BuilderConfig(
+             name="station_train",
+             version=VERSION,
+             description="yield station-based samples one by one from the training dataset",
+         ),
+         datasets.BuilderConfig(
+             name="event_train", version=VERSION, description="yield event-based samples one by one from the training dataset"
+         ),
+         datasets.BuilderConfig(
+             name="station_test", version=VERSION, description="yield station-based samples one by one from the test dataset"
+         ),
+         datasets.BuilderConfig(
+             name="event_test", version=VERSION, description="yield event-based samples one by one from the test dataset"
+         ),
+     ]
+
+     # A default configuration is not mandatory; just set one if it makes sense.
+     DEFAULT_CONFIG_NAME = "station_test"
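+     # Example (the repo id "AI4EPS/CEED" is an assumption; adjust it to wherever this
+     # script is published):
+     #   data = datasets.load_dataset("AI4EPS/CEED", name="event_train", split="train")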
+
+     def _info(self):
+         # Returns the DatasetInfo object describing the columns and their types,
+         # which differ between station-based and event-based configurations.
+         if self.config.name in ("station", "station_train", "station_test"):
+             features = datasets.Features(
+                 {
+                     "data": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
+                     "phase_time": datasets.Sequence(datasets.Value("string")),
+                     "phase_index": datasets.Sequence(datasets.Value("int32")),
+                     "phase_type": datasets.Sequence(datasets.Value("string")),
+                     "phase_polarity": datasets.Sequence(datasets.Value("string")),
+                     "begin_time": datasets.Value("string"),
+                     "end_time": datasets.Value("string"),
+                     "event_time": datasets.Value("string"),
+                     "event_time_index": datasets.Value("int32"),
+                     "event_location": datasets.Sequence(datasets.Value("float32")),
+                     "station_location": datasets.Sequence(datasets.Value("float32")),
+                 },
+             )
+         elif self.config.name in ("event", "event_train", "event_test"):
+             features = datasets.Features(
+                 {
+                     # The first dimension (number of stations) varies per event.
+                     "data": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
+                     "phase_time": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                     "phase_index": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
+                     "phase_type": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                     "phase_polarity": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                     "begin_time": datasets.Value("string"),
+                     "end_time": datasets.Value("string"),
+                     "event_time": datasets.Value("string"),
+                     "event_time_index": datasets.Value("int32"),
+                     "event_location": datasets.Sequence(datasets.Value("float32")),
+                     "station_location": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 },
+             )
+         else:
+             raise ValueError(f"config.name = {self.config.name} is not in BUILDER_CONFIGS")
+
+         return datasets.DatasetInfo(
+             # Description shown on the dataset page.
+             description=_DESCRIPTION,
+             # Features are defined above because they differ between configurations.
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Downloads the data and defines the splits; the configuration selected by
+         # the user is available as self.config.name.
+         urls = _URLS[self.config.name]
+         # The .h5 files are not archives, so download_and_extract simply downloads
+         # them and returns the local cache paths.
+         files = dl_manager.download_and_extract(urls)
+
+         if self.config.name in ("station", "event"):
+             # `files` is the NC files followed by the SC files. Hold out the last file
+             # of each region (2023.h5 for both) as the test split, so that this split
+             # matches the *_train/*_test configurations above.
+             n_nc = len(_FILES_NC)
+             train_files = files[: n_nc - 1] + files[n_nc:-1]
+             test_files = [files[n_nc - 1], files[-1]]
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs are passed to _generate_examples.
+                     gen_kwargs={
+                         "filepath": train_files,
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": test_files, "split": "test"},
+                 ),
+             ]
+         elif self.config.name in ("station_train", "event_train"):
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "filepath": files,
+                         "split": "train",
+                     },
+                 ),
+             ]
+         elif self.config.name in ("station_test", "event_test"):
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": files, "split": "test"},
+                 ),
+             ]
+         else:
+             raise ValueError(f"config.name = {self.config.name} is not in BUILDER_CONFIGS")
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         # Yields (key, example) tuples; the key must be unique for each example.
+         # `split` is unused here but kept to match the gen_kwargs above.
+
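+         # Assumed HDF5 layout, inferred from the reads below (not from official docs):
+         #   /<event_id>               group with attrs: begin_time, end_time, event_time,
+         #                             event_time_index, longitude, latitude, depth_km
+         #   /<event_id>/<station_id>  (3, n_samples) dataset with attrs: phase_type,
+         #                             phase_time, phase_index, phase_polarity,
+         #                             longitude, latitude, elevation_m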
+         for file in filepath:
+             # fsspec lets the same code read local cache paths or remote URLs.
+             with fsspec.open(file, "rb") as fs:
+                 with h5py.File(fs, "r") as fp:
+                     event_ids = list(fp.keys())
+                     for event_id in event_ids:
+                         event = fp[event_id]
+                         event_attrs = event.attrs
+                         begin_time = event_attrs["begin_time"]
+                         end_time = event_attrs["end_time"]
+                         event_location = [
+                             event_attrs["longitude"],
+                             event_attrs["latitude"],
+                             event_attrs["depth_km"],
+                         ]
+                         event_time = event_attrs["event_time"]
+                         event_time_index = event_attrs["event_time_index"]
+                         station_ids = list(event.keys())
+                         if len(station_ids) == 0:
+                             continue
+                         if self.config.name in ("station", "station_train", "station_test"):
+                             for sta_id in station_ids:
+                                 # Zero-pad traces shorter than nt samples; a fresh buffer
+                                 # per station avoids leaking samples between examples.
+                                 trace = event[sta_id][:, : self.nt]
+                                 waveforms = np.zeros([3, self.nt], dtype="float32")
+                                 waveforms[:, : trace.shape[1]] = trace
+                                 attrs = event[sta_id].attrs
+                                 phase_type = attrs["phase_type"]
+                                 phase_time = attrs["phase_time"]
+                                 phase_index = attrs["phase_index"]
+                                 phase_polarity = attrs["phase_polarity"]
+                                 # Station depth in km (negative elevation).
+                                 station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
+
+                                 yield f"{event_id}/{sta_id}", {
+                                     "data": waveforms,
+                                     "phase_time": phase_time,
+                                     "phase_index": phase_index,
+                                     "phase_type": phase_type,
+                                     "phase_polarity": phase_polarity,
+                                     "begin_time": begin_time,
+                                     "end_time": end_time,
+                                     "event_time": event_time,
+                                     "event_time_index": event_time_index,
+                                     "event_location": event_location,
+                                     "station_location": station_location,
+                                 }
+
+                         elif self.config.name in ("event", "event_train", "event_test"):
+                             waveforms = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
+                             phase_type = []
+                             phase_time = []
+                             phase_index = []
+                             phase_polarity = []
+                             station_location = []
+
+                             for i, sta_id in enumerate(station_ids):
+                                 # Zero-pad traces shorter than nt samples.
+                                 trace = event[sta_id][:, : self.nt]
+                                 waveforms[i, :, : trace.shape[1]] = trace
+                                 attrs = event[sta_id].attrs
+                                 phase_type.append(list(attrs["phase_type"]))
+                                 phase_time.append(list(attrs["phase_time"]))
+                                 phase_index.append(list(attrs["phase_index"]))
+                                 phase_polarity.append(list(attrs["phase_polarity"]))
+                                 station_location.append(
+                                     [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
+                                 )
+                             yield event_id, {
+                                 "data": waveforms,
+                                 "phase_time": phase_time,
+                                 "phase_index": phase_index,
+                                 "phase_type": phase_type,
+                                 "phase_polarity": phase_polarity,
+                                 "begin_time": begin_time,
+                                 "end_time": end_time,
+                                 "event_time": event_time,
+                                 "event_time_index": event_time_index,
+                                 "event_location": event_location,
+                                 "station_location": station_location,
+                             }
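A quick consumption sketch for the script above. The repo id `AI4EPS/CEED` and the `trust_remote_code=True` flag are assumptions (the commit does not say where the script is published); with the `event` configs, each example carries a variable number of stations, so batching needs padding or a custom collate function.

```python
import numpy as np
from datasets import load_dataset

# Hypothetical repo id; adjust to wherever ceed.py is actually hosted.
ds = load_dataset(
    "AI4EPS/CEED",
    name="event_test",
    split="test",
    trust_remote_code=True,  # required for script-based datasets in recent `datasets` releases
    streaming=True,          # the yearly HDF5 files are large; stream instead of downloading all
)

for example in ds:
    data = np.asarray(example["data"], dtype="float32")  # (num_stations, 3, 8192)
    print(data.shape, example["event_time"], example["event_location"])
    break
```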