import datasets
import h5py
import numpy as np
|
_CITATION = """\
WIP
"""
|
_DESCRIPTION = """\
QuakeSet is a dataset of earthquake imagery acquired by the Copernicus Sentinel-1
satellites. Each example pairs an image taken before an earthquake with one taken
after it; an additional acquisition preceding the "before" image provides negative
(unaffected) pairs. Ground truth magnitudes and hypocenter locations are provided
by the International Seismological Centre (ISC).
"""

_HOMEPAGE = "https://huggingface.co/datasets/DarthReca/quakeset"

_LICENSE = "OPENRAIL"

# Relative path of the HDF5 archive inside the dataset repository; the download
# manager resolves it against the repo.
_URLS = "earthquakes.h5"

class QuakeSet(datasets.GeneratorBasedBuilder):
    """Bi-temporal Sentinel-1 patches for earthquake detection and magnitude estimation."""

    VERSION = datasets.Version("1.0.0")
|
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Default configuration. No other configuration is available.",
        )
    ]

    DEFAULT_CONFIG_NAME = "default"
|
    def _info(self):
        features = datasets.Features(
            {
                # Stacked pre- and post-event acquisitions, channels first:
                # two channels per acquisition, four in total.
                "pre_post_image": datasets.Array3D(
                    shape=(4, 512, 512), dtype="float32"
                ),
                # 1 if the pair spans the earthquake (pre/post), 0 if both
                # images predate it (before/pre).
                "affected": datasets.ClassLabel(num_classes=2),
                "magnitude": datasets.Value("float32"),
                "hypocenter": datasets.Sequence(datasets.Value("float32"), length=3),
                "epsg": datasets.Value("int32"),
                # Coordinate axes of the patch in the CRS identified by `epsg`.
                "x": datasets.Sequence(datasets.Value("float32"), length=512),
                "y": datasets.Sequence(datasets.Value("float32"), length=512),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
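
    # Assumption: the two channels of each acquisition correspond to the VV and
    # VH polarizations of the Sentinel-1 products; the script itself does not
    # name the channels.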

    def _split_generators(self, dl_manager):
        # `download` returns the local path of the downloaded file, which is
        # passed unchanged to each split's generator.
        filepath = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": filepath, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath, "split": "test"},
            ),
        ]
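
    # Note: all three splits read the same HDF5 file; each top-level group's
    # "split" attribute decides which split its patches belong to (see below).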

    def _generate_examples(self, filepath, split):
        with h5py.File(filepath, "r") as f:
            # Collect (sample_id, label, attributes) triples for the requested
            # split. Each top-level group holds patch sub-groups plus "x"/"y"
            # coordinate arrays, which are not patches and must be skipped.
            sample_ids = []
            for key, patches in f.items():
                attributes = dict(patches.attrs)
                if attributes["split"] != split:
                    continue
                patch_names = [p for p in patches.keys() if p not in ("x", "y")]
                # Positive examples: the (pre, post) pair spans the earthquake.
                sample_ids += [(f"{key}/{p}", 1, attributes) for p in patch_names]
                # Negative examples: the (before, pre) pair predates it.
                sample_ids += [
                    (f"{key}/{p}", 0, attributes)
                    for p in patch_names
                    if "before" in patches[p]
                ]

            for sample_id, label, attributes in sample_ids:
                resource_id, patch_id = sample_id.split("/")
                # Recover the patch position in the row-major grid of 512x512
                # tiles covering the scene, then slice the matching segments of
                # the coordinate axes.
                x = f[resource_id]["x"][...]
                y = f[resource_id]["y"][...]
                patch_index = int(patch_id.split("_")[1])
                patches_per_row = x.shape[0] // 512
                x_start = patch_index % patches_per_row
                y_start = patch_index // patches_per_row
                x = x[x_start * 512 : (x_start + 1) * 512]
                y = y[y_start * 512 : (y_start + 1) * 512]

                # Positive pairs use (pre, post); negative pairs shift one step
                # back in time and use (before, pre).
                pre_key = "pre" if label == 1 else "before"
                post_key = "post" if label == 1 else "pre"
                pre_sample = f[sample_id][pre_key][...]
                post_sample = f[sample_id][post_key][...]
                # Replace NaNs and move channels first: (H, W, 2) -> (2, H, W).
                pre_sample = np.nan_to_num(pre_sample, nan=0).transpose(2, 0, 1)
                post_sample = np.nan_to_num(post_sample, nan=0).transpose(2, 0, 1)
                # Stack the two acquisitions along the channel axis: (4, H, W).
                sample = np.concatenate(
                    [pre_sample, post_sample], axis=0, dtype=np.float32
                )

                yield f"{sample_id}/{post_key}", {
                    "pre_post_image": sample,
                    "affected": label,
                    "magnitude": np.float32(attributes["magnitude"]),
                    "hypocenter": attributes["hypocenter"],
                    "epsg": attributes["epsg"],
                    "x": x.flatten(),
                    "y": y.flatten(),
                }
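

# A quick shape sanity check (a sketch, not part of the loader; assumes a local
# copy of this script saved as "quakeset.py", a hypothetical filename):
#
#   from datasets import load_dataset
#   ds = load_dataset("quakeset.py", split="train", trust_remote_code=True)
#   sample = ds.with_format("numpy")[0]
#   assert sample["pre_post_image"].shape == (4, 512, 512)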
|
|