Datasets:

Modalities:
Image
Languages:
English
DOI:
Libraries:
Datasets
License:
xhagrg committed on
Commit
eb4bbe4
·
1 Parent(s): 24aa452

Add dataset loading script.

Browse files
Files changed (1) hide show
  1. hls_burn_scars.py +92 -0
hls_burn_scars.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import datasets
4
+
5
+
6
+ _CITATION = """\
7
+ @software{HLS_Foundation_2023,
8
+ author = {Phillips, Christopher and Roy, Sujit and Ankur, Kumar and Ramachandran, Rahul},
9
+ doi = {10.57967/hf/0956},
10
+ month = aug,
11
+ title = {{HLS Foundation Burnscars Dataset}},
12
+ url = {https://huggingface.co/ibm-nasa-geospatial/hls_burn_scars},
13
+ year = {2023}
14
+ }
15
+ """
16
+
17
+ _DESCRIPTION = """\
18
+ This dataset contains Harmonized Landsat and Sentinel-2 imagery of burn scars and the associated masks for the years 2018-2021 over the contiguous United States. There are 804 512x512 scenes. Its primary purpose is for training geospatial machine learning models.
19
+ """
20
+
21
+ _HOMEPAGE = "https://huggingface.co/datasets/ibm-nasa-geospatial/hls_burn_scars"
22
+
23
+ _LICENSE = "cc-by-4.0"
24
+
25
+ _URLS = {
26
+ "burn_scars": {
27
+ "train/val": "https://huggingface.co/datasets/ibm-nasa-geospatial/hls_burn_scars/resolve/main/hls_burn_scars.tar.gz"
28
+ }
29
+ }
30
+
31
class HLSBurnScars(datasets.GeneratorBasedBuilder):
    """HLS Burn Scars dataset builder.

    Harmonized Landsat and Sentinel-2 (HLS) burn-scar scenes with paired
    segmentation masks, loaded from a single train/val tarball.
    (The original docstring said "MIT Scene Parsing Benchmark dataset",
    a copy-paste leftover from another loading script.)
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="hls_burn_scars", version=VERSION, description=_DESCRIPTION),
    ]

    def _info(self):
        """Return the dataset metadata: each example is an image plus its mask."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "annotation": datasets.Image(),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the train/val/test splits.

        There is no dedicated test archive; the validation data backs the
        TEST split as well.
        """
        # BUG FIX: the config is named "hls_burn_scars", so the original
        # `_URLS[self.config.name]` raised KeyError — the archive lives under
        # the "burn_scars" key.
        urls = _URLS["burn_scars"]

        # download_and_extract mirrors the structure of its argument, so the
        # extracted directory sits under the "train/val" key (the original
        # indexed non-existent "training"/"validation" keys).
        data_dir = dl_manager.download_and_extract(urls)["train/val"]
        # NOTE(review): assumes the tarball extracts into "training" and
        # "validation" sub-directories — confirm against the published archive.
        train_data = os.path.join(data_dir, "training")
        val_data = os.path.join(data_dir, "validation")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": train_data,
                    "split": "training",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data": val_data,
                    "split": "validation",
                },
            ),
            # BUG FIX: the original declared Split.VALIDATION twice, which
            # `datasets` rejects as a duplicate split name; its
            # split="testing" kwarg shows a TEST split was intended.
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data": val_data,
                    "split": "testing",
                },
            ),
        ]

    def _generate_examples(self, data, split):
        """Yield (index, example) pairs from the extracted split directory.

        Images end in "_merged.tif"; each image's mask sits beside it with
        that suffix replaced by ".mask.tif".

        BUG FIX: the original iterated the directory-path string `data` as
        (path, file) pairs (a TypeError) and called file.read() twice on the
        same handle, so the annotation bytes were never the mask's content.
        """
        idx = 0
        for root, _dirs, files in os.walk(data):
            for fname in sorted(files):  # sorted for a deterministic order
                if not fname.endswith("_merged.tif"):
                    continue
                image_path = os.path.join(root, fname)
                mask_path = image_path.replace("_merged.tif", ".mask.tif")
                with open(image_path, "rb") as f:
                    image_bytes = f.read()
                with open(mask_path, "rb") as f:
                    mask_bytes = f.read()
                yield idx, {
                    "image": {"path": image_path, "bytes": image_bytes},
                    "annotation": {"path": mask_path, "bytes": mask_bytes},
                }
                idx += 1