changed setup code to calculate footprints, filename
GBI_16_4D.py  ADDED  (+208 -0)
@@ -0,0 +1,208 @@
import os
import random
from glob import glob
import json
from huggingface_hub import hf_hub_download
from tqdm import tqdm

from astropy.io import fits
from astropy.wcs import WCS
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs


_DESCRIPTION = (
    "GBI-16-4D is a dataset which is part of the AstroCompress project. It contains data "
    "assembled from the Sloan Digital Sky Survey (SDSS). Each FITS file contains a series "
    "of 800x800 pixel uint16 observations of the same portion of the Stripe82 field, "
    "taken in 5 bandpass filters (u, g, r, i, z) over time. The filenames give the "
    "starting run, field, camcol of the observations, the number of filtered images per "
    "timestep, and the number of timesteps. For example: "
    "`cube_center_run4203_camcol6_f44_35-5-800-800.fits` contains 35 frames of 800x800 "
    "pixel images in 5 bandpasses starting with run 4203, field 44, and camcol 6. "
    "The images are stored in the FITS standard."
)

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-4D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    }
}

_REPO_ID = "AstroCompress/GBI-16-4D"


class GBI_16_4D(datasets.GeneratorBasedBuilder):
    """GBI-16-4D Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Array4D(shape=(None, 5, 800, 800), dtype="uint16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "ntimes": datasets.Value("int64"),
                    "nbands": datasets.Value("int64"),
                    "image_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

    def _split_generators(self, dl_manager: DownloadManager):

        ret = []
        base_path = dl_manager._base_path
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            if locally_run:
                split_file_location = os.path.normpath(os.path.join(path, _URLS[self.config.name][split]))
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(repo_id=_REPO_ID, filename=_URLS[self.config.name][split], repo_type="dataset")
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append({"ra": item["ra"],
                                          "dec": item["dec"],
                                          "pixscale": item["pixscale"],
                                          "ntimes": item["ntimes"],
                                          "nbands": item["nbands"],
                                          "image_id": item["image_id"]})
            if locally_run:
                data_urls = [os.path.normpath(os.path.join(path, data_filename)) for data_filename in data_filenames]
                data_files = [dl_manager.download(data_url) for data_url in data_urls]
            else:
                data_urls = data_filenames
                data_files = [hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset") for data_url in data_urls]
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
                    gen_kwargs={"filepaths": data_files,
                                "split_file": split_file,
                                "split": split,
                                "data_metadata": data_metadata},
                ),
            )
        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-4D examples"""

        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            with fits.open(filepath, memmap=False, ignore_missing_simple=True) as hdul:
                image_data = hdul[0].data.tolist()
                yield task_instance_key, {**{"image": image_data}, **item}

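# ---------------------------------------------------------------------------
# Offline helpers (not called by the builder above): get_fits_footprint and
# make_split_jsonl_files are run ahead of time to produce the
# ./splits/*_train.jsonl and ./splits/*_test.jsonl index files that
# _split_generators reads when the dataset is loaded.
# ---------------------------------------------------------------------------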
def get_fits_footprint(fits_path):
    """
    Process a FITS file to extract WCS information and calculate the footprint.

    Parameters:
    fits_path (str): Path to the FITS file.

    Returns:
    list: A list containing the flattened WCS footprint coordinates.
    """
    with fits.open(fits_path) as hdul:
        hdul[0].data = hdul[0].data[0, 0]
        wcs = WCS(hdul[0].header)
        shape = sorted(tuple(wcs.pixel_shape))[:2]
        footprint = wcs.calc_footprint(axes=shape)
        coords = list(footprint.flatten())
    return coords

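# Note: the "footprint" computed by get_fits_footprint() ends up only in the
# jsonl index files written below; it is not declared as a feature in _info(),
# so it is not part of the examples yielded by _generate_examples().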
def make_split_jsonl_files(config_type="tiny", data_dir="./data",
                           outdir="./splits", seed=42):
    """
    Create jsonl files for the GBI-16-4D dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" and "full".
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.
    """
    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = glob(os.path.join(data_dir, "*.fits"))
    random.shuffle(fits_files)
    if config_type == "tiny":
        train_files = fits_files[:2]
        test_files = fits_files[2:3]
    elif config_type == "full":
        split_idx = int(0.8 * len(fits_files))
        train_files = fits_files[:split_idx]
        test_files = fits_files[split_idx:]
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in tqdm(files):
                print(file, flush=True, end="...")
                with fits.open(file, memmap=False, ignore_missing_simple=True) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    ra = hdul[0].header.get('CRVAL1', 0)
                    dec = hdul[0].header.get('CRVAL2', 0)
                    pixscale = hdul[0].header.get('CD1_2', 0.396)
                    ntimes = hdul[0].data.shape[0]
                    nbands = hdul[0].data.shape[1]
                    footprint = get_fits_footprint(file)
                    item = {"image_id": image_id, "image": file, "ra": ra, "dec": dec,
                            "pixscale": pixscale, "ntimes": ntimes, "nbands": nbands, "footprint": footprint}
                    out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")
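For context, a minimal usage sketch of this loading script (assumptions: the raw *.fits files sit in ./data when regenerating splits locally, and the installed `datasets` version still supports script-based builders via `trust_remote_code`):

    # Rebuild the split index files (one-off, when preparing the repository):
    # from GBI_16_4D import make_split_jsonl_files
    # make_split_jsonl_files(config_type="tiny", data_dir="./data", outdir="./splits")

    # Load the "tiny" config through the builder defined above:
    from datasets import load_dataset
    ds = load_dataset("AstroCompress/GBI-16-4D", "tiny", split="train", trust_remote_code=True)
    print(ds[0]["image_id"], ds[0]["ra"], ds[0]["dec"])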