rithwiks committed
Commit 35db85e · 2 Parent(s): 352e0d3 3b9a488

Merge branch 'main' of https://huggingface.co/datasets/AstroCompress/SBI-16-3D into main

Files changed (2)
  1. README.md +10 -2
  2. SBI-16-3D.py +152 -25
README.md CHANGED
@@ -14,7 +14,7 @@ dataset_info:
         shape:
         - 2048
         - 2048
-        dtype: uint16
+        dtype: uint8
   - name: ra
     dtype: float64
   - name: dec
@@ -69,8 +69,16 @@ Then in your python script:
 
 ```python
 from datasets import load_dataset
+import numpy
 dataset = load_dataset("AstroCompress/SBI-16-3D", "tiny")
-ds = dataset.with_format("np")
+ds = dataset.with_format("np", columns=["image"], dtype=numpy.uint8)
+
+# or torch
+import torch
+dst = dataset.with_format("torch", columns=["image"], dtype=torch.uint8)
+
+# or pandas
+dsp = dataset.with_format("pandas", columns=["image"], dtype=numpy.uint8)
 ```
 
 ## Local Use
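For orientation, here is a minimal sketch of how the README example committed above could be used end to end; the iteration loop and the expected cube shape are illustrative assumptions, not part of the committed README.

```python
# Sketch only: mirrors the README example above; the loop and the
# expected (ntimes, 2048, 2048) cube shape are assumptions.
from datasets import load_dataset
import numpy

dataset = load_dataset("AstroCompress/SBI-16-3D", "tiny")
ds = dataset.with_format("np", columns=["image"], dtype=numpy.uint8)

for example in ds["train"]:
    cube = example["image"]        # one exposure as a NumPy array
    print(cube.shape, cube.dtype)  # e.g. (ntimes, 2048, 2048), uint8
```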
SBI-16-3D.py CHANGED
@@ -13,9 +13,9 @@ from datasets import DownloadManager
 from fsspec.core import url_to_fs
 
 _DESCRIPTION = (
-    """SBI-16-3D is a dataset which is part of the AstroCompress project. """
-    """It contains data assembled from the James Webb Space Telescope (JWST). """
-    """<TODO>Describe data format</TODO>"""
+    """SBI-16-3D is a dataset which is part of the AstroCompress project. """
+    """It contains data assembled from the James Webb Space Telescope (JWST). """
+    """<TODO>Describe data format</TODO>"""
 )
 
 _HOMEPAGE = "https://google.github.io/AstroCompress"
@@ -32,15 +32,16 @@ _URLS = {
     "full": {
         "train": "./splits/full_train.jsonl",
         "test": "./splits/full_test.jsonl",
-    }
+    },
 }
 
 _REPO_ID = "AstroCompress/SBI-16-3D"
 
-class SBI_16_4D(datasets.GeneratorBasedBuilder):
-    """SBI-16-4D Dataset"""
 
-    VERSION = datasets.Version("1.0.1")
+class SBI_16_3D(datasets.GeneratorBasedBuilder):
+    """SBI-16-3D Dataset"""
+
+    VERSION = datasets.Version("1.0.2")
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
@@ -83,39 +84,65 @@ class SBI_16_4D(datasets.GeneratorBasedBuilder):
 
         ret = []
         base_path = dl_manager._base_path
-        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
+        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
         _, path = url_to_fs(base_path)
 
         for split in ["train", "test"]:
             if locally_run:
-                split_file_location = os.path.normpath(os.path.join(path, _URLS[self.config.name][split]))
+                split_file_location = os.path.normpath(
+                    os.path.join(path, _URLS[self.config.name][split])
+                )
                 split_file = dl_manager.download_and_extract(split_file_location)
             else:
-                split_file = hf_hub_download(repo_id=_REPO_ID, filename=_URLS[self.config.name][split], repo_type="dataset")
+                split_file = hf_hub_download(
+                    repo_id=_REPO_ID,
+                    filename=_URLS[self.config.name][split],
+                    repo_type="dataset",
+                )
             with open(split_file, encoding="utf-8") as f:
                 data_filenames = []
                 data_metadata = []
                 for line in f:
                     item = json.loads(line)
                     data_filenames.append(item["image"])
-                    data_metadata.append({"ra": item["ra"],
-                                          "dec": item["dec"],
-                                          "pixscale": item["pixscale"],
-                                          "ntimes": item["ntimes"],
-                                          "image_id": item["image_id"]})
+                    data_metadata.append(
+                        {
+                            "ra": item["ra"],
+                            "dec": item["dec"],
+                            "pixscale": item["pixscale"],
+                            "ntimes": item["ntimes"],
+                            "image_id": item["image_id"],
+                        }
+                    )
             if locally_run:
-                data_urls = [os.path.normpath(os.path.join(path, data_filename)) for data_filename in data_filenames]
-                data_files = [dl_manager.download(data_url) for data_url in data_urls]
+                data_urls = [
+                    os.path.normpath(os.path.join(path, data_filename))
+                    for data_filename in data_filenames
+                ]
+                data_files = [
+                    dl_manager.download(data_url) for data_url in data_urls
+                ]
             else:
                 data_urls = data_filenames
-                data_files = [hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset") for data_url in data_urls]
+                data_files = [
+                    hf_hub_download(
+                        repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
+                    )
+                    for data_url in data_urls
+                ]
             ret.append(
                 datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
-                    gen_kwargs={"filepaths": data_files,
-                                "split_file": split_file,
-                                "split": split,
-                                "data_metadata": data_metadata},
+                    name=(
+                        datasets.Split.TRAIN
+                        if split == "train"
+                        else datasets.Split.TEST
+                    ),
+                    gen_kwargs={
+                        "filepaths": data_files,
+                        "split_file": split_file,
+                        "split": split,
+                        "data_metadata": data_metadata,
+                    },
                 ),
             )
         return ret
@@ -128,5 +155,105 @@ class SBI_16_4D(datasets.GeneratorBasedBuilder):
             with fits.open(filepath, memmap=False) as hdul:
                 # the first axis is integrations one, so we take the first element
                 # the second axis is the groups (time) axis and varies between images
-                image_data = hdul["SCI"].data[0,:,:,:].tolist()
-                yield task_instance_key, {**{"image": image_data}, **item}
+                image_data = hdul["SCI"].data[0, :, :, :]  # .tolist()
+                yield task_instance_key, {**{"image": image_data}, **item}
+
+
+def get_fits_footprint(fits_path):
+    """
+    Process a FITS file to extract WCS information and calculate the footprint.
+
+    Parameters:
+    fits_path (str): Path to the FITS file.
+
+    Returns:
+    tuple: A tuple containing the WCS footprint coordinates.
+    """
+    with fits.open(fits_path) as hdul:
+        hdul[1].data = hdul[1].data[0, 0]
+        wcs = WCS(hdul[1].header)
+        shape = sorted(tuple(wcs.pixel_shape))[:2]
+        footprint = wcs.calc_footprint(axes=shape)
+        coords = list(footprint.flatten())
+        return coords
+
+
+def calculate_pixel_scale(header):
+    """
+    Calculate the pixel scale in arcseconds per pixel from a FITS header.
+
+    Parameters:
+    header (astropy.io.fits.header.Header): The FITS header containing WCS information.
+
+    Returns:
+    Mean of the pixel scales in x and y.
+    """
+
+    # Calculate the pixel scales in arcseconds per pixel
+    pixscale_x = header.get("CDELT1", np.nan)
+    pixscale_y = header.get("CDELT2", np.nan)
+
+    return np.mean([pixscale_x, pixscale_y])
+
+
+def make_split_jsonl_files(
+    config_type="tiny", data_dir="./data", outdir="./splits", seed=42
+):
+    """
+    Create jsonl files for the SBI-16-3D dataset.
+
+    config_type: str, default="tiny"
+        The type of split to create. Options are "tiny" and "full".
+    data_dir: str, default="./data"
+        The directory where the FITS files are located.
+    outdir: str, default="./splits"
+        The directory where the jsonl files will be created.
+    seed: int, default=42
+        The seed for the random split.
+    """
+    random.seed(seed)
+    os.makedirs(outdir, exist_ok=True)
+
+    fits_files = glob(os.path.join(data_dir, "*.fits"))
+    random.shuffle(fits_files)
+    if config_type == "tiny":
+        train_files = fits_files[:2]
+        test_files = fits_files[2:3]
+    elif config_type == "full":
+        split_idx = int(0.8 * len(fits_files))
+        train_files = fits_files[:split_idx]
+        test_files = fits_files[split_idx:]
+    else:
+        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
+
+    def create_jsonl(files, split_name):
+        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
+        with open(output_file, "w") as out_f:
+            for file in tqdm(files):
+                # print(file, flush=True, end="...")
+                with fits.open(file, memmap=False) as hdul:
+                    image_id = os.path.basename(file).split(".fits")[0]
+                    ra = hdul["SCI"].header.get("CRVAL1", 0)
+                    dec = hdul["SCI"].header.get("CRVAL2", 0)
+                    pixscale = calculate_pixel_scale(hdul["SCI"].header)
+                    footprint = get_fits_footprint(file)
+                    # get the number of groups per int
+                    ntimes = hdul["SCI"].data.shape[1]
+                    item = {
+                        "image_id": image_id,
+                        "image": file,
+                        "ra": ra,
+                        "dec": dec,
+                        "pixscale": pixscale,
+                        "ntimes": ntimes,
+                        "footprint": footprint,
+                    }
+                    out_f.write(json.dumps(item) + "\n")
+
+    create_jsonl(train_files, "train")
+    create_jsonl(test_files, "test")
+
+
+if __name__ == "__main__":
+    make_split_jsonl_files("tiny")
+    make_split_jsonl_files("full")