rithwiks committed
Commit 140f1f2 · 1 Parent(s): 86d3fd8

removed old filename

Files changed (1)
  1. GBI-16-4D.py +0 -184
GBI-16-4D.py DELETED
@@ -1,184 +0,0 @@
import os
import random
from glob import glob
import json
from huggingface_hub import hf_hub_download


from astropy.io import fits
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs

_DESCRIPTION = (
    "GBI-16-4D is a dataset which is part of the AstroCompress project. It contains data "
    "assembled from the Sloan Digital Sky Survey (SDSS). Each FITS file contains a series "
    "of 800x800 pixel uint16 observations of the same portion of the Stripe82 field, "
    "taken in 5 bandpass filters (u, g, r, i, z) over time. The filenames give the "
    "starting run, field, camcol of the observations, the number of filtered images per "
    "timestep, and the number of timesteps. For example: "
    "`cube_center_run4203_camcol6_f44_35-5-800-800.fits` contains 35 frames of 800x800 "
    "pixel images in 5 bandpasses starting with run 4203, field 44, and camcol 6. "
    "The images are stored in the FITS standard."
)

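# Editorial sketch (not part of the original script): the filename convention
# described in _DESCRIPTION can be parsed mechanically. The helper name and the
# regex are illustrative assumptions based on the pattern
# cube_center_run{run}_camcol{camcol}_f{field}_{ntimes}-{nbands}-{height}-{width}.fits
import re

def _parse_cube_filename(filename):
    """Recover run, camcol, field, and cube shape from a GBI-16-4D filename."""
    m = re.match(
        r"cube_center_run(?P<run>\d+)_camcol(?P<camcol>\d+)_f(?P<field>\d+)"
        r"_(?P<ntimes>\d+)-(?P<nbands>\d+)-(?P<height>\d+)-(?P<width>\d+)\.fits",
        os.path.basename(filename),
    )
    return {k: int(v) for k, v in m.groupdict().items()} if m else None

# Example: _parse_cube_filename("cube_center_run4203_camcol6_f44_35-5-800-800.fits")
# -> {"run": 4203, "camcol": 6, "field": 44, "ntimes": 35, "nbands": 5,
#     "height": 800, "width": 800}
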
_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-4D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    }
}

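# Each split file above is JSON Lines: one record per FITS cube, carrying the
# fields written by `make_split_jsonl_files` below. An illustrative record
# (values are made up except the filename-derived ones):
# {"image_id": "cube_center_run4203_camcol6_f44_35-5-800-800",
#  "image": "./data/cube_center_run4203_camcol6_f44_35-5-800-800.fits",
#  "ra": 0.0, "dec": 0.0, "pixscale": 0.396, "ntimes": 35, "nbands": 5}
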
_REPO_ID = "AstroCompress/GBI-16-4D"


class GBI_16_4D(datasets.GeneratorBasedBuilder):
    """GBI-16-4D Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Array4D(shape=(None, 5, 800, 800), dtype="uint16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "ntimes": datasets.Value("int64"),
                    "nbands": datasets.Value("int64"),
                    "image_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

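    # Editorial note: for the example cube named in _DESCRIPTION, a yielded record
    # would carry an "image" array of shape (35, 5, 800, 800), i.e. ntimes x nbands
    # x height x width in uint16; the leading axis is None in the schema above
    # because the number of timesteps varies from cube to cube.
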
    def _split_generators(self, dl_manager: DownloadManager):

        ret = []
        base_path = dl_manager._base_path
        # Detect whether the script runs against a local checkout of the repo
        # or against the Hugging Face Hub.
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            if locally_run:
                split_file_location = os.path.normpath(os.path.join(path, _URLS[self.config.name][split]))
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(repo_id=_REPO_ID, filename=_URLS[self.config.name][split], repo_type="dataset")
            # Read the jsonl manifest: one FITS path plus its metadata per line.
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append({"ra": item["ra"],
                                          "dec": item["dec"],
                                          "pixscale": item["pixscale"],
                                          "ntimes": item["ntimes"],
                                          "nbands": item["nbands"],
                                          "image_id": item["image_id"]})
            if locally_run:
                data_urls = [os.path.normpath(os.path.join(path, data_filename)) for data_filename in data_filenames]
                data_files = [dl_manager.download(data_url) for data_url in data_urls]
            else:
                data_urls = data_filenames
                data_files = [hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset") for data_url in data_urls]
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
                    gen_kwargs={"filepaths": data_files,
                                "split_file": split_file,
                                "split": split,
                                "data_metadata": data_metadata},
                ),
            )
        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-4D examples"""

        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            with fits.open(filepath, memmap=False) as hdul:
                image_data = hdul[0].data.tolist()
            yield task_instance_key, {**{"image": image_data}, **item}

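# Usage sketch (editorial, not part of the original script): loading this
# script-based dataset with the `datasets` library. Recent `datasets` versions
# require trust_remote_code=True for loader scripts like this one.
#
#     from datasets import load_dataset
#     ds = load_dataset("AstroCompress/GBI-16-4D", "tiny", split="train",
#                       trust_remote_code=True)
#     print(ds[0]["image_id"], ds[0]["ntimes"], ds[0]["nbands"])
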

def make_split_jsonl_files(config_type="tiny", data_dir="./data",
                           outdir="./splits", seed=42):
    """
    Create jsonl files for the GBI-16-4D dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" and "full".
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.
    """
    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = glob(os.path.join(data_dir, "*.fits"))
    random.shuffle(fits_files)
    if config_type == "tiny":
        train_files = fits_files[:2]
        test_files = fits_files[2:3]
    elif config_type == "full":
        split_idx = int(0.8 * len(fits_files))
        train_files = fits_files[:split_idx]
        test_files = fits_files[split_idx:]
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in files:
                print(file, flush=True, end="...")
                with fits.open(file, memmap=False) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    ra = hdul[0].header.get('CRVAL1', 0)
                    dec = hdul[0].header.get('CRVAL2', 0)
                    pixscale = hdul[0].header.get('CD1_2', 0.396)
                    ntimes = hdul[0].data.shape[0]
                    nbands = hdul[0].data.shape[1]
                item = {"image_id": image_id, "image": file, "ra": ra, "dec": dec,
                        "pixscale": pixscale, "ntimes": ntimes, "nbands": nbands}
                out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")
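
# Usage sketch (editorial): regenerating the split manifests from a local copy
# of the FITS cubes; this writes e.g. ./splits/tiny_train.jsonl and
# ./splits/tiny_test.jsonl, which the loader reads via _URLS above.
#
#     make_split_jsonl_files(config_type="tiny", data_dir="./data", outdir="./splits")
#     make_split_jsonl_files(config_type="full", data_dir="./data", outdir="./splits")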