jbloom committed on
Commit 13d6592 · 1 Parent(s): 6679da0

initial commit

Files changed (3)
  1. .gitattributes +2 -0
  2. GBI-16-2D.py +208 -0
  3. README.md +70 -3
.gitattributes CHANGED
@@ -49,6 +49,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.gif filter=lfs diff=lfs merge=lfs -text
  *.png filter=lfs diff=lfs merge=lfs -text
  *.tiff filter=lfs diff=lfs merge=lfs -text
+ *.fits filter=lfs diff=lfs merge=lfs -text
+ *.fit filter=lfs diff=lfs merge=lfs -text
  # Image files - compressed
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
GBI-16-2D.py ADDED
@@ -0,0 +1,208 @@
import os
import random
from glob import glob
import json
from huggingface_hub import hf_hub_download

from astropy.io import fits
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs

_DESCRIPTION = (
    """GBI-16-2D is a dataset which is part of the AstroCompress project. """
    """It contains data assembled from the Keck Telescope. """
    """<TODO>Describe data format</TODO>"""
)

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/GBI-16-2D/resolve/main/"

_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    },
}

_REPO_ID = "AstroCompress/GBI-16-2D"

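# Each split file listed in _URLS is a JSON Lines file with one record per
# FITS image. A sketch of the expected record shape (field values here are
# illustrative, not taken from the real data):
#
#   {"image": "./data/<name>.fits", "ra": 150.1, "dec": 2.2,
#    "pixscale": 0.396, "image_id": "<name>"}
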

class GBI_16_2D(datasets.GeneratorBasedBuilder):
    """GBI-16-2D Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downstream workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(decode=True, mode="I;16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "image_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

    def _split_generators(self, dl_manager: DownloadManager):

        ret = []
        base_path = dl_manager._base_path
        # True when the script is run from a local clone rather than
        # resolved against the Hugging Face Hub endpoint.
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            # fetch the split file (jsonl index of images and metadata)
            if locally_run:
                split_file_location = os.path.normpath(
                    os.path.join(path, _URLS[self.config.name][split])
                )
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(
                    repo_id=_REPO_ID,
                    filename=_URLS[self.config.name][split],
                    repo_type="dataset",
                )
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append(
                        {
                            "ra": item["ra"],
                            "dec": item["dec"],
                            "pixscale": item["pixscale"],
                            "image_id": item["image_id"],
                        }
                    )
            # fetch the FITS files referenced by the split file
            if locally_run:
                data_urls = [
                    os.path.normpath(os.path.join(path, data_filename))
                    for data_filename in data_filenames
                ]
                data_files = [
                    dl_manager.download(data_url) for data_url in data_urls
                ]
            else:
                data_urls = data_filenames
                data_files = [
                    hf_hub_download(
                        repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
                    )
                    for data_url in data_urls
                ]
            ret.append(
                datasets.SplitGenerator(
                    name=(
                        datasets.Split.TRAIN
                        if split == "train"
                        else datasets.Split.TEST
                    ),
                    gen_kwargs={
                        "filepaths": data_files,
                        "split_file": split_file,
                        "split": split,
                        "data_metadata": data_metadata,
                    },
                ),
            )
        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate GBI-16-2D examples"""

        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            task_instance_key = f"{self.config.name}-{split}-{idx}"
            with fits.open(filepath, memmap=False) as hdul:
                # read the full 2D science image from the SCI extension
                image_data = hdul["SCI"].data[:, :].tolist()
                yield task_instance_key, {**{"image": image_data}, **item}
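
# Each yielded example is a (key, features) pair shaped like the following
# (values illustrative, not taken from the real data):
#
#   ("tiny-train-0", {"image": [[...pixel rows...]], "ra": 150.1,
#    "dec": 2.2, "pixscale": 0.396, "image_id": "<name>"})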

def make_split_jsonl_files(
    config_type="tiny", data_dir="./data", outdir="./splits", seed=42
):
    """
    Create jsonl files for the GBI-16-2D dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" and "full".
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.
    """
    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = glob(os.path.join(data_dir, "*.fits"))
    random.shuffle(fits_files)
    if config_type == "tiny":
        train_files = fits_files[:2]
        test_files = fits_files[2:3]
    elif config_type == "full":
        split_idx = int(0.8 * len(fits_files))
        train_files = fits_files[:split_idx]
        test_files = fits_files[split_idx:]
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in files:
                print(file, flush=True, end="...")
                with fits.open(file, memmap=False) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    ra = hdul["SCI"].header.get("CRVAL1", 0)
                    dec = hdul["SCI"].header.get("CRVAL2", 0)
                    # fall back to a default pixel scale when the WCS
                    # keyword is absent
                    pixscale = hdul["SCI"].header.get("CD1_2", 0.396)
                item = {
                    "image_id": image_id,
                    "image": file,
                    "ra": ra,
                    "dec": dec,
                    "pixscale": pixscale,
                }
                out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")
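
# Example usage (a sketch; run from the repository root with FITS files in
# ./data). This regenerates the split files referenced by _URLS above:
#
#   make_split_jsonl_files("tiny", data_dir="./data", outdir="./splits")
#   make_split_jsonl_files("full", data_dir="./data", outdir="./splits")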
README.md CHANGED
@@ -1,3 +1,70 @@
- ---
- license: cc-by-4.0
- ---
---
license: cc-by-4.0
pretty_name: Ground-based Imaging data
tags:
- astronomy
- compression
- images
---

# GBI-16-2D Dataset

GBI-16-2D is a dataset which is part of the AstroCompress project. It contains data assembled from the Keck Telescope. <TODO>Describe data format</TODO>

# Usage

You first need to install the `datasets` and `astropy` packages:

```bash
pip install datasets astropy
```

There are two configurations, `tiny` and `full`, each with `train` and `test` splits. The `tiny` configuration has two 2D images in its `train` split and one in its `test` split. The `full` configuration contains all the images in the `data/` directory. You can also load a single split on its own, as shown below.

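For example, to load only one split of one configuration (a sketch using the standard `datasets` API):

```python
from datasets import load_dataset

# Load just the test split of the tiny configuration
ds_test = load_dataset("AstroCompress/GBI-16-2D", "tiny", split="test")
```
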
## Use from Hugging Face Directly

To use this data directly from Hugging Face, you'll want to log in on the command line before starting python:

```bash
huggingface-cli login
```

or

```python
import huggingface_hub
huggingface_hub.login(token=token)
```

Then in your python script:

```python
from datasets import load_dataset
dataset = load_dataset("AstroCompress/GBI-16-2D", "tiny")
ds = dataset.with_format("np")
```

## Local Use

Alternatively, you can clone this repo and use it directly, without connecting to the Hugging Face Hub:

```bash
git clone https://huggingface.co/datasets/AstroCompress/GBI-16-2D
```

Then `cd GBI-16-2D` and start python like:

```python
from datasets import load_dataset
dataset = load_dataset("./GBI-16-2D.py", "tiny", data_dir="./data/")
ds = dataset.with_format("np")
```

Now you should be able to use the `ds` variable like:

```python
ds["test"][0]["image"].shape # -> (TBD)
```

Note that downloading and converting the images into the local cache will take a long time for the `full` dataset. Afterward, access should be quick, as the files are memory-mapped from disk.
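
With the NumPy format set above, each image is returned as a NumPy array. A quick sanity check on one record (a sketch; the field names follow the loading script in this repo):

```python
import numpy as np

first = ds["test"][0]
img = np.asarray(first["image"])
# 16-bit integer pixels are expected for this dataset
print(img.dtype, img.shape, first["image_id"], first["ra"], first["dec"])
```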