from io import BytesIO
import datasets
import pandas as pd
import numpy as np
import json
from astropy.io import fits

from .utils import ParallelZipFile

_DESCRIPTION = (
    "AstroM3 is a multi-modal time-series astronomy dataset containing photometry, spectra, "
    "and metadata features for variable stars. The dataset consists of multiple subsets "
    "('full', 'sub10', 'sub25', 'sub50') and supports different random seeds (42, 66, 0, 12, 123). "
    "\n\nEach sample includes:\n"
    "- **Photometry**: Time-series light curve data with shape `(N, 3)` representing time, flux, "
    "and flux uncertainty.\n"
    "- **Spectra**: Spectral observations with shape `(M, 3)` containing wavelength, flux, and flux uncertainty.\n"
    "- **Metadata**: Auxiliary astrophysical and photometric parameters (e.g., magnitudes, parallax, motion data) "
    "stored as a dictionary.\n"
    "- **Label**: The classification of the star as a string."
)
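
# A minimal usage sketch (assumptions: the loader is hosted on the Hub as "MeriDK/AstroM3Dataset",
# matching _URL below, and run with the default "full_42" configuration):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("MeriDK/AstroM3Dataset", name="full_42", trust_remote_code=True)
#     sample = ds["train"][0]
#     sample["photometry"]  # (N, 3): time, flux, flux uncertainty
#     sample["spectra"]     # (M, 3): wavelength, flux, flux uncertainty
#     sample["metadata"]["meta_cols"]["parallax"]
#     sample["label"]       # variable-star class as a string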

_HOMEPAGE = "https://huggingface.co/datasets/AstroM3"
_LICENSE = "CC BY 4.0"
_URL = "https://huggingface.co/datasets/MeriDK/AstroM3Dataset/resolve/main"
_VERSION = datasets.Version("1.0.0")

_CITATION = """ 
@article{rizhko2024astrom,
  title={AstroM $\^{} 3$: A self-supervised multimodal model for astronomy},
  author={Rizhko, Mariia and Bloom, Joshua S},
  journal={arXiv preprint arXiv:2411.08842},
  year={2024}
}
"""

_PHOTO_COLS = ['amplitude', 'period', 'lksl_statistic', 'rfr_score']
_METADATA_COLS = [
    'mean_vmag', 'phot_g_mean_mag', 'e_phot_g_mean_mag', 'phot_bp_mean_mag', 'e_phot_bp_mean_mag', 'phot_rp_mean_mag',
    'e_phot_rp_mean_mag', 'bp_rp', 'parallax', 'parallax_error', 'parallax_over_error', 'pmra', 'pmra_error', 'pmdec',
    'pmdec_error', 'j_mag', 'e_j_mag', 'h_mag', 'e_h_mag', 'k_mag', 'e_k_mag', 'w1_mag', 'e_w1_mag',
    'w2_mag', 'e_w2_mag', 'w3_mag', 'w4_mag', 'j_k', 'w1_w2', 'w3_w4', 'pm', 'ruwe', 'l', 'b'
]
_ALL_COLS = _PHOTO_COLS + _METADATA_COLS
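
# Column-wise transformations applied before normalization (see transform() below):
# "abs" converts apparent to absolute magnitudes using parallax, "cos"/"sin" take the cosine/sine
# of the coordinate columns l and b (in degrees), and "log" compresses the period scale.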
_METADATA_FUNC = {
    "abs": [
        "mean_vmag",
        "phot_g_mean_mag",
        "phot_bp_mean_mag",
        "phot_rp_mean_mag",
        "j_mag",
        "h_mag",
        "k_mag",
        "w1_mag",
        "w2_mag",
        "w3_mag",
        "w4_mag",
    ],
    "cos": ["l"],
    "sin": ["b"],
    "log": ["period"]
}


class AstroM3Dataset(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset for AstroM3, a multi-modal variable star dataset."""

    # Default configuration (used if no config is specified)
    DEFAULT_CONFIG_NAME = "full_42"

    # Define dataset configurations (subsets, seeds, and normalization variants)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{sub}_{seed}{norm}", version=_VERSION)
        for sub in ["full", "sub10", "sub25", "sub50"]
        for seed in [42, 66, 0, 12, 123]
        for norm in ["", "_norm"]
    ]
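    # Example config names generated above: "full_42", "full_42_norm", "sub25_123", "sub50_0_norm".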

    def _info(self):
        """Defines the dataset schema, including features and metadata."""

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "photometry": datasets.Array2D(shape=(None, 3), dtype="float32"),
                    "spectra": datasets.Array2D(shape=(None, 3), dtype="float32"),
                    "metadata": {
                        "meta_cols": {el: datasets.Value("float32") for el in _METADATA_COLS},
                        "photo_cols": {el: datasets.Value("float32") for el in _PHOTO_COLS},
                    },
                    "label": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _get_photometry(self, file_name):
        """Loads photometric light curve data from a compressed file."""
        csv = BytesIO()
        file_name = file_name.replace(' ', '')  # Strip spaces so the name matches the path inside the ZIP archive
        data_path = f'vardb_files/{file_name}.dat'

        # Read the photometry file from the compressed ZIP
        csv.write(self.reader_v.read(data_path))
        csv.seek(0)

        # Read light curve data
        lc = pd.read_csv(csv, sep=r'\s+', skiprows=2, names=['HJD', 'MAG', 'MAG_ERR', 'FLUX', 'FLUX_ERR'],
                         dtype={'HJD': float, 'MAG': float, 'MAG_ERR': float, 'FLUX': float, 'FLUX_ERR': float})

        return lc[['HJD', 'FLUX', 'FLUX_ERR']].values

    @staticmethod
    def _get_spectra(file_name):
        """Loads spectral data from a FITS file."""

        hdulist = fits.open(file_name)
        len_list = len(hdulist)

        if len_list == 1:
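            # Single-HDU layout: flux and inverse variance are rows of the primary image, and the
            # wavelength grid is rebuilt from the log-linear solution in the header (COEFF0, COEFF1).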
            head = hdulist[0].header
            scidata = hdulist[0].data
            coeff0 = head['COEFF0']
            coeff1 = head['COEFF1']
            pixel_num = head['NAXIS1']
            specflux = scidata[0]
            ivar = scidata[1]
            wavelength = np.linspace(0, pixel_num - 1, pixel_num)
            wavelength = np.power(10, (coeff0 + wavelength * coeff1))
            hdulist.close()
        elif len_list == 2:
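            # Two-HDU layout: the spectrum is a single table row in the first extension,
            # ordered as (flux, inverse variance, wavelength).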
            scidata = hdulist[1].data
            wavelength = scidata[0][2]
            ivar = scidata[0][1]
            specflux = scidata[0][0]
            hdulist.close()
        else:
            raise ValueError(f'Unexpected number of HDUs in FITS file: {len_list} (expected 1 or 2)')

        return np.vstack((wavelength, specflux, ivar)).T

    @staticmethod
    def transform(df):
        """Applies transformations to metadata."""

        for transformation_type, value in _METADATA_FUNC.items():
            if transformation_type == "abs":
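                # Convert apparent to absolute magnitude: M = m + 5 * log10(parallax[mas]) - 10,
                # clamping non-positive parallaxes to 1 to keep the logarithm defined.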
                for col in value:
                    df[col] = (
                            df[col] - 10 + 5 * np.log10(np.where(df["parallax"] <= 0, 1, df["parallax"]))
                    )
            elif transformation_type == "cos":
                for col in value:
                    df[col] = np.cos(np.radians(df[col]))
            elif transformation_type == "sin":
                for col in value:
                    df[col] = np.sin(np.radians(df[col]))
            elif transformation_type == "log":
                for col in value:
                    df[col] = np.log10(df[col])

    def _split_generators(self, dl_manager):
        """Defines dataset splits and downloads required files."""

        # Get subset and seed info from the name
        name = self.config.name.split("_")
        sub, seed = name[0], name[1]

        # Load the splits and info files
        urls = {
            "train": f"splits/{sub}/{seed}/train.csv",
            "val": f"splits/{sub}/{seed}/val.csv",
            "test": f"splits/{sub}/{seed}/test.csv",
            "info": f"splits/{sub}/{seed}/info.json",
        }
        extracted_path = dl_manager.download(urls)

        # Download all spectra files
        spectra_urls = {}

        for split in ("train", "val", "test"):
            df = pd.read_csv(extracted_path[split])
            for _, row in df.iterrows():
                spectra_urls[row["spec_filename"]] = f"spectra/{row['target']}/{row['spec_filename']}"

        spectra_files = dl_manager.download(spectra_urls)

        # Download photometry data and initialize ZIP reader
        photometry_path = dl_manager.download("photometry.zip")
        self.reader_v = ParallelZipFile(photometry_path)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"csv_path": extracted_path["train"],
                                                       "info_path": extracted_path["info"],
                                                       "spectra_files": spectra_files,
                                                       "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"csv_path": extracted_path["val"],
                                                            "info_path": extracted_path["info"],
                                                            "spectra_files": spectra_files,
                                                            "split": "val"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"csv_path": extracted_path["test"],
                                                      "info_path": extracted_path["info"],
                                                      "spectra_files": spectra_files,
                                                      "split": "test"}
            ),
        ]

    def _generate_examples(self, csv_path, info_path, spectra_files, split):
        """Yields individual dataset examples."""

        df = pd.read_csv(csv_path)

        with open(info_path) as f:
            info = json.load(f)

        if "norm" in self.config.name:
            # Apply metadata transformations
            self.transform(df)

            # Normalize using precomputed mean and standard deviation
            df[_ALL_COLS] = (df[_ALL_COLS] - info["mean"]) / info["std"]

        for idx, row in df.iterrows():
            photometry = self._get_photometry(row["name"])
            spectra = self._get_spectra(spectra_files[row["spec_filename"]])

            yield idx, {
                "photometry": photometry,
                "spectra": spectra,
                "metadata": {
                    "meta_cols": {el: row[el] for el in _METADATA_COLS},
                    "photo_cols": {el: row[el] for el in _PHOTO_COLS},
                },
                "label": row["target"],
            }