from collections import defaultdict

import datasets
from datasets import load_dataset
import numpy as np
from scipy import stats

METADATA_FUNC = {
    "abs": [
        "mean_vmag",
        "phot_g_mean_mag",
        "phot_bp_mean_mag",
        "phot_rp_mean_mag",
        "j_mag",
        "h_mag",
        "k_mag",
        "w1_mag",
        "w2_mag",
        "w3_mag",
        "w4_mag",
    ],
    "cos": ["l"],
    "sin": ["b"],
    "log": ["period"]
}
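
# How each group in METADATA_FUNC is applied by transform_metadata() below:
#   "abs":        apparent -> absolute magnitude via the parallax (M = m + 5 * log10(parallax_mas) - 10)
#   "cos"/"sin":  cyclic encoding of the galactic coordinates l and b (given in degrees)
#   "log":        log10 scaling of the period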


def preprocess_spectra(example):
    """
    Preprocess spectral data. Steps:
    - Interpolate flux and flux error to a fixed wavelength grid (3850 to 9000 Å).
    - Normalize flux using mean and median absolute deviation (MAD).
    - Append MAD as an auxiliary feature.
    """
    spectra = example['spectra']
    wavelengths = spectra[:, 0]
    flux = spectra[:, 1]
    flux_err = spectra[:, 2]

    # Interpolate flux and flux error onto a fixed grid
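    # (2 Å sampling over 3850-9000 Å gives 2575 bins, matching the Array2D cast in main())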
    new_wavelengths = np.arange(3850, 9000, 2)
    flux = np.interp(new_wavelengths, wavelengths, flux)
    flux_err = np.interp(new_wavelengths, wavelengths, flux_err)

    # Normalize flux and flux error
    mean = np.mean(flux)
    mad = stats.median_abs_deviation(flux[flux != 0])  # ignore zero (likely missing) bins

    flux = (flux - mean) / mad
    flux_err = flux_err / mad
    aux_values = np.full_like(flux, np.log10(mad))  # store log10(MAD) as a constant auxiliary channel

    # Stack into a (3, n_bins) array: normalized flux, scaled flux error, log10(MAD)
    spectra = np.vstack([flux, flux_err, aux_values])
    example['spectra'] = spectra

    return example


def preprocess_lc(example):
    """
    Preprocess photometry (light curve) data. Steps:
    - Remove duplicate time entries.
    - Sort by Heliocentric Julian Date (HJD).
    - Normalize flux and flux error using mean and median absolute deviation (MAD).
    - Scale time values between 0 and 1.
    - Append auxiliary features (log MAD and time span delta_t).
    """
    X = example['photometry']
    aux_values = np.stack(list(example['metadata']['photo_cols'].values()))

    # Remove duplicate rows (np.unique also sorts them, so the argsort below is a safeguard)
    X = np.unique(X, axis=0)

    # Sort based on HJD
    sorted_indices = np.argsort(X[:, 0])
    X = X[sorted_indices]

    # Normalize flux and flux error
    mean = X[:, 1].mean()
    mad = stats.median_abs_deviation(X[:, 1])
    X[:, 1] = (X[:, 1] - mean) / mad
    X[:, 2] = X[:, 2] / mad

    # Compute delta_t (time span of the light curve in years)
    delta_t = (X[:, 0].max() - X[:, 0].min()) / 365

    # Scale time from 0 to 1
    X[:, 0] = (X[:, 0] - X[:, 0].min()) / (X[:, 0].max() - X[:, 0].min())

    # Add MAD and delta_t to auxiliary metadata features
    aux_values = np.concatenate((aux_values, [np.log10(mad), delta_t]))

    # Broadcast the auxiliary vector across all time steps and append as extra columns
    aux_values = np.tile(aux_values, (X.shape[0], 1))
    X = np.concatenate((X, aux_values), axis=-1)

    example['photometry'] = X
    return example


def transform_metadata(example):
    """
    Apply the transformations defined in METADATA_FUNC to the example's metadata.
    """
    metadata = example["metadata"]

    # Convert apparent magnitudes in meta_cols to absolute magnitudes using the
    # parallax (in mas): M = m + 5 * log10(parallax) - 10.
    for col in METADATA_FUNC["abs"]:
        if col in metadata["meta_cols"]:
            # Clamp non-positive parallaxes to 1 mas so that log10 stays defined.
            metadata["meta_cols"][col] = (
                metadata["meta_cols"][col]
                - 10
                + 5 * np.log10(np.where(metadata["meta_cols"]["parallax"] <= 0, 1, metadata["meta_cols"]["parallax"]))
            )

    # Process 'cos' transformation on meta_cols:
    for col in METADATA_FUNC["cos"]:
        if col in metadata["meta_cols"]:
            metadata["meta_cols"][col] = np.cos(np.radians(metadata["meta_cols"][col]))

    # Process 'sin' transformation on meta_cols:
    for col in METADATA_FUNC["sin"]:
        if col in metadata["meta_cols"]:
            metadata["meta_cols"][col] = np.sin(np.radians(metadata["meta_cols"][col]))

    # Process 'log' transformation on photo_cols:
    for col in METADATA_FUNC["log"]:
        if col in metadata["photo_cols"]:
            metadata["photo_cols"][col] = np.log10(metadata["photo_cols"][col])

    # Update the example with the transformed metadata.
    example["metadata"] = metadata
    return example


def compute_metadata_stats(ds):
    """
    Compute the mean and standard deviation for each column in meta_cols and photo_cols.
    """
    meta_vals = defaultdict(list)
    photo_vals = defaultdict(list)

    # Accumulate values for each column
    for example in ds:
        meta = example["metadata"]["meta_cols"]
        photo = example["metadata"]["photo_cols"]
        for col, value in meta.items():
            meta_vals[col].append(value)
        for col, value in photo.items():
            photo_vals[col].append(value)

    # Compute mean and standard deviation for each column
    info = {"meta_cols": {}, "photo_cols": {}}
    for col, values in meta_vals.items():
        arr = np.stack(values)
        info["meta_cols"][col] = {"mean": arr.mean(), "std": arr.std()}
    for col, values in photo_vals.items():
        arr = np.stack(values)
        info["photo_cols"][col] = {"mean": arr.mean(), "std": arr.std()}

    return info


def normalize_metadata(example, info):
    """
    Normalize metadata values using z-score normalization:
    (value - mean) / std.

    The 'stats' parameter should be a dictionary with computed means and stds for both meta_cols and photo_cols.
    """
    metadata = example["metadata"]

    # Normalize meta_cols
    for col, value in metadata["meta_cols"].items():
        mean = info["meta_cols"][col]["mean"]
        std = info["meta_cols"][col]["std"]
        metadata["meta_cols"][col] = (value - mean) / std

    # Normalize photo_cols
    for col, value in metadata["photo_cols"].items():
        mean = info["photo_cols"][col]["mean"]
        std = info["photo_cols"][col]["std"]
        metadata["photo_cols"][col] = (value - mean) / std

    example["metadata"] = metadata
    return example


def preprocess_metadata(example):
    """
    Extract the values from 'meta_cols' and stack them into a numpy array.
    """
    example["metadata"] = np.stack(list(example["metadata"]["meta_cols"].values()))
    return example


def main():
    """
    Main function for processing and uploading datasets.

    - Loads each dataset based on subset and random seed.
    - Applies preprocessing for spectra, photometry, and metadata.
    - Casts columns to appropriate feature types.
    - Pushes the processed dataset to Hugging Face Hub.
    """
    for sub in ["sub10", "sub25", "sub50", "full"]:
        for seed in [42, 66, 0, 12, 123]:
            name = f"{sub}_{seed}"
            print(f"Processing: {name}")

            # Load dataset from Hugging Face Hub
            ds = load_dataset('AstroMLCore/AstroM3Dataset', name=name, trust_remote_code=True, num_proc=16)
            ds = ds.with_format('numpy')

            # Transform and normalize metadata
            ds = ds.map(transform_metadata, num_proc=16)
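            # Normalization statistics are computed on the train split only, so the
            # validation/test splits are scaled with train-derived means and stds.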
            info = compute_metadata_stats(ds['train'])
            ds = ds.map(lambda example: normalize_metadata(example, info))

            # Transform spectra
            ds = ds.map(preprocess_spectra, num_proc=16)
            ds = ds.cast_column('spectra', datasets.Array2D(shape=(3, 2575), dtype='float32'))

            # Transform photometry
            ds = ds.map(preprocess_lc, num_proc=16)
            ds = ds.cast_column('photometry', datasets.Array2D(shape=(None, 9), dtype='float32'))

            # Stack metadata into one numpy array
            ds = ds.map(preprocess_metadata, num_proc=16)
            ds = ds.cast_column('metadata', datasets.Sequence(feature=datasets.Value('float32'), length=34))

            # Change label type
            ds = ds.cast_column('label', datasets.ClassLabel(
                names=['DSCT', 'EA', 'EB', 'EW', 'HADS', 'M', 'ROT', 'RRAB', 'RRC', 'SR']))

            # Upload processed dataset to Hugging Face Hub
            ds.push_to_hub('AstroMLCore/AstroM3Processed', config_name=name)


if __name__ == '__main__':
    main()
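
# A minimal loading sketch for one processed configuration; the config name is one of
# the sub/seed combinations generated above (e.g. 'full_42'):
#
#   from datasets import load_dataset
#   ds = load_dataset('AstroMLCore/AstroM3Processed', name='full_42')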