MeriDK committed on
Commit
4f8ca91
·
1 Parent(s): 77a9f5e

Updated loading logic

Browse files
Files changed (1) hide show
  1. AstroM3Dataset.py +20 -12
AstroM3Dataset.py CHANGED
@@ -5,8 +5,10 @@ import pandas as pd
5
  import numpy as np
6
  import json
7
  from astropy.io import fits
 
8
 
9
  from utils.parallelzipfile import ParallelZipFile as ZipFile
 
10
 
11
  _DESCRIPTION = (
12
  "AstroM3 is a time-series astronomy dataset containing photometry, spectra, "
@@ -37,8 +39,9 @@ _CITATION = """
37
  class AstroM3Dataset(datasets.GeneratorBasedBuilder):
38
  """Hugging Face dataset for AstroM3 with configurable subsets and seeds."""
39
 
40
- DEFAULT_CONFIG_NAME = "full_42"
41
 
 
42
  BUILDER_CONFIGS = [
43
  datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION, data_dir=None)
44
  for sub in ["full", "sub10", "sub25", "sub50"]
@@ -52,7 +55,7 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
52
  {
53
  "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
54
  "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
55
- "metadata": datasets.Sequence(datasets.Value("float32"), length=25),
56
  "label": datasets.Value("string"),
57
  }
58
  ),
@@ -114,22 +117,26 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
114
  "test": f"{_URL}/splits/{sub}/{seed}/test.csv",
115
  "info": f"{_URL}/splits/{sub}/{seed}/info.json",
116
  }
117
- extracted_path = dl_manager.download_and_extract(urls)
 
118
 
119
  # Load all spectra files
120
- spectra_urls = {}
 
 
 
 
121
 
122
- for split in ["train", "val", "test"]:
123
- df = pd.read_csv(extracted_path[split])
124
- for _, row in df.iterrows():
125
- spectra_url = f"{_URL}/spectra/{split}/{row['target']}/{row['spec_filename']}"
126
- spectra_urls[row["spec_filename"]] = spectra_url
127
 
128
- spectra = dl_manager.download_and_extract(spectra_urls)
129
 
130
  # Load photometry and init reader
131
  photometry_path = dl_manager.download(f"{_URL}/photometry.zip")
132
  self.reader_v = ZipFile(photometry_path)
 
133
 
134
  return [
135
  datasets.SplitGenerator(
@@ -154,6 +161,7 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
154
 
155
  def _generate_examples(self, csv_path, info_path, spectra, split):
156
  """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
 
157
 
158
  if not os.path.exists(csv_path):
159
  raise FileNotFoundError(f"Missing dataset file: {csv_path}")
@@ -166,9 +174,9 @@ class AstroM3Dataset(datasets.GeneratorBasedBuilder):
166
  with open(info_path) as f:
167
  info = json.loads(f.read())
168
 
169
- for idx, row in df.iterrows():
170
  photometry = self._get_photometry(row["name"])
171
- spectra = self._get_spectra(spectra[row['spec_filename']])
172
 
173
  yield idx, {
174
  "photometry": photometry,
 
5
  import numpy as np
6
  import json
7
  from astropy.io import fits
8
+ from tqdm import tqdm
9
 
10
  from utils.parallelzipfile import ParallelZipFile as ZipFile
11
+ from datasets.utils.tqdm import enable_progress_bars, disable_progress_bars
12
 
13
  _DESCRIPTION = (
14
  "AstroM3 is a time-series astronomy dataset containing photometry, spectra, "
 
39
  class AstroM3Dataset(datasets.GeneratorBasedBuilder):
40
  """Hugging Face dataset for AstroM3 with configurable subsets and seeds."""
41
 
42
+ # HF_DATASETS_DISABLE_PROGRESS_BARS = True
43
 
44
+ DEFAULT_CONFIG_NAME = "full_42"
45
  BUILDER_CONFIGS = [
46
  datasets.BuilderConfig(name=f"{sub}_{seed}", version=_VERSION, data_dir=None)
47
  for sub in ["full", "sub10", "sub25", "sub50"]
 
55
  {
56
  "photometry": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
57
  "spectra": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=3)),
58
+ "metadata": datasets.Sequence(datasets.Value("float32"), length=38),
59
  "label": datasets.Value("string"),
60
  }
61
  ),
 
117
  "test": f"{_URL}/splits/{sub}/{seed}/test.csv",
118
  "info": f"{_URL}/splits/{sub}/{seed}/info.json",
119
  }
120
+ extracted_path = dl_manager.download(urls)
121
+ # print("Downloaded train.csv val.csv test.csv info.json")
122
 
123
  # Load all spectra files
124
+ spectra = {}
125
+ df1 = pd.read_csv(extracted_path["train"])
126
+ df2 = pd.read_csv(extracted_path["val"])
127
+ df3 = pd.read_csv(extracted_path["test"])
128
+ df_combined = pd.concat([df1, df2, df3], ignore_index=True)
129
 
130
+ for _, row in df_combined.iterrows():
131
+ spectra_url = f"{_URL}/spectra/{row['target']}/{row['spec_filename']}"
132
+ spectra[row["spec_filename"]] = dl_manager.download(spectra_url)
 
 
133
 
134
+ # print("Downloaded spectra files")
135
 
136
  # Load photometry and init reader
137
  photometry_path = dl_manager.download(f"{_URL}/photometry.zip")
138
  self.reader_v = ZipFile(photometry_path)
139
+ # print("Downloaded photometry")
140
 
141
  return [
142
  datasets.SplitGenerator(
 
161
 
162
  def _generate_examples(self, csv_path, info_path, spectra, split):
163
  """Yields examples from a CSV file containing photometry, spectra, metadata, and labels."""
164
+ print("here")
165
 
166
  if not os.path.exists(csv_path):
167
  raise FileNotFoundError(f"Missing dataset file: {csv_path}")
 
174
  with open(info_path) as f:
175
  info = json.loads(f.read())
176
 
177
+ for idx, row in enumerate(df.iterrows()):
178
  photometry = self._get_photometry(row["name"])
179
+ spectra = self._get_spectra(spectra[row["spec_filename"]])
180
 
181
  yield idx, {
182
  "photometry": photometry,