import os

import pandas as pd
import datasets


class CSVhrrrDataset(datasets.GeneratorBasedBuilder):
    """
    A custom dataset builder that pairs CSV weather forecast discussion reports
    with their corresponding HRRR GRIB2 files.

    The CSV reports are read with pandas; the HRRR files are not parsed here
    and are exposed only through their file paths.
    """

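    # Expected on-disk layout, relative to the current working directory
    # (directory names are taken from _split_generators below; the file-name
    # placeholders are illustrative):
    #   weather_forecast_discussion/csv_reports/<date>.csv
    #   weather_forecast_discussion/hrrr/hrrr.<date>.<suffix>.grib2
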
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description="Dataset containing CSV and corresponding hrrr files.",
            features=datasets.Features({
                "csv_data": datasets.Value("string"),
                "hrrr_file_path": datasets.Value("string"),
                "filename": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/nasa-impact/WINDSET/tree/main/weather_forecast_discussion",
            license="MIT",
        )

    def _split_generators(self, dl_manager):
        """
        Define the dataset splits (train, validation, test).
        Here, all CSV-HRRR file pairs are placed in a single train split.
        """
        csv_dir = os.path.join(os.getcwd(), "weather_forecast_discussion/csv_reports")
        hrrr_dir = os.path.join(os.getcwd(), "weather_forecast_discussion/hrrr")

        if not os.path.isdir(csv_dir):
            raise FileNotFoundError(f"CSV directory {csv_dir} not found!")
        if not os.path.isdir(hrrr_dir):
            raise FileNotFoundError(f"hrrr directory {hrrr_dir} not found!")

        csv_files = [f for f in os.listdir(csv_dir) if f.endswith('.csv')]
        hrrr_files = [f for f in os.listdir(hrrr_dir) if f.endswith('.grib2')]

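        # Pair each CSV report with the HRRR file that carries the same date in
        # its name. The matching rule below looks for the substring
        # "hrrr.<date>." in the HRRR file name; for example (hypothetical file
        # names), "20230101.csv" would pair with "hrrr.20230101.t00z.grib2".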
        file_pairs = []
        for csv_file in csv_files:
            date_str = os.path.splitext(csv_file)[0]

            matching_hrrr_files = [hrrr for hrrr in hrrr_files if f"hrrr.{date_str}." in hrrr]

            if len(matching_hrrr_files) == 1:
                file_pairs.append((csv_file, matching_hrrr_files[0]))
            elif len(matching_hrrr_files) == 0:
                print(f"Warning: No matching hrrr file found for CSV file: {csv_file}")
            else:
                print(f"Warning: Multiple matching hrrr files found for CSV file: {csv_file}. Using the first match.")
                file_pairs.append((csv_file, matching_hrrr_files[0]))

        if not file_pairs:
            raise ValueError("No valid CSV-hrrr file pairs found. Check the directory structure and file names.")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_pairs": file_pairs,
                    "csv_dir": csv_dir,
                    "hrrr_dir": hrrr_dir,
                },
            )
        ]

    def _generate_examples(self, file_pairs, csv_dir, hrrr_dir):
        """
        Yield examples from the paired files.
        Each example contains the discussion text from a CSV report and the
        path to its corresponding HRRR file.
        """
        example_id = 0

        for csv_file, hrrr_file in file_pairs:
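            # Each CSV report is expected to contain a "discussion" column; its
            # text becomes the "csv_data" feature. The HRRR file itself is not
            # read here, only its path is recorded.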
            csv_file_path = os.path.join(csv_dir, csv_file)
            csv_data = pd.read_csv(csv_file_path)

            hrrr_file_path = os.path.join(hrrr_dir, hrrr_file)

            yield example_id, {
                "csv_data": csv_data["discussion"].to_string(),
                "hrrr_file_path": hrrr_file_path,
                "filename": csv_file,
            }
            example_id += 1
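

if __name__ == "__main__":
    # Minimal usage sketch, added for illustration (not part of the original
    # builder). It assumes a datasets version that still supports script-based
    # builders, and that weather_forecast_discussion/csv_reports and
    # weather_forecast_discussion/hrrr exist under the current working
    # directory, as required by _split_generators.
    from datasets import load_dataset

    ds = load_dataset(__file__, trust_remote_code=True)
    print(ds["train"][0]["filename"])
    print(ds["train"][0]["hrrr_file_path"])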