Dataset loading script for use with the Hugging Face `datasets` library's
`load_dataset()` function. Pairs CSV weather-forecast-discussion reports with
their corresponding HRRR GRIB2 files.
License: MIT (see ArXiv / dataset card for details).
Contributed by omshinde (verified upload).
import os
import pandas as pd
import numpy as np
import datasets
class CSVhrrrDataset(datasets.GeneratorBasedBuilder):
    """Load paired CSV weather-discussion reports and HRRR GRIB2 files.

    CSV reports are read with pandas; HRRR files are not read here — only
    their filesystem paths are recorded, so consumers can load the GRIB2
    data themselves. Each example pairs the ``discussion`` column of one
    CSV report with the path of the HRRR file whose name embeds the same
    date.
    """

    # Dataset version (bump on schema or pairing-logic changes).
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset's feature schema, homepage, and license."""
        return datasets.DatasetInfo(
            description="Dataset containing CSV and corresponding hrrr files.",
            features=datasets.Features({
                "csv_data": datasets.Value("string"),        # CSV 'discussion' column rendered as text
                "hrrr_file_path": datasets.Value("string"),  # path to the matching .grib2 file (not its contents)
                "filename": datasets.Value("string"),        # name of the source CSV file
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/nasa-impact/WINDSET/tree/main/weather_forecast_discussion",
            license="MIT",
        )

    def _split_generators(self, dl_manager):
        """Pair CSV and HRRR files by date and expose one TRAIN split.

        Files are expected under the current working directory:
        ``weather_forecast_discussion/csv_reports`` ('<date>.csv') and
        ``weather_forecast_discussion/hrrr`` ('hrrr.<date>.*.grib2').

        Raises:
            FileNotFoundError: if either data directory is missing.
            ValueError: if no CSV file could be paired with an HRRR file.
        """
        csv_dir = os.path.join(os.getcwd(), "weather_forecast_discussion/csv_reports")
        hrrr_dir = os.path.join(os.getcwd(), "weather_forecast_discussion/hrrr")
        if not os.path.isdir(csv_dir):
            raise FileNotFoundError(f"CSV directory {csv_dir} not found!")
        if not os.path.isdir(hrrr_dir):
            raise FileNotFoundError(f"hrrr directory {hrrr_dir} not found!")
        # Sort listings so pairing (and therefore example order) is
        # deterministic; os.listdir() order is filesystem-dependent.
        csv_files = sorted(f for f in os.listdir(csv_dir) if f.endswith('.csv'))
        hrrr_files = sorted(f for f in os.listdir(hrrr_dir) if f.endswith('.grib2'))
        # Pair each CSV report with the HRRR file carrying the same date.
        file_pairs = []
        for csv_file in csv_files:
            # CSV files are named '<date>.csv'; the matching HRRR file name
            # embeds that date as 'hrrr.<date>.'.
            date_str = os.path.splitext(csv_file)[0]
            matching_hrrr_files = [hrrr for hrrr in hrrr_files if f"hrrr.{date_str}." in hrrr]
            if len(matching_hrrr_files) == 1:
                file_pairs.append((csv_file, matching_hrrr_files[0]))
            elif len(matching_hrrr_files) == 0:
                print(f"Warning: No matching hrrr file found for CSV file: {csv_file}")
            else:
                # Ambiguous match: warn but keep going with the first
                # (lexicographically smallest, thanks to the sort above).
                print(f"Warning: Multiple matching hrrr files found for CSV file: {csv_file}. Using the first match.")
                file_pairs.append((csv_file, matching_hrrr_files[0]))
        if not file_pairs:
            raise ValueError("No valid CSV-hrrr file pairs found. Check the directory structure and file names.")
        # Everything goes into a single TRAIN split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_pairs": file_pairs,
                    "csv_dir": csv_dir,
                    "hrrr_dir": hrrr_dir,
                },
            )
        ]

    def _generate_examples(self, file_pairs, csv_dir, hrrr_dir):
        """Yield ``(example_id, example)`` tuples, one per CSV/HRRR pair.

        Args:
            file_pairs: list of ``(csv_filename, hrrr_filename)`` tuples.
            csv_dir: directory containing the CSV reports.
            hrrr_dir: directory containing the HRRR .grib2 files.

        Raises:
            KeyError: if a CSV file lacks a 'discussion' column.
        """
        for example_id, (csv_file, hrrr_file) in enumerate(file_pairs):
            csv_data = pd.read_csv(os.path.join(csv_dir, csv_file))
            yield example_id, {
                # Only the 'discussion' column is exposed, rendered via
                # Series.to_string() (index included, as pandas formats it).
                "csv_data": csv_data["discussion"].to_string(),
                "hrrr_file_path": os.path.join(hrrr_dir, hrrr_file),
                "filename": csv_file,
            }