# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Archival NOAA NWP forecasting data covering most of 2016-2022. """
import numpy as np
import xarray as xr
import json

import datasets


# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{ocf:gfs,
title = {GFS Forecast Dataset},
author={Jacob Bieker},
year={2022}
}
"""

# You can copy an official description
_DESCRIPTION = """\
This dataset consists of various NOAA datasets related to operational forecasts, including FNL Analysis files,
GFS operational forecasts, and the raw observations used to initialize the grid.
"""

_HOMEPAGE = "https://mtarchive.geol.iastate.edu/"

_LICENSE = "US Government data, Open license, no restrictions"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "gfs_v16": "gfs_v16.json",
    "raw": "raw.json",
    "analysis": "analysis.json",
}
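
# For reference, each index file listed above is expected to be a JSON list of file paths relative to
# the dataset repository root, which `_generate_examples` turns into fsspec URLs. A minimal sketch of
# the assumed layout (the real paths in the hosted JSON files may differ):
#
#     ["data/forecasts/GFSv16/2021/04/2021040100.zarr.zip",
#      "data/forecasts/GFSv16/2021/04/2021040106.zarr.zip"]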


class GFEReforecastDataset(datasets.GeneratorBasedBuilder):
    """Archival NOAA GFS forecast and analysis data covering most of 2016-2022."""

    VERSION = datasets.Version("1.0.0")

    # This dataset has several configurations (sub-sets), listed in BUILDER_CONFIGS below; the
    # configuration name is passed as the second argument to `datasets.load_dataset` (see the usage
    # sketch after DEFAULT_CONFIG_NAME).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="analysis", version=VERSION, description="FNL 0.25 degree Analysis files"),
        datasets.BuilderConfig(name="raw_analysis", version=VERSION, description="FNL 0.25 degree Analysis files coupled with raw observations"),
        datasets.BuilderConfig(name="gfs_v16", version=VERSION, description="GFS v16 Forecasts from April 2021 through 2022, returned as a 696 channel image"),
        datasets.BuilderConfig(name="raw_gfs_v16", version=VERSION, description="GFS v16 Forecasts from April 2021 through 2022, returned as a 696 channel image, coupled with raw observations"),
        datasets.BuilderConfig(name="gfs_v16_variables", version=VERSION, description="GFS v16 Forecasts from April 2021 through 2022 with one returned array per variable"),
    ]

    DEFAULT_CONFIG_NAME = "gfs_v16"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def __init__(self, filepath="https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/gfs_v16.json", **kwargs):
        """BuilderConfig for GFEReforecastDataset.

        Args:
        filepath: *string*, the path of a json file, which consists of the relative path of https://huggingface.co/datasets/openclimatefix/gfs-reforecast
        to the data files we want to load, default = "https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/gfs_v16.json"
        **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.filepath = filepath
    
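    # Note: extra keyword arguments passed to `datasets.load_dataset` are forwarded to this builder's
    # constructor, so the index file can in principle be overridden (a sketch, not verified against
    # every datasets version):
    #
    #     datasets.load_dataset(
    #         "openclimatefix/gfs-reforecast",
    #         "analysis",
    #         filepath="https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/analysis.json",
    #     )
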
    def _info(self):
        features = {}
        if "v16" in self.config.name:
            # TODO Add the variables one with all 696 variables, potentially combined by level
            features = {
                    "current_state": datasets.Array3D((721,1440,696), dtype="float32"),
                    "next_state": datasets.Array3D((721,1440,696), dtype="float32"),
                    "timestamp": datasets.Sequence(datasets.Value("timestamp[ns]")),
                    "latitude": datasets.Sequence(datasets.Value("float32")),
                    "longitude": datasets.Sequence(datasets.Value("float32"))
                }
        elif "analysis" in self.config.name:
            # TODO Add the variables one with all 322 variables, potentially combined by level
            features = {
                    "current_state": datasets.Array3D((721,1440,322), dtype="float32"),
                    "next_state": datasets.Array3D((721,1440,322), dtype="float32"),
                    "timestamp": datasets.Sequence(datasets.Value("timestamp[ns]")),
                    "latitude": datasets.Sequence(datasets.Value("float32")),
                    "longitude": datasets.Sequence(datasets.Value("float32"))
                }
        if "raw" in self.config.name:
            # Add the raw observation features, capping at 256,000 observations, padding if not enough
            raw_features = {"observations": datasets.Array2D((256000,1), dtype="float32"),
                            "observation_type": datasets.Array2D((256000,1), dtype="string"),
                            "observation_lat": datasets.Array2D((256000,1), dtype="float32"),
                            "observation_lon": datasets.Array2D((256000,1), dtype="float32"),
                            }
            features = features.update(raw_features)

        features = datasets.Features(features)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Defined above because they differ between configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

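    # For orientation, a single example for the "gfs_v16" config looks roughly like the sketch below
    # (shapes taken from the feature definitions above, values are illustrative only):
    #
    #     {
    #         "current_state": <(721, 1440, 696) float32 array>,   # all variables stacked as channels
    #         "next_state":    <(721, 1440, 696) float32 array>,   # the same stack at the next timestep
    #         "timestamp":     [numpy.datetime64("2021-04-01T00:00:00")],
    #         "latitude":      <721 float32 values>,
    #         "longitude":     <1440 float32 values>,
    #     }
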
    def _split_generators(self, dl_manager):
        # This method downloads/extracts the data and defines the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any nested list/dict and will give back the same structure with the URLs replaced by paths to local files.
        # By default archives are extracted and a path to a cached folder where they are extracted is returned instead of the archive.
        urls = _URLS[self.config.name]
        streaming = dl_manager.is_streaming
        if streaming:
            # In streaming mode only the small JSON index of data files is fetched here;
            # the Zarr stores themselves are opened lazily in _generate_examples
            urls = dl_manager.download_and_extract(urls)
        else:
            # self.filepath may be a URL (the default) or a local path, so fetch it through the
            # download manager before reading the list of data files it references
            index_path = dl_manager.download(self.filepath)
            with open(index_path, "r") as f:
                filepaths = json.load(f)
            data_dir = dl_manager.download_and_extract(filepaths)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": urls if streaming else data_dir,
                    "split": "train",
                    "streaming": streaming,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": urls if streaming else data_dir,
                    "split": "test",
                    "streaming": streaming,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": urls if streaming else data_dir,
                    "split": "valid",
                    "streaming": streaming
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split, streaming):
        # NOTE: `split` is currently unused, so train/test/validation all iterate over the same files
        # Load the list of files for the type of data
        if streaming:
            with open(filepath, "r") as f:
                filepaths = json.load(f)
                filepaths = ['zip:///::https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/' + f for f in filepaths]
        else:
            filepaths = filepath
        if "v16" in self.config.name:
            idx = 0
            for f in filepaths:
                dataset = xr.open_dataset(f, engine='zarr', chunks={})
                try:
                    for t in range(len(dataset["time"].values)-1):
                        data_t = dataset.isel(time=t)
                        data_t1 = dataset.isel(time=(t+1))
                        value = {"current_state": np.stack([data_t[v].values for v in sorted(data_t.data_vars)], axis=2),
                                 "next_state": np.stack([data_t1[v].values for v in sorted(data_t.data_vars)], axis=2),
                                 "timestamp": data_t["time"].values.reshape(-1),
                                 "latitude": data_t["latitude"].values,
                                 "longitude": data_t["longitude"].values}
                        idx += 1
                        yield idx, value
                except Exception:
                    # Some of the Zarr stores may have corrupted data at the end and fail part-way
                    # through, so skip the rest of this file and move on to the next one
                    continue