import json
import os

from astropy.io import fits
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs
from huggingface_hub import hf_hub_download

_DESCRIPTION = """
SBI-16-2D is a dataset which is part of the AstroCompress project. 
It contains imaging data assembled from the Hubble Space Telescope (HST).
"""

_HOMEPAGE = "https://google.github.io/AstroCompress"

_LICENSE = "CC BY 4.0"

_URL = "https://huggingface.co/datasets/AstroCompress/SBI-16-2D/resolve/main/"

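# Relative paths of the JSONL split manifests; each line describes one FITS
# file (its repo path plus RA/Dec, pixel scale, and an image id).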
_URLS = {
    "tiny": {
        "train": "./splits/tiny_train.jsonl",
        "test": "./splits/tiny_test.jsonl",
    },
    "full": {
        "train": "./splits/full_train.jsonl",
        "test": "./splits/full_test.jsonl",
    },
}

_REPO_ID = "AstroCompress/SBI-16-2D"


class SBI_16_2D(datasets.GeneratorBasedBuilder):
    """SBI-16-2D Dataset"""

    VERSION = datasets.Version("1.0.4")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="tiny",
            version=VERSION,
            description="A small subset of the data, to test downsteam workflows.",
        ),
        datasets.BuilderConfig(
            name="full",
            version=VERSION,
            description="The full dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "tiny"

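    # Forward the class-level VERSION to the base builder.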
    def __init__(self, **kwargs):
        super().__init__(version=self.VERSION, **kwargs)

    def _info(self):
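        # mode="I;16" decodes images as unsigned 16-bit integers, preserving
        # the native HST pixel depth.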
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(decode=True, mode="I;16"),
                    "ra": datasets.Value("float64"),
                    "dec": datasets.Value("float64"),
                    "pixscale": datasets.Value("float64"),
                    "image_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation="TBD",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Build the train/test splits from the JSONL manifests."""
        ret = []
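        # `_base_path` is a private DownloadManager attribute; it points at the
        # local checkout when the script runs from disk and at the HF endpoint
        # when the dataset is loaded from the Hub.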
        base_path = dl_manager._base_path
        locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
        _, path = url_to_fs(base_path)

        for split in ["train", "test"]:
            if locally_run:
                split_file_location = os.path.normpath(
                    os.path.join(path, _URLS[self.config.name][split])
                )
                split_file = dl_manager.download_and_extract(split_file_location)
            else:
                split_file = hf_hub_download(
                    repo_id=_REPO_ID,
                    filename=_URLS[self.config.name][split],
                    repo_type="dataset",
                )
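            # Each manifest line is a JSON record holding the relative FITS
            # path plus its metadata.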
            with open(split_file, encoding="utf-8") as f:
                data_filenames = []
                data_metadata = []
                for line in f:
                    item = json.loads(line)
                    data_filenames.append(item["image"])
                    data_metadata.append(
                        {
                            "ra": item["ra"],
                            "dec": item["dec"],
                            "pixscale": item["pixscale"],
                            "image_id": item["image_id"],
                        }
                    )
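                # Materialize the FITS files themselves, again either from the
                # local checkout or from the Hub.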
                if locally_run:
                    data_urls = [
                        os.path.normpath(os.path.join(path, data_filename))
                        for data_filename in data_filenames
                    ]
                    data_files = [
                        dl_manager.download(data_url) for data_url in data_urls
                    ]
                else:
                    data_urls = data_filenames
                    data_files = [
                        hf_hub_download(
                            repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
                        )
                        for data_url in data_urls
                    ]
            ret.append(
                datasets.SplitGenerator(
                    name=(
                        datasets.Split.TRAIN
                        if split == "train"
                        else datasets.Split.TEST
                    ),
                    gen_kwargs={
                        "filepaths": data_files,
                        "split_file": split_file,
                        "split": split,
                        "data_metadata": data_metadata,
                    },
                )
            )
        return ret

    def _generate_examples(self, filepaths, split_file, split, data_metadata):
        """Generate SBI-16-2D examples"""

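        # `filepaths` and `data_metadata` come from the same manifest, so
        # zipping keeps each file aligned with its metadata. Every file yields
        # two examples, one per image extension (HDUs 1 and 4, assumed to be
        # the SCI extensions of the two detector chips); memmap=False keeps the
        # arrays valid after the file is closed.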
        for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
            with fits.open(filepath, memmap=False) as hdul:
                # Process image data from HDU index 1
                image_data_1 = hdul[1].data.tolist()
                task_instance_key_1 = f"{self.config.name}-{split}-{idx}-HDU1"
                yield task_instance_key_1, {"image": image_data_1, **item}

                # Process image data from HDU index 4
                image_data_4 = hdul[4].data.tolist()
                task_instance_key_4 = f"{self.config.name}-{split}-{idx}-HDU4"
                yield task_instance_key_4, {"image": image_data_4, **item}
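

# Minimal usage sketch (illustrative, not part of the loader). With a
# `datasets` version that still supports script-based builders:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("AstroCompress/SBI-16-2D", "tiny", trust_remote_code=True)
#     img = ds["train"][0]["image"]  # decoded as a 16-bit PIL image ("I;16")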