import os
import json

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@dataset{gotthatdata_stargate_2024,
  title  = {STARGATE: CIA Remote Viewing Archive},
  author = {GotThatData},
  year   = {2024},
  url    = {https://huggingface.co/datasets/GotThatData/STARGATE}
}
"""

_DESCRIPTION = """\
STARGATE is a dataset of 12,000+ declassified CIA PDFs related to remote viewing (RV),
extrasensory perception (ESP), and anomalous cognition. This loader includes structured
metadata and binary access to the original scanned PDFs.
"""

_HOMEPAGE = "https://huggingface.co/datasets/GotThatData/STARGATE"

_LICENSE = "CC-BY-4.0"


class StargatePDFConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(StargatePDFConfig, self).__init__(**kwargs)


class StargatePDFDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        StargatePDFConfig(
            name="default",
            version=VERSION,
            description="STARGATE raw PDFs with metadata",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "filename": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "page_count": datasets.Value("int32"),
                    "image_count": datasets.Value("int32"),
                    "processed_at": datasets.Value("string"),
                    "ocr_status": datasets.Value("string"),
                    "text_extracted": datasets.Value("bool"),
                    "source": datasets.Value("string"),
                    "tags": datasets.Sequence(datasets.Value("string")),
                    "pdf": datasets.Value("binary"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Path to local or downloaded files
        archive_path = dl_manager.download_and_extract("./")  # Change if remote
        metadata_path = os.path.join(archive_path, "metadata.json")
        data_dir = os.path.join(archive_path, "data")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"metadata_path": metadata_path, "data_dir": data_dir},
            )
        ]

    def _generate_examples(self, metadata_path, data_dir):
        logger.info(f"⏳ Loading metadata from {metadata_path}")
        with open(metadata_path, "r", encoding="utf-8") as f:
            records = json.load(f)

        for idx, record in enumerate(records):
            pdf_path = os.path.join(data_dir, record["filename"])
            if not os.path.isfile(pdf_path):
                logger.warning(f"🚫 Missing PDF: {pdf_path}")
                continue

            with open(pdf_path, "rb") as pdf_file:
                yield idx, {
                    "filename": record.get("filename"),
                    "document_id": record.get(
                        "document_id", record["filename"].replace(".pdf", "")
                    ),
                    "page_count": record.get("page_count", 0),
                    "image_count": record.get("image_count", 0),
                    "processed_at": record.get("processed_at", ""),
                    "ocr_status": record.get("ocr_status", "pending"),
                    "text_extracted": record.get("text_extracted", False),
                    "source": record.get("source", "CIA Stargate Archive"),
                    "tags": record.get("tags", []),
                    "pdf": pdf_file.read(),
                }
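

# ------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loader). It assumes this script is
# saved as "stargate_pdf.py" next to metadata.json and a data/ directory of
# PDFs; that filename and layout are assumptions, not fixed by the code above.
# Script-based loaders require a datasets release that still supports them
# (pre-3.0), and recent 2.x versions may ask for trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    # Build the TRAIN split by running the generator above over metadata.json.
    ds = load_dataset("stargate_pdf.py", split="train")

    # Each example carries the per-document metadata plus the raw PDF bytes.
    first = ds[0]
    print(first["filename"], first["page_count"], len(first["pdf"]))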