import glob
import os
import csv

import datasets
from PIL import Image

_VERSION = "2024-07-17"
_URL = f"https://github.com/DEFI-COLaF/LADaS/archive/refs/tags/{_VERSION}.tar.gz"
_HOMEPAGE = "https://github.com/DEFI-COLaF/LADaS"
_LICENSE = "CC BY 4.0"
_CITATION = """\
@misc{Clerice_Layout_Analysis_Dataset,
  author = {Clérice, Thibault and Janès, Juliette and Scheithauer, Hugo and Bénière, Sarah and Romary, Laurent and Sagot, Benoit and Bougrelle, Roxane},
  title = {{Layout Analysis Dataset with SegmOnto (LADaS)}},
  url = {https://github.com/DEFI-COLaF/LADaS}
}
"""

# SegmOnto layout categories; the index in this list matches the class index
# stored in the label files.
_CATEGORIES: list[str] = [
    "AdvertisementZone", "DigitizationArtefactZone", "DropCapitalZone", "FigureZone",
    "FigureZone-FigDesc", "FigureZone-Head", "GraphicZone", "GraphicZone-Decoration",
    "GraphicZone-FigDesc", "GraphicZone-Head", "GraphicZone-Maths", "GraphicZone-Part",
    "GraphicZone-TextualContent", "MainZone-Date", "MainZone-Entry", "MainZone-Entry-Continued",
    "MainZone-Form", "MainZone-Head", "MainZone-Lg", "MainZone-Lg-Continued", "MainZone-List",
    "MainZone-List-Continued", "MainZone-Other", "MainZone-P", "MainZone-P-Continued",
    "MainZone-Signature", "MainZone-Sp", "MainZone-Sp-Continued",
    "MarginTextZone-ManuscriptAddendum", "MarginTextZone-Notes", "MarginTextZone-Notes-Continued",
    "NumberingZone", "TitlePageZone", "TitlePageZone-Index", "QuireMarksZone",
    "RunningTitleZone", "StampZone", "StampZone-Sticker", "TableZone", "TableZone-Continued",
    "TableZone-Head",
]


class LadasConfig(datasets.BuilderConfig):
    """Builder Config for LADaS"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


class LadasDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version(_VERSION.replace("-", "."))

    BUILDER_CONFIGS = [
        LadasConfig(
            name="full",
            description="Full version of the dataset"
        )
    ]

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features({
            "image_path": datasets.Value("string"),
            "year": datasets.Value("int32"),
            "dating-certainty": datasets.Value("bool"),
            "set": datasets.Value("string"),
            "image": datasets.Image(),
            "width": datasets.Value("int32"),
            "height": datasets.Value("int32"),
            "objects": datasets.Sequence(
                {
                    "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                    "category": datasets.Value("string"),
                }
            )
        })
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        urls_to_download = _URL
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_dir": downloaded_files,
                    "split": "train"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "local_dir": downloaded_files,
                    "split": "valid"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_dir": downloaded_files,
                    "split": "test"
                },
            ),
        ]

    def _generate_examples(self, local_dir: str, split: str):
        idx = 0

        # Index the per-subset metadata rows by file name so each image can be
        # joined with its year, dating certainty and subset.
        df = {}
        for file in glob.glob(os.path.join(local_dir, "*", "metadata.csv")):
            with open(file) as f:
                reader = csv.DictReader(f)
                for line in reader:
                    df[line["file"]] = line

        for file in glob.glob(os.path.join(local_dir, "*", "data", "*", split, "labels", "*.txt")):
            # Each label line holds a class index followed by four bbox floats.
            objects = []
            with open(file) as f:
                for line in f:
                    cls, *bbox = line.strip().split()
                    objects.append({"category": _CATEGORIES[int(cls)], "bbox": list(map(float, bbox))})

            # The image lives in the sibling "images" directory, with the same
            # base name as the label file.
            image_path = os.path.normpath(file).split(os.sep)
            image_path = os.path.join(*image_path[:-2], "images", image_path[-1].replace(".txt", ".jpg"))
            if file.startswith("/") and not image_path.startswith("/"):
                image_path = "/" + image_path

            with open(image_path, "rb") as f:
                image_bytes = f.read()
            with Image.open(image_path) as im:
                width, height = im.size

            filename = os.path.basename(image_path)
            line = df[filename]
            yield idx, {
                "image_path": f"{line['subset']}/{filename}",
                "image": {"path": image_path, "bytes": image_bytes},
                "year": line["year"] or None,
                "dating-certainty": line["dating-certainty"],
                "set": line["subset"],
                "width": width,
                "height": height,
                "objects": objects,
            }
            idx += 1


if __name__ == "__main__":
    LadasDataset().download_and_prepare()
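
# Example usage (a sketch, assuming this script is saved locally as "ladas.py";
# the file name and the need for `trust_remote_code` depend on your setup and
# `datasets` version):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("ladas.py", name="full", trust_remote_code=True)
#   sample = ds["train"][0]
#   print(sample["image_path"], sample["width"], sample["height"])
#   print(sample["objects"]["category"], sample["objects"]["bbox"])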