|
|
|
"""TODO: Add a description here.""" |
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import List |
|
import datasets |
|
import logging |
|
import xml.etree.ElementTree as ET |
|
import os |
|
|
|
|
|
|
|
# BibTeX citation for the dataset card.
# NOTE(review): title is still the template placeholder — replace before release.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={Shixuan An
},
year={2024}
}
"""

# Human-readable summary shown on the dataset card.
# NOTE(review): still the template placeholder text — replace before release.
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# Project homepage URL (not yet filled in).
_HOMEPAGE = ""

# License identifier/string (not yet filled in).
_LICENSE = ""

# Download location: single zip archive hosted by Mendeley Data containing
# the dataset images and XML annotations.
_URLS = {
    "dataset": "https://prod-dcd-datasets-cache-zipfiles.s3.eu-west-1.amazonaws.com/5ty2wb6gvg-1.zip"
}
|
|
|
|
|
|
|
class RDD2020_Dataset(datasets.GeneratorBasedBuilder):
    """RDD2020 road-damage dataset builder.

    Each example pairs a road image with the damage categories and pixel
    bounding boxes parsed from its per-image PASCAL-VOC-style XML annotation
    file. The archive is laid out as ``<split>/*.jpg`` plus
    ``<split>/annotations/*.xml`` for the ``train``, ``test1`` and ``test2``
    splits.
    """

    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the feature schema, homepage and citation metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image_id": datasets.Value("string"),
                "country": datasets.Value("string"),
                "type": datasets.Value("string"),
                "image_resolution": datasets.Features({
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "depth": datasets.Value("int32"),
                }),
                "image_path": datasets.Value("string"),
                # One entry per annotated damage instance; the two sequences
                # below are index-aligned.
                "crack_type": datasets.Sequence(datasets.Value("string")),
                "crack_coordinates": datasets.Sequence(datasets.Features({
                    "x_min": datasets.Value("int32"),
                    "x_max": datasets.Value("int32"),
                    "y_min": datasets.Value("int32"),
                    "y_max": datasets.Value("int32"),
                })),
            }),
            homepage='https://data.mendeley.com/datasets/5ty2wb6gvg/1',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and define the train/test1/test2 splits."""
        data_dir = dl_manager.download_and_extract(_URLS["dataset"])

        # All three splits share the same on-disk layout, so build the
        # generators from one table instead of three copy-pasted entries.
        split_names = [
            (datasets.Split.TRAIN, "train"),
            ("test1", "test1"),
            ("test2", "test2"),
        ]
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "images_dir": os.path.join(data_dir, subdir),
                    "annotations_dir": os.path.join(data_dir, subdir, "annotations"),
                    "split": subdir,
                },
            )
            for name, subdir in split_names
        ]

    def _generate_examples(self, images_dir, annotations_dir, split):
        """Yield ``(key, example)`` tuples for one split.

        Args:
            images_dir: Directory containing the split's ``.jpg`` images.
            annotations_dir: Directory containing the matching ``.xml`` files.
            split: Split name ("train", "test1" or "test2").

        Images without a corresponding annotation file are skipped silently.
        """
        for image_file in os.listdir(images_dir):
            if not image_file.endswith('.jpg'):
                continue
            # splitext (rather than split('.')[0]) keeps any extra dots in
            # the file name intact.
            image_id, _ = os.path.splitext(image_file)
            annotation_path = os.path.join(annotations_dir, image_id + '.xml')
            if not os.path.exists(annotation_path):
                continue

            root = ET.parse(annotation_path).getroot()

            # Bug fix: the country is encoded in the image-file prefix
            # (e.g. "India_000123.jpg"), not in the split name. The previous
            # `split.capitalize()` produced "Train"/"Test1"/"Test2", which can
            # never equal "India", so the India resolution branch below was
            # unreachable and every example was mislabelled.
            country = image_id.split('_')[0]

            image_path = os.path.join(images_dir, image_file)
            crack_type = []
            crack_coordinates = []
            for obj in root.findall('object'):
                crack_type.append(obj.find('name').text)
                bndbox = obj.find('bndbox')
                crack_coordinates.append({
                    "x_min": int(bndbox.find('xmin').text),
                    "x_max": int(bndbox.find('xmax').text),
                    "y_min": int(bndbox.find('ymin').text),
                    "y_max": int(bndbox.find('ymax').text),
                })

            # India images are 720x720; the other countries use 600x600.
            # NOTE(review): resolutions taken from the original hard-coded
            # mapping — confirm against the released archive.
            if country == "India":
                image_resolution = {"width": 720, "height": 720, "depth": 3}
            else:
                image_resolution = {"width": 600, "height": 600, "depth": 3}

            yield image_id, {
                "image_id": image_id,
                "country": country,
                "type": split,
                "image_resolution": image_resolution,
                "image_path": image_path,
                "crack_type": crack_type,
                "crack_coordinates": crack_coordinates,
            }
|
|
|
|
|
|