# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RefSeg: a German reference segmentation dataset.

Each example is a pre-tokenized sequence of bibliographic reference strings
with token-level BIO labels for bibliographic fields (author, title, year,
...) and a parallel BIO sequence ('B-ref'/'I-ref') marking where individual
references begin and continue.
"""
import os
from glob import glob
from itertools import chain

import pandas as pd
from tokenizers.pre_tokenizers import Digits, Sequence, Split, Whitespace

import datasets
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""
_DESCRIPTION = """\
RefSeg is a German reference segmentation dataset. Each example is a
pre-tokenized sequence of bibliographic reference strings with token-level
BIO labels for bibliographic fields (author, title, year, volume, ...) and a
parallel BIO sequence marking the boundaries of individual references.
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method).
_URLS = {
    'train': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_train.zip",
    'test': "http://hyperion.bbirke.de/data/ref_seg/ref_seg_ger_test.zip",
}
_LABELS = [
    'publisher', 'source', 'url', 'other', 'author', 'editor', 'lpage',
    'volume', 'year', 'issue', 'title', 'fpage', 'edition'
]
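# For reference: the ClassLabel names built below expand _LABELS with B-/I-
# prefixes plus a single 'O' tag, i.e. 2 * 13 + 1 = 27 classes, e.g.
# 'B-author', 'I-author', ..., 'B-edition', 'I-edition', 'O'.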
_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "tokens": datasets.Sequence(datasets.Value("string")),
        # Planned layout/visual features, currently disabled:
        # "attention_mask": datasets.Sequence(datasets.Value("int64")),
        # "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # "fonts": datasets.Sequence(datasets.Value("string")),
        # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
        # "original_image": datasets.features.Image(),
        "labels": datasets.Sequence(datasets.features.ClassLabel(
            names=list(chain.from_iterable([['B-' + x, 'I-' + x] for x in _LABELS])) + ['O']
        )),
        "labels_ref": datasets.Sequence(datasets.features.ClassLabel(
            names=['B-ref', 'I-ref']
        )),
    }
)
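# A minimal sketch of one record under the schema above (values are
# illustrative, not taken from the real data):
#
#   {
#       "id": "somefile.csv",
#       "tokens": ["Müller", ",", "A", ".", "(", "1999", ")"],
#       "labels": ["B-author", "I-author", "I-author", "I-author", "O", "B-year", "O"],
#       "labels_ref": ["B-ref", "I-ref", "I-ref", "I-ref", "I-ref", "I-ref", "I-ref"],
#   }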
class RefSeg(datasets.GeneratorBasedBuilder):
    """German reference segmentation: token-level BIO tagging of reference strings."""

    # Maximum tokens per example chunk; chunking is currently disabled in
    # _generate_examples, so each CSV file yields a single example.
    CHUNK_SIZE = 256
    VERSION = datasets.Version("1.0.0")
    # Pre-tokenizer: split on whitespace, keep digit runs together, and
    # isolate common punctuation so every label can be assigned per token.
    split_tokens = [".", ":", ",", ";", "/", "-", "(", ")"]
    TOKENIZER = Sequence([
        Whitespace(),
        Digits(),
    ] + [Split(x, behavior="isolated") for x in split_tokens])
    # TOKENIZER = Whitespace()
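    # Illustrative only: pre_tokenize_str returns (token, (start, end)) pairs;
    # exact offsets depend on the installed tokenizers version. E.g.
    #
    #   >>> RefSeg.TOKENIZER.pre_tokenize_str("Müller (1999)")
    #   [('Müller', (0, 6)), ('(', (7, 8)), ('1999', (8, 12)), (')', (12, 13))]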
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # download_and_extract accepts the nested _URLS dict and returns the
        # same structure with each URL replaced by the local path to the
        # extracted archive.
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['train'],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir['test'],
                    "split": "test",
                },
            ),
        ]
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # Yields (key, example) tuples; the key is kept for legacy (tfds)
        # reasons and only has to be unique per example.
        paths = glob(os.path.join(filepath, split, '*.csv'))
        key = 0
        for f in paths:
            df = pd.read_csv(f, keep_default_na=False)
            input_ids = []
            labels = []
            refs = []
            for _, row in df.iterrows():
                tkn = self.TOKENIZER.pre_tokenize_str(row['token'])
                # 'identifier' is not part of _LABELS, so fold it into 'other'.
                if row['label'] == 'identifier':
                    row['label'] = 'other'
                if not tkn:
                    continue
                tokenized_input, _offsets = zip(*tkn)
                tokenized_input = list(tokenized_input)
                # The first sub-token keeps the row's reference-boundary tag
                # ('B-ref' or 'I-ref'); all following sub-tokens are inside.
                for t in range(len(tokenized_input)):
                    if t == 0:
                        refs.append(row['ref'] + '-ref')
                    else:
                        refs.append('I-ref')
                if len(tokenized_input) > 1:
                    if row['tag'] == 'B':
                        # A 'B' row keeps 'B-' on its first sub-token; the
                        # remaining sub-tokens continue the entity as 'I-'.
                        if tokenized_input[0] == '':
                            continue
                        input_ids.append(tokenized_input[0])
                        labels.append(row['tag'] + '-' + row['label'])
                        for input_id in tokenized_input[1:]:
                            if input_id == '':
                                continue
                            input_ids.append(input_id)
                            labels.append('I-' + row['label'])
                    elif row['tag'] == 'I':
                        for input_id in tokenized_input:
                            input_ids.append(input_id)
                            labels.append('I-' + row['label'])
                    else:  # 'O' rows: every sub-token stays outside.
                        if tokenized_input[0] == '':
                            continue
                        for input_id in tokenized_input:
                            input_ids.append(input_id)
                            labels.append('O')
                elif len(tokenized_input) == 1:
                    if tokenized_input[0] == '':
                        continue
                    input_ids.append(tokenized_input[0])
                    if row['tag'] == 'O':
                        labels.append(row['tag'])
                    else:
                        labels.append(row['tag'] + '-' + row['label'])
                else:
                    continue
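            # Illustrative example of the re-tagging above (not real data):
            # a row with token "Berlin:", tag 'B', label 'publisher' is
            # pre-tokenized to ["Berlin", ":"] and labelled
            # ["B-publisher", "I-publisher"].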
            # Drop empty tokens. The pre-tokenizers above should never emit
            # empty strings, so labels and refs are assumed to stay aligned
            # with input_ids by index.
            clean_input_ids = []
            clean_labels = []
            clean_refs = []
            for i, token in enumerate(input_ids):
                if token != '':
                    clean_input_ids.append(token)
                    clean_labels.append(labels[i])
                    clean_refs.append(refs[i])
            # Chunking into CHUNK_SIZE pieces is currently disabled; each CSV
            # file is emitted as one (possibly long) example.
            yield key, {
                "id": f"{os.path.basename(f)}",
                "tokens": clean_input_ids,
                # Planned layout/visual fields, currently disabled:
                # "attention_mask": [1] * len(clean_input_ids),
                # "bbox": split_bboxes,
                # "RGBs": split_rgbs,
                # "fonts": split_fonts,
                # "image": image,
                # "original_image": original_image,
                "labels": clean_labels,
                "labels_ref": clean_refs,
            }
            key += 1
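

# A minimal usage sketch, assuming this file is saved locally as ref_seg.py
# (newer versions of `datasets` may require trust_remote_code=True for
# script-based datasets):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/ref_seg.py")
#   print(ds["train"][0]["tokens"][:8])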