# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset for automatic summarization of Russian news"""

import json
import os

import datasets

_CITATION = """
@InProceedings{10.1007/978-3-030-59082-6_9,
    author="Gusev, Ilya",
    editor="Filchenkov, Andrey and Kauttonen, Janne and Pivovarova, Lidia",
    title="Dataset for Automatic Summarization of Russian News",
    booktitle="Artificial Intelligence and Natural Language",
    year="2020",
    publisher="Springer International Publishing",
    address="Cham",
    pages="122--134",
    isbn="978-3-030-59082-6"
}
"""

_DESCRIPTION = """\
Dataset for automatic summarization of Russian news
"""

_HOMEPAGE = "https://github.com/IlyaGusev/gazeta"

_LICENSE = ""

_URLs = {
    'github.com': "https://github.com/IlyaGusev/gazeta/releases/download/0.1/gazeta_jsonl.tar.gz",
}


class GazetaDataset(datasets.GeneratorBasedBuilder):
    """Dataset for automatic summarization of Russian news"""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="github.com", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "github.com"  # A default configuration is optional; use one only if it makes sense.

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "title": datasets.Value("string"),
                "date": datasets.Value("string"),
                "url": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The configuration selected by the user is available as self.config.name.
        # dl_manager downloads and extracts the archive and returns the path to the
        # folder containing the extracted train/validation/test JSONL files.
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "gazeta_train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "gazeta_test.jsonl"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "gazeta_val.jsonl"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (key, example) pairs; each line of the JSONL file is one example."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, data
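

# Usage sketch (an illustrative assumption, not part of the original script): once this
# loading script is published on the Hugging Face Hub (e.g. under "IlyaGusev/gazeta"),
# the dataset could be loaded roughly like this:
#
#     from datasets import load_dataset
#
#     gazeta = load_dataset("IlyaGusev/gazeta", "github.com")
#     print(gazeta["train"][0]["title"])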