from __future__ import annotations
import json
import random
from typing import Any, Generator
from datasets import (
BuilderConfig,
DatasetInfo,
DownloadManager,
Features,
GeneratorBasedBuilder,
Sequence,
Split,
SplitGenerator,
Value,
Version,
)
from datasets.data_files import DataFilesDict

_CITATION = """
@inproceedings{omi-2021-wikipedia,
title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
author = "近江 崇宏",
booktitle = "言語処理学会第27回年次大会",
year = "2021",
url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
}
"""
_DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."
_HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"
_LICENSE = "CC-BY-SA 3.0"
_URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"


class NerWikipediaDatasetConfig(BuilderConfig):
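    """BuilderConfig that adds shuffle/seed and train/validation split-ratio options."""
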
def __init__(
self,
name: str = "default",
version: Version | str | None = Version("0.0.0"),
data_dir: str | None = None,
data_files: DataFilesDict | None = None,
description: str | None = None,
shuffle: bool = True,
seed: int = 42,
train_ratio: float = 0.8,
validation_ratio: float = 0.1,
) -> None:
super().__init__(
name=name,
version=version,
data_dir=data_dir,
data_files=data_files,
description=description,
)
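        # Split options: shuffle the raw data with `seed`, then take
        # `train_ratio` and `validation_ratio` fractions; the rest is test.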
self.shuffle = shuffle
self.seed = seed
self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio


class NerWikipediaDataset(GeneratorBasedBuilder):
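    """Builds the Stockmark NER Wikipedia dataset with deterministic train/validation/test splits."""
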
BUILDER_CONFIG_CLASS = NerWikipediaDatasetConfig
BUILDER_CONFIGS = [
NerWikipediaDatasetConfig(
name="ner-wikipedia-dataset",
version=Version("2.0.0"),
description=_DESCRIPTION,
),
    ]

def _info(self) -> DatasetInfo:
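        # One record per Wikipedia article: page id, raw text, and a list of
        # entities, each with a surface form, a [start, end] span, and a type.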
return DatasetInfo(
description=_DESCRIPTION,
features=Features(
{
"curid": Value("string"),
"text": Value("string"),
"entities": [
{
"name": Value("string"),
"span": Sequence(Value("int64"), length=2),
"type": Value("string"),
}
],
}
),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)

    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> list[SplitGenerator]:
        # download_and_extract returns the local path of the downloaded JSON file
        dataset_path = str(dl_manager.download_and_extract(_URL))
        with open(dataset_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        # Shuffle with a fixed seed so the splits are reproducible across runs
        if self.config.shuffle:
            random.seed(self.config.seed)
            random.shuffle(data)
        # Carve the data into train/validation/test according to the configured
        # ratios; the test split receives whatever remains after the first two.
        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[
            num_train_data : num_train_data + num_validation_data
        ]
        test_data = data[num_train_data + num_validation_data :]
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={"data": train_data},
),
SplitGenerator(
name=Split.VALIDATION,
gen_kwargs={"data": validation_data},
),
SplitGenerator(
name=Split.TEST,
gen_kwargs={"data": test_data},
),
]

    def _generate_examples(
        self, data: list[dict[str, Any]]
    ) -> Generator[tuple[int, dict[str, Any]], None, None]:
        # Yield (key, example) pairs; entities keep the upstream schema of
        # {"name": str, "span": [start, end], "type": str}.
        for i, d in enumerate(data):
            yield i, {
                "curid": d["curid"],
                "text": d["text"],
                "entities": d["entities"],
            }
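
# Usage sketch (illustrative, not part of the upstream repository). The file
# name below is an assumption; newer `datasets` releases may also require
# `trust_remote_code=True` when loading a script-based dataset.
#
# from datasets import load_dataset
#
# dataset = load_dataset(
#     "ner_wikipedia_dataset.py",  # hypothetical path to this script
#     shuffle=True,                # forwarded to NerWikipediaDatasetConfig
#     seed=42,
#     train_ratio=0.8,
#     validation_ratio=0.1,
# )
# print(dataset["train"][0])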