Kosuke-Yamada committed
Commit 05b69ca (parent: e39dd70)

modify file

Files changed (1)
  1. ner-wikinews-dataset.py +32 -23
ner-wikinews-dataset.py CHANGED
@@ -1,6 +1,17 @@
 import json
 
-import datasets
+from datasets import (
+    BuilderConfig,
+    DatasetInfo,
+    DownloadManager,
+    Features,
+    GeneratorBasedBuilder,
+    Sequence,
+    Split,
+    SplitGenerator,
+    Value,
+    Version,
+)
 
 _CITATION = ""
 _DESCRIPTION = "This is a dataset of Wikinews articles manually labeled with the named entity label."
@@ -9,29 +20,27 @@ _LICENSE = "This work is licensed under CC BY 2.5"
 _URL = "https://huggingface.co/datasets/llm-book/ner-wikinews-dataset/raw/main/annotated_wikinews.json"
 
 
-class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
+class NerWikinewsDataset(GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
+        BuilderConfig(
             name="new-wikinews-dataset",
-            version=datasets.Version("1.1.0"),
+            version=Version("1.0.0"),
             description=_DESCRIPTION,
         ),
     ]
 
     def _info(self):
-        return datasets.DatasetInfo(
+        return DatasetInfo(
             description=_DESCRIPTION,
-            features=datasets.Features(
+            features=Features(
                 {
-                    "curid": datasets.Value("string"),
-                    "text": datasets.Value("string"),
+                    "curid": Value("string"),
+                    "text": Value("string"),
                     "entities": [
                         {
-                            "name": datasets.Value(dtype="string"),
-                            "span": datasets.Sequence(
-                                datasets.Value(dtype="int64"), length=2
-                            ),
-                            "type": datasets.Value(dtype="string"),
+                            "name": Value("string"),
+                            "span": Sequence(Value("int64"), length=2),
+                            "type": Value("string"),
                         }
                     ],
                 }
@@ -72,24 +81,24 @@ class NerWikinewsDataset(datasets.GeneratorBasedBuilder):
             )
         return outputs
 
-    def _split_generators(self, dl_manager):
+    def _split_generators(
+        self, dl_manager: DownloadManager
+    ) -> list[SplitGenerator]:
         data_file = dl_manager.download_and_extract(_URL)
         with open(data_file, "r") as f:
             data = json.load(f)
-
         data = self._convert_data_format(data)
-
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+            SplitGenerator(
+                name=Split.TEST,
                 gen_kwargs={"data": data},
             ),
         ]
 
-    def _generate_examples(self, data):
-        for key, data in enumerate(data):
+    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
+        for key, d in enumerate(data):
             yield key, {
-                "curid": data["curid"],
-                "text": data["text"],
-                "entities": data["entities"],
+                "curid": d["curid"],
+                "text": d["text"],
+                "entities": d["entities"],
             }
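
One caveat with the revised annotations: `_generate_examples` is typed as returning `Generator`, but that name is not among the imports added in this commit, so evaluating the annotation would raise a NameError at import time (absent a `from __future__ import annotations`). Below is a minimal standalone sketch of how the annotation could be made resolvable, assuming the standard-library `collections.abc.Generator` is what was intended; this is not part of the commit.

# Sketch only: standalone version of the generator with an explicit import for
# the Generator annotation (assumed to come from collections.abc).
from collections.abc import Generator
from typing import Any


def generate_examples(
    data: list[dict[str, Any]],
) -> Generator[tuple[int, dict[str, Any]], None, None]:
    # Mirrors the loop in _generate_examples: one (key, example) pair per article.
    for key, d in enumerate(data):
        yield key, {
            "curid": d["curid"],
            "text": d["text"],
            "entities": d["entities"],
        }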
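
For a quick end-to-end check of the revised script, the dataset can be loaded through `datasets.load_dataset`. The repository id below is inferred from `_URL`, the split name comes from `Split.TEST`, and `trust_remote_code=True` is an assumption for recent `datasets` releases that gate script-based datasets.

from datasets import load_dataset

# Repository id inferred from _URL; the script only defines a "test" split.
ds = load_dataset(
    "llm-book/ner-wikinews-dataset",
    split="test",
    trust_remote_code=True,  # assumed: needed on newer datasets releases for loading scripts
)

example = ds[0]
print(example["curid"], example["text"][:80])
for entity in example["entities"]:
    # Each entity carries a surface string ("name"), a two-element [start, end] span, and a "type" label.
    print(entity["name"], entity["span"], entity["type"])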