MHoubre committed
Commit 1baaeaa · 1 Parent(s): a86bc19

first commit

Files changed (7)
  1. .gitattributes +3 -0
  2. README.md +28 -0
  3. dataset_infos.json +1 -0
  4. kp20k.py +150 -0
  5. test.json +3 -0
  6. train.json +3 -0
  7. validation.json +3 -0
.gitattributes CHANGED
@@ -36,3 +36,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ validation.json filter=lfs diff=lfs merge=lfs -text
+ test.json filter=lfs diff=lfs merge=lfs -text
+ train.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,28 @@
+ # KP20k dataset for Keyphrase Generation
+
+ ## About
+ KP20k is a dataset for benchmarking keyphrase extraction and generation models.
+ The data is composed of 570 809 abstracts and their associated titles from scientific articles.
+
+ Details about the dataset can be found in the original paper:
+ - Meng et al., 2017.
+ [Deep Keyphrase Generation](https://aclanthology.org/P17-1054.pdf).
+ Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, pages 582–592.
+
+ ## Content
+
+ The dataset is divided into the following three splits:
+
+ | Split      | # documents | # keyphrases per document (average) | % Present | % Reordered | % Mixed | % Unseen |
+ | :--------- | ----------: | ----------------------------------: | --------: | ----------: | ------: | -------: |
+ | Train      | 530 809     | 5.28                                 | 40.65     | 7.58        | 24.43   | 27.34    |
+ | Test       | 20 000      | 5.29                                 | 40.70     | 7.63        | 24.31   | 27.35    |
+ | Validation | 20 000      | 5.27                                 | 40.80     | 7.56        | 24.52   | 27.12    |
+
+
+ The following data fields are available:
+ - **id**: unique identifier of the document.
+ - **title**: title of the document.
+ - **abstract**: abstract of the document.
+ - **keyphrases**: list of reference keyphrases.
+ - **prmu**: list of <u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen categories for the reference keyphrases.
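For reference, a minimal loading sketch with the Hugging Face `datasets` library; the repository id below is a placeholder, and the `"raw"` configuration name comes from `kp20k.py` further down in this commit:

```python
from datasets import load_dataset

# Placeholder repository id: substitute the actual Hub path of this dataset.
kp20k = load_dataset("<user>/kp20k", "raw")

sample = kp20k["train"][0]
print(sample["title"])       # document title
print(sample["keyphrases"])  # list of reference keyphrases
print(sample["prmu"])        # Present/Reordered/Mixed/Unseen category for each keyphrase
```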
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"KP20k": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"},"title": {"dtype": "string", "id": null, "_type": "Value"},"abstract": {"dtype": "string", "id": null, "_type": "Value"}, "keyphrases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "prmu": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "json", "config_name": "KP20k", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 654714676, "num_examples": 530809, "dataset_name": "json"}, "test": {"name": "test", "num_bytes": 24675779, "num_examples": 20000, "dataset_name": "json"}, "validation": {"name": "validation", "num_bytes": 24657665, "num_examples": 20000, "dataset_name": "json"}}, "download_size": 720581004, "post_processing_size": null, "dataset_size": 704048120, "size_in_bytes": 1424629124}, "KP20k": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"abstract": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "keyphrases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "prmu": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "json", "config_name": "KP20k", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 654714676, "num_examples": 530809, "dataset_name": "json"}, "test": {"name": "test", "num_bytes": 24675779, "num_examples": 20000, "dataset_name": "json"}, "validation": {"name": "validation", "num_bytes": 24657665, "num_examples": 20000, "dataset_name": "json"}}, "download_size": 720581004, "post_processing_size": null, "dataset_size": 704048120, "size_in_bytes": 1424629124}}
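The JSON above records the features and split sizes that the `datasets` library expects. A small sketch to inspect it locally, assuming the file sits in the working directory:

```python
import json

# Print the number of examples recorded for each split in dataset_infos.json.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for split_name, split_info in infos["KP20k"]["splits"].items():
    print(split_name, split_info["num_examples"])
# Expected output: train 530809, test 20000, validation 20000
```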
kp20k.py ADDED
@@ -0,0 +1,150 @@
+ import csv
+ import json
+ import os
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+
+ _CITATION = """\
+ @InProceedings{meng-EtAl:2017:Long,
+ author = {Meng, Rui and Zhao, Sanqiang and Han, Shuguang and He, Daqing and Brusilovsky, Peter and Chi, Yu},
+ title = {Deep Keyphrase Generation},
+ booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+ month = {July},
+ year = {2017},
+ address = {Vancouver, Canada},
+ publisher = {Association for Computational Linguistics},
+ pages = {582--592},
+ url = {http://aclweb.org/anthology/P17-1054}
+ }
+ """
+
+ # You can copy an official description
+ _DESCRIPTION = """\
+ KP20k dataset for keyphrase extraction and generation in scientific papers.
+ """
+
+ _HOMEPAGE = "http://memray.me/uploads/acl17-keyphrase-generation.pdf"
+
+ # License information from the original source page https://github.com/memray/seq2seq-keyphrase
+ _LICENSE = "MIT LICENSE"
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "test": "test.json",
+     "train": "train.json",
+     "validation": "validation.json",
+ }
+
+ class KP20kConfig(datasets.BuilderConfig):
+
+     def __init__(self, **kwargs):
+
+         super(KP20kConfig, self).__init__(**kwargs)
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class KP20k(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("0.0.1", "")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         KP20kConfig(
+             name="raw",
+             version=VERSION,
+             description="This part of the dataset covers the raw data.",
+         ),
+     ]
+
+     # DEFAULT_CONFIG_NAME = "raw"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         print(self.config)
+         features = datasets.Features(
+             {
+                 "id": datasets.Value(dtype="string"),
+                 "title": datasets.Value("string"),
+                 "abstract": datasets.Value("string"),
+                 "keyphrases": datasets.features.Sequence(datasets.Value("string")),
+                 "prmu": datasets.features.Sequence(datasets.Value("string")),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS
+         data_dir = dl_manager.download_and_extract(urls)  # dict of local file paths keyed like _URLS
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                     "split": "validation",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 # Yields examples as (key, example) tuples
+                 yield key, {
+                     "id": data["id"],
+                     "title": data["title"],
+                     "abstract": data["abstract"],
+                     "keyphrases": data["keyphrases"],
+                     "prmu": data["prmu"],
+                 }
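`_generate_examples` treats each split file as JSON Lines: one JSON object per line carrying the five fields declared in `_info`. A minimal sketch of the expected record shape (the values here are hypothetical, for illustration only):

```python
import json

# Hypothetical record mimicking one line of train/test/validation.json.
line = (
    '{"id": "0", "title": "Deep Keyphrase Generation", "abstract": "...", '
    '"keyphrases": ["keyphrase generation"], "prmu": ["P"]}'
)

record = json.loads(line)
# _generate_examples reads these five fields from each parsed line.
assert {"id", "title", "abstract", "keyphrases", "prmu"} <= set(record)
```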
test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b6caeb55eaf941deb11f9e5152494310db2ac5970194e722798e3e035855561
+ size 25255559
train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59a20765c76126945e9eb298d7837175e886403f113c1a23c0cab7dc3cd9496d
+ size 670087948
validation.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c0ec7541c24c81b44c11c8cc5a0cbda88956a39bf94552dd03dcdf7fb25dd67
+ size 25237497