iluvvatar commited on
Commit
7d04c1d
·
0 Parent(s):
Files changed (5) hide show
  1. .gitattributes +38 -0
  2. README.md +65 -0
  3. RuREBus.py +142 -0
  4. data/test.jsonl +0 -0
  5. data/train.jsonl +0 -0
.gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.wasm filter=lfs diff=lfs merge=lfs -text
25
+ *.xz filter=lfs diff=lfs merge=lfs -text
26
+ *.zip filter=lfs diff=lfs merge=lfs -text
27
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
28
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
29
+ # Audio files - uncompressed
30
+ *.pcm filter=lfs diff=lfs merge=lfs -text
31
+ *.sam filter=lfs diff=lfs merge=lfs -text
32
+ *.raw filter=lfs diff=lfs merge=lfs -text
33
+ # Audio files - compressed
34
+ *.aac filter=lfs diff=lfs merge=lfs -text
35
+ *.flac filter=lfs diff=lfs merge=lfs -text
36
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
37
+ *.ogg filter=lfs diff=lfs merge=lfs -text
38
+ *.wav filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ languages:
3
+ - ru
4
+ multilinguality:
5
+ - monolingual
6
+ pretty_name: RuREBus
7
+ task_categories:
8
+ - structure-prediction
9
+ task_ids:
10
+ - named-entity-recognition
11
+ ---
12
+
13
+ # RuREBus dataset
14
+
15
+ ## Table of Contents
16
+ - [Dataset Description](#dataset-description)
17
+ - [Dataset Structure](#dataset-structure)
18
+ - [Citation Information](#citation-information)
19
+ - [Contacts](#contacts)
20
+
21
+ ## Dataset Description
22
+ RuREBus dataset (https://github.com/dialogue-evaluation/RuREBus) is
23
+ a Russian dataset for named entity recognition and relation extraction.
24
+
25
+ ## Dataset Structure
26
+ There are two subsets of the dataset.
27
+
28
+ Using
29
+ `load_dataset('MalakhovIlya/RuREBus')`
30
+ you can download annotated data (DatasetDict) for the named entity recognition and
31
+ relation extraction tasks.
32
+ This subset consists of two splits: "train" and "test".
33
+
34
+ Using
35
+ `load_dataset('MalakhovIlya/RuREBus', 'raw_txt')['raw_txt']`
36
+ you can download a large corpus (~3 GB, as a Dataset) of raw texts of the same subject
37
+ area, but without any annotations.
38
+
39
+ "entities" are used in named-entity recognition task (see https://en.wikipedia.org/wiki/Named-entity_recognition).
40
+ "relations" are used in relationship extraction task (see https://en.wikipedia.org/wiki/Relationship_extraction).
41
+
42
+ Each entity is represented by a string of the following format:
43
+ `"<id>\t<type> <start> <stop>\t<text>"`, where
44
+ `<id>` is an entity id,
45
+ `<type>` is one of entity types,
46
+ `<start>` is a position of the first symbol of entity in text,
47
+ `<stop>` is the last symbol position in text +1.
48
+
49
+ Each relation is represented by a string of the following format:
50
+ `"<id>\t<type> Arg1:<arg1_id> Arg2:<arg2_id>"`, where
51
+ `<id>` is a relation id,
52
+ `<arg1_id>` and `<arg2_id>` are entity ids.
53
+
54
+ ## Citation Information
55
+ @inproceedings{rurebus,
56
+ Address = {Moscow, Russia},
57
+ Author = {Ivanin, Vitaly and Artemova, Ekaterina and Batura, Tatiana and Ivanov, Vladimir and Sarkisyan, Veronika and Tutubalina, Elena and Smurov, Ivan},
58
+ Title = {RuREBus-2020 Shared Task: Russian Relation Extraction for Business},
59
+ Booktitle = {Computational Linguistics and Intellectual Technologies: Proceedings of the International Conference “Dialog” [Komp’iuternaia Lingvistika i Intellektual’nye Tehnologii: Trudy Mezhdunarodnoj Konferentsii “Dialog”]},
60
+ Year = {2020}
61
+ }
62
+
63
+ ## Contacts
64
+ Malakhov Ilya
65
+ Telegram - https://t.me/noname_4710
RuREBus.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import json
3
+ import requests
4
+ from urllib.parse import urlencode
5
+ from pathlib import Path
6
+ import zipfile
7
+ import os
8
+ import shutil
9
+
10
+ _NAME = 'RuREBus'
11
+ _CITATION = '''
12
+ @inproceedings{rurebus,
13
+ Address = {Moscow, Russia},
14
+ Author = {Ivanin, Vitaly and Artemova, Ekaterina and Batura, Tatiana and Ivanov, Vladimir and Sarkisyan, Veronika and Tutubalina, Elena and Smurov, Ivan},
15
+ Title = {RuREBus-2020 Shared Task: Russian Relation Extraction for Business},
16
+ Booktitle = {Computational Linguistics and Intellectual Technologies: Proceedings of the International Conference “Dialog” [Komp’iuternaia Lingvistika i Intellektual’nye Tehnologii: Trudy Mezhdunarodnoj Konferentsii “Dialog”]},
17
+ Year = {2020}
18
+ }
19
+ '''.strip()
20
+ _DESCRIPTION = 'Russian Relation Extraction for Business'
21
+ _HOMEPAGE = 'https://github.com/dialogue-evaluation/RuREBus'
22
+ _VERSION = '1.0.0'
23
+
24
+
25
+ def decode_file_names(folder):
26
+ for root, dirs, files in os.walk(folder, topdown=False):
27
+ root = Path(root)
28
+ for file in files:
29
+ old_name = root / Path(file)
30
+ new_name = root / Path(
31
+ file.encode('cp437').decode('cp866'))
32
+ old_name.rename(new_name)
33
+ for dir in dirs:
34
+ old_name = root / Path(dir)
35
+ new_name = root / Path(dir.encode('cp437').decode('cp866'))
36
+ old_name.rename(new_name)
37
+
38
+
39
+ class RuREBusBuilder(datasets.GeneratorBasedBuilder):
40
+ base_url = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?'
41
+ public_key = 'https://disk.yandex.ru/d/t1WakmYXlL6jBw'
42
+ final_url = base_url + urlencode(dict(public_key=public_key))
43
+ response = requests.get(final_url)
44
+ raw_txt_url = response.json()['href']
45
+
46
+ _DATA_URLS = {
47
+ 'train': 'data/train.jsonl',
48
+ 'test': f'data/test.jsonl',
49
+ }
50
+ _RAW_TXT_URLS = {
51
+ 'raw_txt': raw_txt_url
52
+ }
53
+ VERSION = datasets.Version(_VERSION)
54
+ BUILDER_CONFIGS = [
55
+ datasets.BuilderConfig('data',
56
+ version=VERSION,
57
+ description='Annotated data'),
58
+ datasets.BuilderConfig('raw_txt',
59
+ version=VERSION,
60
+ description='Raw texts without annotations'),
61
+ ]
62
+ DEFAULT_CONFIG_NAME = 'data'
63
+
64
+ def _info(self) -> datasets.DatasetInfo:
65
+ if self.config.name == 'data':
66
+ features = datasets.Features({
67
+ 'id': datasets.Value('int32'),
68
+ 'text': datasets.Value('string'),
69
+ 'entities': datasets.Sequence(datasets.Value('string')),
70
+ 'relations': datasets.Sequence(datasets.Value('string'))
71
+ })
72
+ else:
73
+ features = datasets.Features({
74
+ 'region': datasets.Value('string'),
75
+ 'district': datasets.Value('string'),
76
+ 'title': datasets.Value('string'),
77
+ 'text': datasets.Value('string')
78
+ })
79
+ return datasets.DatasetInfo(
80
+ description=_DESCRIPTION,
81
+ features=features,
82
+ homepage=_HOMEPAGE,
83
+ citation=_CITATION
84
+ )
85
+
86
+ def _split_generators(self, dl_manager: datasets.DownloadManager):
87
+ if self.config.name == 'data':
88
+ files = dl_manager.download(self._DATA_URLS)
89
+ return [
90
+ datasets.SplitGenerator(
91
+ name=datasets.Split.TRAIN,
92
+ gen_kwargs={'filepath': files['train']},
93
+ ),
94
+ datasets.SplitGenerator(
95
+ name=datasets.Split.TEST,
96
+ gen_kwargs={'filepath': files['test']},
97
+ ),
98
+ ]
99
+ else:
100
+ folder = dl_manager.download_and_extract(self._RAW_TXT_URLS)['raw_txt']
101
+ decode_file_names(folder)
102
+ return [
103
+ datasets.SplitGenerator(
104
+ name='raw_txt',
105
+ gen_kwargs={'filepath': folder,
106
+ 'raw_txt': True},
107
+ )
108
+ ]
109
+
110
+ def _generate_examples(self, filepath, raw_txt=False):
111
+ if not raw_txt:
112
+ with open(filepath, encoding='utf-8') as f:
113
+ for i, line in enumerate(f):
114
+ yield i, json.loads(line)
115
+ else:
116
+ path = Path(filepath) / 'MED_txt/unparsed_txt'
117
+ i = 0
118
+ for root, dirs, files in os.walk(path):
119
+ if files:
120
+ root = Path(root)
121
+ region = root.parent.name
122
+ district = root.name
123
+ titles = {}
124
+ with open(root / 'name_dict.txt', encoding='utf-8') as f_titles:
125
+ for line in f_titles:
126
+ key, title = line.split(maxsplit=1)[1].split('_', maxsplit=1)
127
+ titles[key] = title.strip()
128
+ for file in files:
129
+ if file != 'name_dict.txt':
130
+ file = Path(file)
131
+ key = file.name.split('_', maxsplit=1)[0]
132
+ title = titles[key]
133
+ with open(root / file, encoding='utf-8') as f:
134
+ text = f.read()
135
+ item = {
136
+ 'region': region,
137
+ 'district': district,
138
+ 'title': title,
139
+ 'text': text
140
+ }
141
+ yield i, item
142
+ i += 1
data/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train.jsonl ADDED
The diff for this file is too large to render. See raw diff