symeneses committed on
Commit
64323cf
·
1 Parent(s): c5674a6

add script and update readme

Browse files
Files changed (2) hide show
  1. README.md +57 -0
  2. merlin.py +150 -0
README.md CHANGED
@@ -1,3 +1,60 @@
1
  ---
2
  license: cc-by-sa-4.0
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: cc-by-sa-4.0
3
+ task_categories:
4
+ - text-classification
5
+ language:
6
+ - de
7
+ - it
8
+ - cs
9
+ pretty_name: MERLIN Written Learner Corpus for Czech, German, Italian 1.1.
10
+ size_categories:
11
+ - 1K<n<10K
12
  ---
13
+
14
+ # Dataset Card for MERLIN
15
+
16
+ The MERLIN corpus is a written learner corpus for Czech, German, and Italian that has been
17
+ designed to illustrate the Common European Framework of Reference for Languages (CEFR) with
18
+ authentic learner data. The corpus contains learner texts produced in standardized language
19
+ certifications covering CEFR levels A1-C1. The MERLIN annotation scheme includes a wide
20
+ range of language characteristics that provide researchers with concrete examples of learner
21
+ performance and progress across multiple proficiency levels.
22
+
23
+ ## Dataset Details
24
+
25
+ ### Dataset Description
26
+
27
+ The MERLIN corpus contains 2,286 texts for learners of Italian, German and Czech that were taken from written examinations of acknowledged test institutions. The exams aim to test knowledge across the levels A1-C1 of the Common European Framework of Reference (CEFR).
28
+
29
+ - **Homepage:** https://merlin-platform.eu/
30
+ - **Funded by:** The MERLIN project was funded from 2012 until 2014 by the EU Lifelong Learning Programme under project number 518989-LLP-1-2011-1-DE-KA2-KA2MP.
31
+ - **Shared by:** Since 2018, corpus data are available through the CLARIN network.
32
+ - **Language(s) (NLP):** Czech, German and Italian
33
+ - **License:** Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)
34
+
35
+ ### Dataset Sources
36
+
37
+ - **Repository:** https://clarin.eurac.edu/repository/xmlui/handle/20.500.12124/6
38
+ - **Paper:** Wisniewski, Katrin; Abel, Andrea; Vodičková, Kateřina; et al., 2018,
39
+ MERLIN Written Learner Corpus for Czech, German, Italian 1.1, Eurac Research CLARIN Centre,
40
+ http://hdl.handle.net/20.500.12124/6.
41
+
42
+ ## Uses
43
+
44
+ - Teachers and material writers
45
+ - Curriculum design and course planning
46
+ - Language testing
47
+
48
+ For more details and practical examples, see [use cases](https://www.merlin-platform.eu/C_teacher.php).
49
+
50
+ ## Citation
51
+
52
+ **BibTeX:**
53
+
54
+ @misc{20.500.12124/6,
55
+ title = {{MERLIN} Written Learner Corpus for Czech, German, Italian 1.1},
56
+ author = {Wisniewski, Katrin and Abel, Andrea and Vodi{\v c}kov{\'a}, Kate{\v r}ina and Plassmann, Sybille and Meurers, Detmar and Woldt, Claudia and Sch{\"o}ne, Karin and Blaschitz, Verena and Lyding, Verena and Nicolas, Lionel and Vettori, Chiara and Pe{\v c}en{\'y}, Pavel and Hana, Jirka and {\v C}urdov{\'a}, Veronika and {\v S}tindlov{\'a}, Barbora and Klein, Gudrun and Lauppe, Louise and Boyd, Adriane and Bykh, Serhiy and Krivanek, Julia},
57
+ url = {http://hdl.handle.net/20.500.12124/6},
58
+ note = {Eurac Research {CLARIN} Centre},
59
+ copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)},
60
+ year = {2018} }
merlin.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """MERLIN Written Learner Corpus for Czech, German, Italian 1.1."""
15
+
16
+
17
import csv
import json
import os
import re

import datasets
22
+
23
+ _CITATION = """\
24
+ @misc{20.500.12124/6,
25
+ title = {{MERLIN} Written Learner Corpus for Czech, German, Italian 1.1},
26
+ author = {Wisniewski, Katrin and Abel, Andrea and Vodi{\v c}kov{\'a}, Kate{\v r}ina and Plassmann,
27
+ Sybille and Meurers, Detmar and Woldt, Claudia and Sch{\"o}ne, Karin and Blaschitz, Verena and Lyding,
28
+ Verena and Nicolas, Lionel and Vettori, Chiara and Pe{\v c}en{\'y}, Pavel and Hana, Jirka and
29
+ {\v C}urdov{\'a}, Veronika and {\v S}tindlov{\'a}, Barbora and Klein, Gudrun and Lauppe, Louise and Boyd,
30
+ Adriane and Bykh, Serhiy and Krivanek, Julia},
31
+ url = {http://hdl.handle.net/20.500.12124/6},
32
+ note = {Eurac Research {CLARIN} Centre},
33
+ copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)},
34
+ year = {2018} }
35
+ """
36
+
37
+ _DESCRIPTION = """\
38
+ The MERLIN corpus is a written learner corpus for Czech, German, and Italian that has been
39
+ designed to illustrate the Common European Framework of Reference for Languages (CEFR) with
40
+ authentic learner data. The corpus contains learner texts produced in standardized language
41
+ certifications covering CEFR levels A1-C1. The MERLIN annotation scheme includes a wide
42
+ range of language characteristics that provide researchers with concrete examples of learner
43
+ performance and progress across multiple proficiency levels.
44
+ """
45
+
46
+ _HOMEPAGE = "https://merlin-platform.eu/"
47
+
48
+ _LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
49
+
50
+ _URLS = {
51
+ "multilingual": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
52
+ "german": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
53
+ "italian": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
54
+ "czech": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
55
+ }
56
+
57
class MerlinDataset(datasets.GeneratorBasedBuilder):
    """Builder for the MERLIN written learner corpus (Czech, German, Italian).

    Each example is one learner text from a standardized certification exam,
    together with its CEFR ratings and a target-hypothesis version of the text.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="multilingual", version=VERSION, description="Merlin dataset including three languages."),
        datasets.BuilderConfig(name="german", version=VERSION, description="Merlin dataset German."),
        datasets.BuilderConfig(name="italian", version=VERSION, description="Merlin dataset Italian."),
        datasets.BuilderConfig(name="czech", version=VERSION, description="Merlin dataset Czech."),
    ]

    # CEFR proficiency scale shared by all rating features below.
    _CEFR_LEVELS = ["A1", "A2", "B1", "B2", "C1", "C2"]

    def _info(self):
        """Return the dataset metadata (features, citation, homepage, license)."""

        def cefr_label():
            # One fresh ClassLabel per feature so the feature objects stay
            # independent; avoids repeating the six-level list nine times.
            return datasets.ClassLabel(num_classes=6, names=self._CEFR_LEVELS)

        features = datasets.Features(
            {
                "author": datasets.Value("string"),
                "language": datasets.ClassLabel(num_classes=3, names=["Czech", "German", "Italian"]),
                "level": cefr_label(),
                "level_grammar": cefr_label(),
                "level_ortography": cefr_label(),
                "level_vocabulary_range": cefr_label(),
                "level_vocabulary_control": cefr_label(),
                "level_coherence": cefr_label(),
                "level_appropriateness": cefr_label(),
                "text": datasets.Value("string"),
                "text_target": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then point the generator at the
        directory matching the selected configuration."""
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        filepath = os.path.join(data_dir, "merlin-text-v1.1/meta_ltext_THs")
        if self.config.name != "multilingual":
            # Language-specific configs live in a subdirectory named after the config.
            filepath = os.path.join(filepath, self.config.name)
        print(f"Generating split from {filepath}")  # fixed typo: "Genereting"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

    @staticmethod
    def _extract_field(pattern, raw_text, flags=0):
        """Return the first capture group of *pattern* in *raw_text*.

        Raises:
            ValueError: when the field is missing, instead of the opaque
                IndexError the original ``re.findall(...)[0][1]`` produced.
        """
        match = re.search(pattern, raw_text, flags)
        if match is None:
            raise ValueError(f"Pattern {pattern!r} not found in learner text file")
        return match.group(1)

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs parsed from the plain-text corpus files."""
        file_list = []
        for path, _, files in os.walk(filepath):
            for file in files:
                file_list.append(os.path.join(path, file))
        # Sort so generation order is deterministic across filesystems.
        file_list.sort()

        print(f"Reading {len(file_list)} files")
        # Transform the data
        for f in file_list:
            # Context manager + explicit encoding: the original leaked the file
            # handle and relied on the platform default encoding.
            with open(f, "r", encoding="utf-8") as handle:
                raw_text = handle.read()

            language = self._extract_field(r"Test language: (.*?)\n", raw_text)
            author_id = self._extract_field(r"Author ID: (.*?)\n", raw_text)
            level = self._extract_field(r"CEFR level of test: (.*?)\n", raw_text)
            level_grammar = self._extract_field(r"Grammatical accuracy: (.*?)\n", raw_text)
            level_ortography = self._extract_field(r"Orthography: (.*?)\n", raw_text)
            level_vocabulary_range = self._extract_field(r"Vocabulary range: (.*?)\n", raw_text)
            level_vocabulary_control = self._extract_field(r"Vocabulary control: (.*?)\n", raw_text)
            level_coherence = self._extract_field(r"Coherence/Cohesion: (.*?)\n", raw_text)
            level_appropriateness = self._extract_field(r"Sociolinguistic appropriateness: (.*?)\n", raw_text)
            text = self._extract_field(r"Learner text: \n\n(.*?)\n\n----------------\n\n", raw_text, re.DOTALL)
            text_target = self._extract_field(r"Target hypothesis 1: \n\n(.*?)\n\n----------------\n\n", raw_text, re.DOTALL)

            id_ = f"{language}_{author_id}"
            yield id_, {
                "author": author_id,
                "language": language,
                "level": level,
                "level_grammar": level_grammar,
                "level_ortography": level_ortography,
                "level_vocabulary_range": level_vocabulary_range,
                "level_vocabulary_control": level_vocabulary_control,
                "level_coherence": level_coherence,
                "level_appropriateness": level_appropriateness,
                "text": text,
                "text_target": text_target,
            }