Andre Barbosa committed on
Commit e968ade · 1 Parent(s): 7236a53

address script documentation and add reference by grader

Files changed (1)
  1. aes_enem_dataset.py +76 -28
aes_enem_dataset.py CHANGED
@@ -11,9 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-

 import csv
 import math
@@ -28,16 +25,45 @@ from tqdm.auto import tqdm
 
 np.random.seed(42) # Set the seed
 
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-TODO
+_CITATION = """
+@inproceedings{silveira-etal-2024-new,
+    title = "A New Benchmark for Automatic Essay Scoring in {P}ortuguese",
+    author = "Silveira, Igor Cataneo and
+      Barbosa, Andr{\'e} and
+      Mau{\'a}, Denis Deratani",
+    editor = "Gamallo, Pablo and
+      Claro, Daniela and
+      Teixeira, Ant{\'o}nio and
+      Real, Livy and
+      Garcia, Marcos and
+      Oliveira, Hugo Goncalo and
+      Amaro, Raquel",
+    booktitle = "Proceedings of the 16th International Conference on Computational Processing of Portuguese - Vol. 1",
+    month = mar,
+    year = "2024",
+    address = "Santiago de Compostela, Galicia/Spain",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2024.propor-1.23/",
+    pages = "228--237"
+}
 """
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+This dataset was created as part of our work on advancing Automatic Essay Scoring for
+Brazilian Portuguese. It comprises a large collection of publicly available essays
+collected from websites simulating University Entrance Exams, with a subset expertly
+annotated to provide reliable assessment indicators. The dataset includes both the raw
+text and processed forms of the essays, along with supporting prompts and supplemental
+texts.
+
+Key Features:
+- A diverse corpus of essays with detailed annotations.
+- A subset graded by expert annotators to evaluate essay quality and task difficulty.
+- Comprehensive metadata providing provenance and context for each essay.
+- An empirical analysis framework to support state-of-the-art predictive modeling.
+
+For further details, please refer to the paper "A New Benchmark for Automatic Essay
+Scoring in Portuguese" available at https://aclanthology.org/2024.propor-1.23/.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
@@ -81,6 +107,7 @@ CSV_HEADER = [
     "general",
     "specific",
     "essay_year",
+    "reference"
 ]
 
 CSV_HEADERPROPOR = [
@@ -93,32 +120,49 @@ CSV_HEADERPROPOR = [
 ]
 
 SOURCE_A_DESC = """
-Source A have 860 essays available from August 2015 to March 2020.
-For each month of that period, a new prompt together with supporting texts were given, and the graded essays from the previous month were made available.
+SourceA has 860 essays available from August 2015 to March 2020.
+For each month of that period, a new prompt was given together with supporting texts,
+and the graded essays from the previous month were made available.
 Of the 56 prompts, 12 had no associated essays available (at the time of download).
-Additionally, there were 3 prompts that asked for a text in the format of a letter. We removed those 15 prompts and associated texts from the corpus.
-For an unknown reason, 414 of the essays were graded using a five-point scale of either {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
-To avoid introducing bias, we also discarded such instances, resulting in a dataset of 386 annotated essays with prompts and supporting texts (with each component being clearly identified).
-Some of the essays used a six-point scale with 20 points instead of 40 points as the second class. As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
-The original data contains comments from the annotators explaining their per-competence scores. They are included in our dataset.
+Additionally, there were 3 prompts that asked for a text in the format of a letter.
+We removed those 15 prompts and associated texts from the corpus.
+For an unknown reason, 414 of the essays were graded using a five-point scale of either
+{0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
+To avoid introducing bias, we also discarded such instances, resulting in a dataset of
+386 annotated essays with prompts and supporting texts (with each component being clearly identified).
+Some of the essays used a six-point scale with 20 points instead of 40 points as the second class.
+As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
+The original data contains comments from the annotators explaining their per-competence scores.
+They are included in our dataset.
 """
 
-SOURCE_A_WITH_GRADERS = "Same as SourceA but augmented with reviwers contractors grade's. Each essay then have three grades: the downloaded one and each grader's feedback. "
+SOURCE_A_WITH_GRADERS = """
+sourceAWithGraders includes the original dataset augmented with grades from additional reviewers.
+Each essay is replicated three times:
+1. The original essay with its grades from the website.
+2. The same essay with grades from the first human grader.
+3. The same essay with grades from the second human grader.
+"""
 
 SOURCE_B_DESC = """
-Source B is very similar to Source A: a new prompt and supporting texts are made available every month along with the graded essays submitted in the previous month.
-We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released prior to June 2016 were graded on a five-point scale and consequently discarded.
+SourceB is very similar to Source A: a new prompt and supporting texts are made
+available every month along with the graded essays submitted in the previous month.
+We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released
+prior to June 2016 were graded on a five-point scale and consequently discarded.
 This resulted in a corpus of approx. 3,200 graded essays on 83 different prompts.
 
-Although in principle, Source B also provides supporting texts for students, none were available at the time the data was downloaded.
-To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever possible, by manually matching prompts between the two corpora.
-We ended up with approx. 1,000 essays containing both prompt and supporting texts, and approx. 2,200 essays containing only the respective prompt.
+Although, in principle, Source B also provides supporting texts for students, none were
+available at the time the data was downloaded.
+To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever
+possible, by manually matching prompts between the two corpora.
+We ended up with approx. 1,000 essays containing both prompt and supporting texts, and
+approx. 2,200 essays containing only the respective prompt.
 """
 
 PROPOR2024 = """
-Splits used for PROPOR paper. It is a variation of sourceAWithGraders dataset. Post publication we noticed that there was an issue in the reproducible setting.
-
-We fix that and set this config to keep reproducibility w.r.t. numbers reported in the paper.
+This split corresponds to the results reported in the PROPOR 2024 paper. While the
+reproducibility issue was fixed in the sourceAWithGraders configuration, this split
+preserves the original distribution of prompts and scores as used in the paper.
 """
 
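For orientation, each description constant above corresponds to a builder config that downstream users select by name. A minimal loading sketch, assuming the script is hosted as a Hugging Face dataset repo (the repo id below is an assumption, and the exact config strings should be checked against the script's BUILDER_CONFIGS):

from datasets import load_dataset

# Repo id is an assumption; recent `datasets` versions require
# trust_remote_code for script-based loaders like this one.
ds = load_dataset(
    "kamel-usp/aes_enem_dataset",
    "sourceAWithGraders",  # one config per description constant above
    trust_remote_code=True,
)
print(ds)  # DatasetDict with train/validation/test splits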
@@ -159,6 +203,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                     "essay_year": datasets.Value("int16"),
                     "general_comment": datasets.Value("string"),
                     "specific_comment": datasets.Value("string"),
+                    "reference": datasets.Value("string"),
                 }
             )
 
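With "reference" added to the schema, every record the builder yields must now carry that field. A standalone sketch of that contract using only datasets.Features (the trimmed feature set and dummy values are illustrative, not the script's full schema):

import datasets

features = datasets.Features(
    {
        "essay_year": datasets.Value("int16"),
        "general_comment": datasets.Value("string"),
        "specific_comment": datasets.Value("string"),
        "reference": datasets.Value("string"),
    }
)

record = {
    "essay_year": 2019,
    "general_comment": "Well argued.",
    "specific_comment": "Competence 2 needs a clearer thesis.",
    "reference": "grader_a",
}
encoded = features.encode_example(record)  # raises if a field is missing or mistyped
print(encoded["reference"])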
@@ -306,7 +351,8 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         for grader in [grader_a, grader_b]:
             grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", "))
             grader.grades = grader.grades.apply(map_list)
-
+        grader_a["reference"] = "grader_a"
+        grader_b["reference"] = "grader_b"
         return grader_a, grader_b
 
     def _generate_splits(self, filepath: str, train_size=0.7):
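The two assignments above implement the provenance tagging that sourceAWithGraders relies on. A rough, self-contained pandas sketch of the replication idea (the helper name and frame variables are illustrative, not the script's exact code):

import pandas as pd

def augment_with_graders(web, grader_a, grader_b):
    # Tag each frame with its grade source, then stack them so every essay
    # appears once per source: web grades, grader A, grader B.
    web = web.assign(reference="crawled_from_web")
    grader_a = grader_a.assign(reference="grader_a")
    grader_b = grader_b.assign(reference="grader_b")
    return pd.concat([web, grader_a, grader_b], ignore_index=True)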
@@ -409,7 +455,6 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         assert (
             len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
         ), "Overlap between val and test id_prompt"
-        #train_df['essay_year'] = train_df['essay_year'].astype(int)
         train_df.to_csv(f"{dirname}/train.csv", index=False)
         val_df.to_csv(f"{dirname}/validation.csv", index=False)
         test_df.to_csv(f"{dirname}/test.csv", index=False)
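The assertion above guards the property that no prompt appears in two splits. A self-contained sketch of a prompt-disjoint split in that spirit (the helper name is made up; the proportions follow the script's train_size=0.7 default, with the remainder split evenly):

import numpy as np
import pandas as pd

def split_by_prompt(df, train_size=0.7, seed=42):
    # Shuffle unique prompts, not rows, so essays sharing a prompt stay together.
    prompts = df["id_prompt"].unique()
    rng = np.random.default_rng(seed)
    rng.shuffle(prompts)
    n_train = int(len(prompts) * train_size)
    n_val = (len(prompts) - n_train) // 2
    train_p = set(prompts[:n_train])
    val_p = set(prompts[n_train:n_train + n_val])
    train = df[df["id_prompt"].isin(train_p)]
    val = df[df["id_prompt"].isin(val_p)]
    test = df[~df["id_prompt"].isin(train_p | val_p)]
    assert not (val_p & set(test["id_prompt"])), "Overlap between val and test id_prompt"
    return train, val, test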
@@ -449,6 +494,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                 "essay_year": row["essay_year"],
                 "general_comment": row["general"],
                 "specific_comment": row["specific"],
+                "reference": row["reference"]
             }
 
@@ -719,6 +765,7 @@ class HTMLParser:
         general_comment = None
         specific_comment = None
         essay_year = None
+        reference = "crawled_from_web"
         for prompt_folder in tqdm(
             sub_folders,
             desc=f"Parsing HTML files from: {key}",
@@ -761,6 +808,7 @@ class HTMLParser:
                     general_comment,
                     specific_comment,
                     essay_year,
+                    reference
                 ]
             )
         essay_id += 1
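End to end, the new column lets consumers slice the augmented config by grade source. Continuing the loading sketch from earlier (same assumptions about repo id and config name):

web_rows = ds["train"].filter(lambda row: row["reference"] == "crawled_from_web")
grader_a_rows = ds["train"].filter(lambda row: row["reference"] == "grader_a")
print(len(web_rows), len(grader_a_rows))  # equal if every essay was replicated per source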