abarbosa committed on
Commit 10eb3cd · verified · 1 Parent(s): 74a6967

Delete loading script

Files changed (1)
  1. aes_enem_dataset.py +0 -1234
aes_enem_dataset.py DELETED
@@ -1,1234 +0,0 @@
- # Copyright 2023 Andre Barbosa, Igor Cataneo Silveira & The HuggingFace Datasets Authors
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import csv
- import math
- import os
- import re
- from pathlib import Path
-
- import datasets
- import numpy as np
- import pandas as pd
- from multiprocessing import Pool, cpu_count
- from bs4 import BeautifulSoup
- from tqdm.auto import tqdm
-
- RANDOM_STATE = 42
- np.random.seed(RANDOM_STATE)  # Set the seed
-
- _CITATION = """
- @inproceedings{silveira-etal-2024-new,
-     title = "A New Benchmark for Automatic Essay Scoring in {P}ortuguese",
-     author = "Silveira, Igor Cataneo and
-       Barbosa, Andr{\'e} and
-       Mau{\'a}, Denis Deratani",
-     editor = "Gamallo, Pablo and
-       Claro, Daniela and
-       Teixeira, Ant{\'o}nio and
-       Real, Livy and
-       Garcia, Marcos and
-       Oliveira, Hugo Goncalo and
-       Amaro, Raquel",
-     booktitle = "Proceedings of the 16th International Conference on Computational Processing of Portuguese - Vol. 1",
-     month = mar,
-     year = "2024",
-     address = "Santiago de Compostela, Galicia/Spain",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/2024.propor-1.23/",
-     pages = "228--237"
- }
- """
-
- _DESCRIPTION = """\
- This dataset was created as part of our work on advancing Automatic Essay Scoring for
- Brazilian Portuguese. It comprises a large collection of publicly available essays
- collected from websites simulating University Entrance Exams, with a subset expertly
- annotated to provide reliable assessment indicators. The dataset includes both the raw
- text and processed forms of the essays, along with supporting prompts and supplemental
- texts.
-
- Key Features:
- - A diverse corpus of essays with detailed annotations.
- - A subset graded by expert annotators to evaluate essay quality and task difficulty.
- - Comprehensive metadata providing provenance and context for each essay.
- - An empirical analysis framework to support state-of-the-art predictive modeling.
-
- For further details, please refer to the paper “A New Benchmark for Automatic Essay
- Scoring in Portuguese” available at https://aclanthology.org/2024.propor-1.23/.
- """
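-
- # Usage sketch (hypothetical, not part of the original file): any configuration
- # name defined in BUILDER_CONFIGS below can be requested through the standard
- # `datasets` API (recent `datasets` versions may additionally require
- # trust_remote_code=True for script-based datasets):
- #
- #     from datasets import load_dataset
- #     ds = load_dataset("kamel-usp/aes_enem_dataset", "sourceAWithGraders")
- #     print(ds["train"][0]["grades"])  # five competence scores plus their sum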
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the license for the dataset here if you can find it
- _LICENSE = ""
-
-
- _URLS = {
-     "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
-     "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
-     "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz",
-     "PROPOR2024": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/propor2024.tar.gz",
-     "gradesThousand": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/scrapedGradesThousand.tar.gz",
- }
-
-
- PROMPTS_TO_IGNORE = [
-     "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
-     "carta-convite-discutir-discriminacao-na-escola",
-     "informacao-no-rotulo-de-produtos-transgenicos",
- ]
-
- # Essays to ignore
- ESSAY_TO_IGNORE = [
-     "direitos-em-conflito-liberdade-de-expressao-e-intimidade/2.html",
-     "terceirizacao-avanco-ou-retrocesso/2.html",
-     "artes-e-educacao-fisica-opcionais-ou-obrigatorias/2.html",
-     "violencia-e-drogas-o-papel-do-usuario/0.html",
-     "internacao-compulsoria-de-dependentes-de-crack/0.html",
- ]
-
- CSV_HEADER = [
-     "id",
-     "id_prompt",
-     "prompt",
-     "supporting_text",
-     "title",
-     "essay",
-     "grades",
-     "general",
-     "specific",
-     "essay_year",
-     "reference",
- ]
-
- CSV_HEADERPROPOR = [
-     "id",
-     "id_prompt",
-     "title",
-     "essay",
-     "grades",
-     "essay_year",
-     "reference",
- ]
-
- CSV_HEADERTHOUSAND = [
-     "id",
-     "author",
-     "id_prompt",
-     "essay_year",
-     "grades",
-     "essay",
-     "source",
-     "supporting_text",
-     "prompt",
- ]
-
- CSV_HEADER_JBCS25 = [
-     "id",
-     "id_prompt",
-     "essay_text",
-     "grades",
-     "essay_year",
-     "supporting_text",
-     "prompt",
-     "reference",
- ]
-
- SOURCE_A_DESC = """
- SourceA has 860 essays available from August 2015 to March 2020.
- For each month of that period, a new prompt, together with supporting texts, was given,
- and the graded essays from the previous month were made available.
- Of the 56 prompts, 12 had no associated essays available (at the time of download).
- Additionally, there were 3 prompts that asked for a text in the format of a letter.
- We removed those 15 prompts and associated texts from the corpus.
- For an unknown reason, 414 of the essays were graded using a five-point scale of either
- {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
- To avoid introducing bias, we also discarded such instances, resulting in a dataset of
- 386 annotated essays with prompts and supporting texts (with each component being clearly identified).
- Some of the essays used a six-point scale with 20 points instead of 40 points as the second class.
- As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
- The original data contains comments from the annotators explaining their per-competence scores.
- They are included in our dataset.
- """
-
- SOURCE_A_WITH_GRADERS = """
- sourceAWithGraders includes the original dataset augmented with grades from additional reviewers.
- Each essay is replicated three times:
- 1. The original essay with its grades from the website.
- 2. The same essay with grades from the first human grader.
- 3. The same essay with grades from the second human grader.
- """
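-
- # Hypothetical filtering sketch (not from the original file): because every
- # essay appears once per grading source, a single source can be recovered by
- # filtering on the `reference` column, e.g.:
- #
- #     only_web = ds["train"].filter(lambda row: row["reference"] == "crawled_from_web")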
-
- SOURCE_B_DESC = """
- SourceB is very similar to Source A: a new prompt and supporting texts are made
- available every month along with the graded essays submitted in the previous month.
- We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released
- prior to June 2016 were graded on a five-point scale and consequently discarded.
- This resulted in a corpus of approx. 3,200 graded essays on 83 different prompts.
-
- Although, in principle, Source B also provides supporting texts for students, none were
- available at the time the data was downloaded.
- To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever
- possible, by manually matching prompts between the two corpora.
- We ended up with approx. 1,000 essays containing both prompt and supporting texts, and
- approx. 2,200 essays containing only the respective prompt.
- """
-
- PROPOR2024 = """
- This split corresponds to the results reported in the PROPOR 2024 paper. While the
- reproducibility issue was fixed in the sourceAWithGraders configuration, this split
- preserves the original distribution of prompts and scores as used in the paper.
- """
-
- GRADES_THOUSAND = """
- TODO
- """
-
- JBCS2025 = """
- TODO
- """
-
-
- class AesEnemDataset(datasets.GeneratorBasedBuilder):
-     """
-     AES Enem Dataset. For a full explanation of the generation process, please refer to: https://aclanthology.org/2024.propor-1.23/
-
-     During our experiments, we found an issue in the deterministic process by which the dataset is generated.
-     To reproduce the results from the PROPOR paper, please use the "PROPOR2024" config. All other configs are reproducible now.
-     """
-
-     VERSION = datasets.Version("1.0.0")
-
-     # You can load any one of the configurations in the list below
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="sourceAOnly", version=VERSION, description=SOURCE_A_DESC
-         ),
-         datasets.BuilderConfig(
-             name="sourceAWithGraders",
-             version=VERSION,
-             description=SOURCE_A_WITH_GRADERS,
-         ),
-         datasets.BuilderConfig(
-             name="sourceB",
-             version=VERSION,
-             description=SOURCE_B_DESC,
-         ),
-         datasets.BuilderConfig(
-             name="PROPOR2024", version=VERSION, description=PROPOR2024
-         ),
-         datasets.BuilderConfig(
-             name="gradesThousand", version=VERSION, description=GRADES_THOUSAND
-         ),
-         datasets.BuilderConfig(name="JBCS2025", version=VERSION, description=JBCS2025),
-     ]
-
-     def _info(self):
-         if self.config.name == "PROPOR2024":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "id_prompt": datasets.Value("string"),
-                     "essay_title": datasets.Value("string"),
-                     "essay_text": datasets.Value("string"),
-                     "grades": datasets.Sequence(datasets.Value("int16")),
-                     "essay_year": datasets.Value("int16"),
-                     "reference": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "gradesThousand":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "id_prompt": datasets.Value("string"),
-                     "supporting_text": datasets.Value("string"),
-                     "prompt": datasets.Value("string"),
-                     "essay_text": datasets.Value("string"),
-                     "grades": datasets.Sequence(datasets.Value("int16")),
-                     "essay_year": datasets.Value("int16"),
-                     "author": datasets.Value("string"),  # also yielded by _generate_examples for this config
-                     "source": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "JBCS2025":
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "id_prompt": datasets.Value("string"),
-                     "essay_text": datasets.Value("string"),
-                     "grades": datasets.Sequence(datasets.Value("int16")),
-                     "essay_year": datasets.Value("int16"),
-                     "supporting_text": datasets.Value("string"),
-                     "prompt": datasets.Value("string"),
-                     "reference": datasets.Value("string"),
-                 }
-             )
-         else:
-             features = datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "id_prompt": datasets.Value("string"),
-                     "prompt": datasets.Value("string"),
-                     "supporting_text": datasets.Value("string"),
-                     "essay_title": datasets.Value("string"),
-                     "essay_text": datasets.Value("string"),
-                     "grades": datasets.Sequence(datasets.Value("int16")),
-                     "essay_year": datasets.Value("int16"),
-                     "general_comment": datasets.Value("string"),
-                     "specific_comment": datasets.Value("string"),
-                     "reference": datasets.Value("string"),
-                 }
-             )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types;
-             # features are defined above because they differ between configurations.
-             features=features,
-             # If there's a common (input, target) tuple from the features, uncomment
-             # the supervised_keys line below and specify them. They'll be used if
-             # as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _post_process_dataframe(self, filepath):
-         def map_year(year):
-             if year <= 2017:
-                 return "<=2017"
-             return str(year)
-
-         def normalize_grades(grades):
-             grades = grades.strip("[]").split(", ")
-             grade_mapping = {"0.0": 0, "20": 40, "2.0": 2}
-
-             # We will remove the rows that match the criteria below
-             if any(
-                 single_grade
-                 in grades[:-1]  # we ignore the sum and only check the concepts
-                 for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
-             ):
-                 return None
-             # Use the mapping to transform grades, ignoring the last grade
-             mapped_grades = [
-                 int(grade_mapping.get(grade_concept, grade_concept))
-                 for grade_concept in grades[:-1]
-             ]
-             # Calculate and append the sum of the mapped grades as the last element
-             mapped_grades.append(sum(mapped_grades))
-             return mapped_grades
-
-         df = pd.read_csv(filepath)
-         df["general"] = df["general"].fillna("")
-         df["essay_year"] = df["essay_year"].astype("int")
-         df["mapped_year"] = df["essay_year"].apply(map_year)
-         df["grades"] = df["grades"].apply(normalize_grades)
-         df = df.dropna(subset=["grades"])
-         df = df[
-             ~(df["id_prompt"] + "/" + df["id"]).isin(ESSAY_TO_IGNORE)
-         ]  # arbitrary removal of zero-graded essays
-         df.to_csv(filepath, index=False)
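-
-     # Worked example for normalize_grades above (hypothetical input strings):
-     #   "[120, 120, 120, 20, 120, 500]"  -> [120, 120, 120, 40, 120, 520]
-     #       (class 20 is relabeled to 40 and the sum is recomputed)
-     #   "[100, 120, 120, 120, 120, 580]" -> None (dropped: 100 comes from the
-     #       five-point {0, 50, 100, 150, 200} scale described in SOURCE_A_DESC)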
-
-     def _preprocess_propor2024(self, base_path: str):
-         for split_case in ["train.csv", "validation.csv", "test.csv"]:
-             filepath = f"{base_path}/propor2024/{split_case}"
-             df = pd.read_csv(filepath)
-
-             # Dictionary to track how many times we've seen each (id, id_prompt) pair
-             counts = {}
-             # List to store the reference for each row
-             references = []
-
-             # Define the mapping for each occurrence
-             occurrence_to_reference = {
-                 0: "crawled_from_web",
-                 1: "grader_a",
-                 2: "grader_b",
-             }
-
-             # Iterate through rows in the original order
-             for _, row in df.iterrows():
-                 key = (row["id"], row["id_prompt"])
-                 count = counts.get(key, 0)
-                 # Assign the reference based on the count
-                 ref = occurrence_to_reference.get(count, "unknown")
-                 references.append(ref)
-                 counts[key] = count + 1
-
-             # Add the reference column without changing the order of rows
-             df["reference"] = references
-             df.to_csv(filepath, index=False)
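-
-     # Hypothetical illustration: three rows sharing (id="2.html",
-     # id_prompt="some-prompt") are labeled, in file order, "crawled_from_web",
-     # "grader_a", and "grader_b"; a fourth duplicate (which should not occur)
-     # would fall back to "unknown".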
-
-     def _split_generators(self, dl_manager):
-         if self.config.name != "JBCS2025":
-             urls = _URLS[self.config.name]
-             extracted_files = dl_manager.download_and_extract({self.config.name: urls})
-         if "PROPOR2024" == self.config.name:
-             base_path = extracted_files["PROPOR2024"]
-             self._preprocess_propor2024(base_path)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(base_path, "propor2024/train.csv"),
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(
-                             base_path, "propor2024/validation.csv"
-                         ),
-                         "split": "validation",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "filepath": os.path.join(base_path, "propor2024/test.csv"),
-                         "split": "test",
-                     },
-                 ),
-             ]
-         if "gradesThousand" == self.config.name:
-             urls = _URLS[self.config.name]
-             extracted_files = dl_manager.download_and_extract({self.config.name: urls})
-             base_path = f"{extracted_files['gradesThousand']}/scrapedGradesThousand"
-             for split in ["train", "validation", "test"]:
-                 split_filepath = os.path.join(base_path, f"{split}.csv")
-                 grades_thousand = pd.read_csv(split_filepath)
-                 grades_thousand[["supporting_text", "prompt"]] = grades_thousand[
-                     "supporting_text"
-                 ].apply(
-                     lambda original_text: pd.Series(
-                         self._extract_prompt_and_clean(original_text)
-                     )
-                 )
-                 grades_thousand.to_csv(split_filepath, index=False)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(base_path, "train.csv"),
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": os.path.join(base_path, "validation.csv"),
-                         "split": "validation",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "filepath": os.path.join(base_path, "test.csv"),
-                         "split": "test",
-                     },
-                 ),
-             ]
-         if "sourceA" in self.config.name:
-             html_parser = self._process_html_files(extracted_files)
-             self._post_process_dataframe(html_parser.sourceA)
-             self._generate_splits(html_parser.sourceA)
-             folder_sourceA = Path(html_parser.sourceA).parent
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "train.csv",
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "validation.csv",
-                         "split": "validation",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "test.csv",
-                         "split": "test",
-                     },
-                 ),
-             ]
-         elif self.config.name == "sourceB":
-             html_parser = self._process_html_files(extracted_files)
-             self._post_process_dataframe(html_parser.sourceB)
-             return [
-                 datasets.SplitGenerator(
-                     name="full",
-                     gen_kwargs={
-                         "filepath": html_parser.sourceB,
-                         "split": "full",
-                     },
-                 ),
-             ]
-         elif "JBCS2025" == self.config.name:
-             extracted_files = dl_manager.download_and_extract(
-                 {
-                     "sourceA": _URLS["sourceAWithGraders"],
-                     "grades_thousand": _URLS["gradesThousand"],
-                 }
-             )
-             config_name_source_a = "sourceAWithGraders"
-
-             html_parser = self._process_html_files(
-                 paths_dict={config_name_source_a: extracted_files["sourceA"]},
-                 config_name=config_name_source_a,
-             )
-             self._post_process_dataframe(html_parser.sourceA)
-             self._generate_splits(html_parser.sourceA, config_name=config_name_source_a)
-             folder_sourceA = Path(html_parser.sourceA).parent
-             for split in ["train", "validation", "test"]:
-                 sourceA = pd.read_csv(folder_sourceA / f"{split}.csv")
-                 common_columns = [
-                     "id",
-                     "id_prompt",
-                     "essay_text",
-                     "grades",
-                     "essay_year",
-                     "supporting_text",
-                     "prompt",
-                     "reference",
-                 ]
-                 combined_split = sourceA[
-                     sourceA.reference.isin(["grader_a", "grader_b"])
-                 ]
-                 combined_split = combined_split.rename(columns={"essay": "essay_text"})
-                 combined_split["grades"] = combined_split["grades"].str.replace(",", "")
-                 final_split = combined_split[common_columns].sample(
-                     frac=1, random_state=RANDOM_STATE
-                 ).reset_index(drop=True)
-                 # overwrites the sourceA data
-                 final_split.to_csv(folder_sourceA / f"{split}.csv", index=False)
-             return [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "train.csv",
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "validation.csv",
-                         "split": "validation",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TEST,
-                     gen_kwargs={
-                         "filepath": folder_sourceA / "test.csv",
-                         "split": "test",
-                     },
-                 ),
-             ]
-
-     def _extract_prompt_and_clean(self, text: str):
-         """
-         1) Find an uppercase block matching "PROPOSTA DE REDACAO/REDAÇÃO"
-            (with flexible spacing and accents) anywhere in 'text'.
-         2) Capture everything from there until the next heading
-            (TEXTO..., TEXTOS..., INSTRUÇÕES...) or end-of-text.
-         3) Remove that captured block from the original, returning:
-            (supporting_text, prompt)
-         """
-
-         # Regex explanation:
-         #   (?m) => MULTILINE, so ^ could match the start of lines if needed
-         #   1) PROPOSTA\s+DE\s+REDA(?:C|Ç)(?:AO|ÃO)
-         #      - "PROPOSTA", then one or more spaces/newlines,
-         #        then "DE", then spaces, then "REDA(C|Ç)",
-         #        and either "AO" or "ÃO" (uppercase).
-         #      - This allows for diacritic variations ("REDAÇÃO" vs. "REDACAO").
-         #
-         #   2) (?:[\s\S]*?) => a non-greedy capture of the subsequent text,
-         #      including newlines.
-         #
-         #   3) The lookahead (?=(?:TEXTO|TEXTOS|INSTRUÇÕES|\Z)) means: stop right
-         #      before the next occurrence of "TEXTO", "TEXTOS", or "INSTRUÇÕES",
-         #      OR the very end of the text (\Z).
-         #
-         # If found, that entire portion is group(1).
-         def force_newline_after_proposta(text: str) -> str:
-             """
-             If we see "PROPOSTA DE REDAÇÃO" immediately followed by some
-             non-whitespace character (like "A"), insert newlines around it.
-             E.g., "PROPOSTA DE REDAÇÃOA partir..." becomes
-             "\nPROPOSTA DE REDAÇÃO\n\nA partir..."
-             """
-             # This pattern looks for:
-             #   (PROPOSTA DE REDAÇÃO)
-             #   (?=\S) meaning "immediately followed by a NON-whitespace character",
-             # and replaces the match with "\nPROPOSTA DE REDAÇÃO\n\n".
-             pattern = re.compile(r"(?=\S)(PROPOSTA DE REDAÇÃO)(?=\S)")
-             return pattern.sub(r"\n\1\n\n", text)
-
-         text = force_newline_after_proposta(text)
-         pattern = re.compile(
-             r"(?m)"  # MULTILINE
-             r"("
-             r"PROPOSTA\s+DE\s+REDA(?:C|Ç)(?:AO|ÃO)"  # e.g. PROPOSTA DE REDACAO / REDAÇÃO
-             r"(?:[\s\S]*?)"  # lazily grab the subsequent text
-             r")"
-             r"(?=(?:TEXTO|TEXTOS|INSTRUÇÕES|TExTO|\Z))"
-         )
-
-         match = pattern.search(text)
-         if match:
-             prompt = match.group(1).strip()
-             # Remove that block from the original:
-             start, end = match.span(1)
-             main_text = text[:start] + text[end:]
-         else:
-             # No match => keep the entire text in supporting_text, prompt empty
-             prompt = ""
-             main_text = text
-
-         return main_text.strip(), prompt.strip()
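-
-     # Hypothetical round trip for _extract_prompt_and_clean:
-     #   text = "Apoio inicial.\nPROPOSTA DE REDAÇÃO\nEscreva sobre X.\nTEXTO I\n..."
-     #   supporting, prompt = self._extract_prompt_and_clean(text)
-     #   -> prompt keeps "PROPOSTA DE REDAÇÃO\nEscreva sobre X."
-     #   -> supporting keeps the surrounding material ("Apoio inicial.", "TEXTO I", ...)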
-
-     def _process_html_files(self, paths_dict, config_name=None):
-         html_parser = HTMLParser(paths_dict)
-         if config_name is None:
-             config_name = self.config.name
-         html_parser.parse(config_name)
-         return html_parser
-
-     def _parse_graders_data(self, dirname):
-         map_grades = {"0": 0, "1": 40, "2": 80, "3": 120, "4": 160, "5": 200}
-
-         def map_list(grades_list):
-             result = [map_grades.get(item, None) for item in grades_list]
-             sum_grades = sum(result)
-             result.append(sum_grades)
-             return result
-
-         grader_a = pd.read_csv(f"{dirname}/GraderA.csv")
-         grader_b = pd.read_csv(f"{dirname}/GraderB.csv")
-         for grader in [grader_a, grader_b]:
-             grader.grades = grader.grades.apply(lambda x: x.strip("[]").split(", "))
-             grader.grades = grader.grades.apply(map_list)
-         grader_a["reference"] = "grader_a"
-         grader_b["reference"] = "grader_b"
-         return grader_a, grader_b
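-
-     # Hypothetical example for map_list above: graders score each of the five
-     # competences on a 0-5 scale, which is mapped onto ENEM's 0-200 scale:
-     #   ["3", "4", "2", "5", "0"] -> [120, 160, 80, 200, 0, 560]
-     # (the final element is the appended sum).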
-
-     def _generate_splits(self, filepath: str, train_size=0.7, config_name=None):
-         np.random.seed(RANDOM_STATE)
-         df = pd.read_csv(filepath)
-         train_set = []
-         val_set = []
-         test_set = []
-         df = df.sort_values(by=["essay_year", "id_prompt"]).reset_index(drop=True)
-         buckets = {}
-         for key, group in df.groupby("mapped_year"):
-             buckets[key] = sorted(group["id_prompt"].unique())
-         df.drop("mapped_year", axis=1, inplace=True)
-         for year in sorted(buckets.keys()):
-             prompts = buckets[year]
-             np.random.shuffle(prompts)
-             num_prompts = len(prompts)
-
-             # With 3 or fewer prompts, assign one prompt each to train, val,
-             # and test (this assumes at least 3 prompts per year bucket)
-             if num_prompts <= 3:
-                 train_set.append(df[df["id_prompt"].isin([prompts[0]])])
-                 val_set.append(df[df["id_prompt"].isin([prompts[1]])])
-                 test_set.append(df[df["id_prompt"].isin([prompts[2]])])
-                 continue
-
-             # Determine the number of prompts for each set based on train_size and remaining prompts
-             num_train = math.floor(num_prompts * train_size)
-             num_val_test = num_prompts - num_train
-             num_val = num_val_test // 2
-             num_test = num_val_test - num_val
-
-             # Assign prompts to each set
-             train_set.append(df[df["id_prompt"].isin(prompts[:num_train])])
-             val_set.append(
-                 df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])]
-             )
-             test_set.append(
-                 df[
-                     df["id_prompt"].isin(
-                         prompts[
-                             (num_train + num_val) : (num_train + num_val + num_test)
-                         ]
-                     )
-                 ]
-             )
-
-         # Convert lists of groups to DataFrames
-         train_df = pd.concat(train_set)
-         val_df = pd.concat(val_set)
-         test_df = pd.concat(test_set)
-         dirname = os.path.dirname(filepath)
-         if config_name is None:
-             config_name = self.config.name
-         if config_name == "sourceAWithGraders":
-             grader_a, grader_b = self._parse_graders_data(dirname)
-             grader_a_data = pd.merge(
-                 train_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_a.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             grader_b_data = pd.merge(
-                 train_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_b.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             train_df = pd.concat([train_df, grader_a_data, grader_b_data])
-             train_df = train_df.sort_values(by=["id", "id_prompt"]).reset_index(
-                 drop=True
-             )
-
-             grader_a_data = pd.merge(
-                 val_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_a.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             grader_b_data = pd.merge(
-                 val_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_b.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             val_df = pd.concat([val_df, grader_a_data, grader_b_data])
-             val_df = val_df.sort_values(by=["id", "id_prompt"]).reset_index(drop=True)
-
-             grader_a_data = pd.merge(
-                 test_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_a.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             grader_b_data = pd.merge(
-                 test_df[["id", "id_prompt", "essay", "prompt", "supporting_text"]],
-                 grader_b.drop(columns=["essay"]),
-                 on=["id", "id_prompt"],
-                 how="inner",
-             )
-             test_df = pd.concat([test_df, grader_a_data, grader_b_data])
-             test_df = test_df.sort_values(by=["id", "id_prompt"]).reset_index(drop=True)
-
-             train_df = train_df.sample(frac=1, random_state=RANDOM_STATE).reset_index(
-                 drop=True
-             )
-             val_df = val_df.sample(frac=1, random_state=RANDOM_STATE).reset_index(
-                 drop=True
-             )
-             test_df = test_df.sample(frac=1, random_state=RANDOM_STATE).reset_index(
-                 drop=True
-             )
-
-         # Data Validation Assertions
-         assert (
-             len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
-         ), "Overlap between train and val id_prompt"
-         assert (
-             len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
-         ), "Overlap between train and test id_prompt"
-         assert (
-             len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
-         ), "Overlap between val and test id_prompt"
-         train_df.to_csv(f"{dirname}/train.csv", index=False)
-         val_df.to_csv(f"{dirname}/validation.csv", index=False)
-         test_df.to_csv(f"{dirname}/test.csv", index=False)
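-
-     # Worked example (hypothetical): a year bucket with 10 prompts and
-     # train_size=0.7 yields num_train=7, num_val=1, num_test=2, so whole
-     # prompts (never individual essays) are assigned to a single split,
-     # which is what the id_prompt overlap assertions above verify.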
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         if self.config.name == "PROPOR2024":
-             with open(filepath, encoding="utf-8") as csvfile:
-                 next(csvfile)
-                 csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADERPROPOR)
-                 for i, row in enumerate(csv_reader):
-                     grades = row["grades"].strip("[]")
-                     grades = grades.split()
-                     yield (
-                         i,
-                         {
-                             "id": row["id"],
-                             "id_prompt": row["id_prompt"],
-                             "essay_title": row["title"],
-                             "essay_text": row["essay"],
-                             "grades": grades,
-                             "essay_year": row["essay_year"],
-                             "reference": row["reference"],
-                         },
-                     )
-         elif self.config.name == "gradesThousand":
-             with open(filepath, encoding="utf-8") as csvfile:
-                 next(csvfile)
-                 csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADERTHOUSAND)
-                 for i, row in enumerate(csv_reader):
-                     grades = row["grades"].strip("[]")
-                     grades = grades.split(", ")
-                     yield (
-                         i,
-                         {
-                             "id": row["id"],
-                             "id_prompt": row["id_prompt"],
-                             "supporting_text": row["supporting_text"],
-                             "prompt": row["prompt"],
-                             "essay_text": row["essay"],
-                             "grades": grades,
-                             "essay_year": row["essay_year"],
-                             "author": row["author"],
-                             "source": row["source"],
-                         },
-                     )
-         elif self.config.name == "JBCS2025":
-             with open(filepath, encoding="utf-8") as csvfile:
-                 next(csvfile)
-                 csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER_JBCS25)
-                 for i, row in enumerate(csv_reader):
-                     grades = row["grades"].strip("[]")
-                     grades = grades.split()
-                     yield (
-                         i,
-                         {
-                             "id": row["id"],
-                             "id_prompt": row["id_prompt"],
-                             "essay_text": row["essay_text"],
-                             "grades": grades,
-                             "essay_year": row["essay_year"],
-                             "supporting_text": row["supporting_text"],
-                             "prompt": row["prompt"],
-                             "reference": row["reference"],
-                         },
-                     )
-         else:
-             with open(filepath, encoding="utf-8") as csvfile:
-                 next(csvfile)
-                 csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
-                 for i, row in enumerate(csv_reader):
-                     grades = row["grades"].strip("[]")
-                     grades = grades.split(", ")
-                     yield (
-                         i,
-                         {
-                             "id": row["id"],
-                             "id_prompt": row["id_prompt"],
-                             "prompt": row["prompt"],
-                             "supporting_text": row["supporting_text"],
-                             "essay_title": row["title"],
-                             "essay_text": row["essay"],
-                             "grades": grades,
-                             "essay_year": row["essay_year"],
-                             "general_comment": row["general"],
-                             "specific_comment": row["specific"],
-                             "reference": row["reference"],
-                         },
-                     )
-
-
- class HTMLParser:
-     def __init__(self, paths_dict):
-         self.paths_dict = paths_dict
-         self.sourceA = None
-         self.sourceB = None
-
-     def apply_soup(self, filepath, num):
-         # Reads the HTML file at the given directory/filename and returns its parsed soup
-         with open(os.path.join(filepath, num), "r", encoding="utf8") as file:
-             conteudo = file.read()
-         soup = BeautifulSoup(conteudo, "html.parser")
-         return soup
-
-     def _get_title(self, soup):
-         if self.sourceA:
-             title = soup.find("div", class_="container-composition")
-             if title is None:
-                 title = soup.find("h1", class_="pg-color10").get_text()
-             else:
-                 title = title.h2.get_text()
-             title = title.replace("\xa0", "")
-             return title.replace(";", ",")
-         elif self.sourceB:
-             title = soup.find("h1", class_="titulo-conteudo").get_text()
-             # Remove the fixed " - Banco de redações" suffix (str.strip would
-             # treat the argument as a character set and eat title letters too)
-             return title.replace(" - Banco de redações", "").strip()
-
-     def _get_grades(self, soup):
-         if self.sourceA:
-             grades = soup.find("section", class_="results-table")
-             final_grades = []
-             if grades is not None:
-                 grades = grades.find_all("span", class_="points")
-                 assert len(grades) == 6, f"Missing grades: {len(grades)}"
-                 for single_grade in grades:
-                     grade = int(single_grade.get_text())
-                     final_grades.append(grade)
-                 assert final_grades[-1] == sum(final_grades[:-1]), (
-                     "Grading sum is not making sense"
-                 )
-             else:
-                 grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
-                 grades_sum = float(
-                     soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
-                 )
-                 grades = grades.find_all("td")[:10]
-                 for idx in range(1, 10, 2):
-                     grade = float(grades[idx].get_text().replace(",", "."))
-                     final_grades.append(grade)
-                 assert grades_sum == sum(final_grades), (
-                     "Grading sum is not making sense"
-                 )
-                 final_grades.append(grades_sum)
-             return final_grades
-         elif self.sourceB:
-             table = soup.find("table", {"id": "redacoes_corrigidas"})
-             grades = table.find_all("td", class_="simple-td")
-             grades = grades[3:]
-             result = []
-             for single_grade in grades:
-                 result.append(int(single_grade.get_text()))
-             assert len(result) == 5, "We should have 5 grades (one per concept) only"
-             result.append(sum(result))  # Add the sum as a sixth element to keep the same pattern
-             return result
-
-     def _get_general_comment(self, soup):
-         if self.sourceA:
-
-             def get_general_comment_aux(soup):
-                 result = soup.find("article", class_="list-item c")
-                 if result is not None:
-                     result = result.find("div", class_="description")
-                     return result.get_text()
-                 else:
-                     result = soup.find("p", style="margin: 0px 0px 11px;")
-                     if result is not None:
-                         return result.get_text()
-                     else:
-                         result = soup.find("p", style="margin: 0px;")
-                         if result is not None:
-                             return result.get_text()
-                         else:
-                             result = soup.find(
-                                 "p", style="margin: 0px; text-align: justify;"
-                             )
-                             if result is not None:
-                                 return result.get_text()
-                             else:
-                                 return ""
-
-             text = soup.find("div", class_="text")
-             if text is not None:
-                 text = text.find("p")
-                 if (text is None) or (len(text.get_text()) < 2):
-                     return get_general_comment_aux(soup)
-                 return text.get_text()
-             else:
-                 return get_general_comment_aux(soup)
-         elif self.sourceB:
-             return ""
-
-     def _get_specific_comment(self, soup, general_comment):
-         if self.sourceA:
-             result = soup.find("div", class_="text")
-             cms = []
-             if result is not None:
-                 result = result.find_all("li")
-                 if result != []:
-                     for item in result:
-                         text = item.get_text()
-                         if text != "\xa0":
-                             cms.append(text)
-                 else:
-                     result = soup.find("div", class_="text").find_all("p")
-                     for item in result:
-                         text = item.get_text()
-                         if text != "\xa0":
-                             cms.append(text)
-             else:
-                 result = soup.find_all("article", class_="list-item c")
-                 if len(result) < 2:
-                     return ["First if"]
-                 result = result[1].find_all("p")
-                 for item in result:
-                     text = item.get_text()
-                     if text != "\xa0":
-                         cms.append(text)
-             specific_comment = cms.copy()
-             if general_comment in specific_comment:
-                 specific_comment.remove(general_comment)
-             if (len(specific_comment) > 1) and (len(specific_comment[0]) < 2):
-                 specific_comment = specific_comment[1:]
-             return self._clean_list(specific_comment)
-         elif self.sourceB:
-             return ""
-
-     def _get_essay(self, soup):
-         if self.sourceA:
-             essay = soup.find("div", class_="text-composition")
-             result = []
-             if essay is not None:
-                 essay = essay.find_all("p")
-                 for f in essay:
-                     while f.find("span", style="color:#00b050") is not None:
-                         f.find("span", style="color:#00b050").decompose()
-                     while f.find("span", class_="certo") is not None:
-                         f.find("span", class_="certo").decompose()
-                 for paragraph in essay:
-                     result.append(paragraph.get_text())
-             else:
-                 essay = soup.find("div", {"id": "texto"})
-                 essay.find("section", class_="list-items").decompose()
-                 essay = essay.find_all("p")
-                 for f in essay:
-                     while f.find("span", class_="certo") is not None:
-                         f.find("span", class_="certo").decompose()
-                 for paragraph in essay:
-                     result.append(paragraph.get_text())
-             return "\n".join(self._clean_list(result))
-         elif self.sourceB:
-             table = soup.find("div", class_="area-redacao-corrigida")
-             if table is None:
-                 result = None
-             else:
-                 for span in soup.find_all("span"):
-                     span.decompose()
-                 result = table.find_all("p")
-                 result = " ".join(
-                     [
-                         paragraph.get_text().replace("\xa0", "").strip()
-                         for paragraph in result
-                     ]
-                 )
-             return result
-
-     def _get_essay_year(self, soup):
-         if self.sourceA:
-             pattern = r"redações corrigidas - \w+/\d+"
-             first_occurrence = re.search(pattern, soup.get_text().lower())
-             matched_url = first_occurrence.group(0) if first_occurrence else None
-             year_pattern = r"\d{4}"
-             return re.search(year_pattern, matched_url).group(0)
-         elif self.sourceB:
-             pattern = r"Enviou seu texto em.*?(\d{4})"
-             match = re.search(pattern, soup.get_text())
-             return match.group(1) if match else -1
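-
-     # Hypothetical match for the sourceA branch above: a page containing
-     # "Redações corrigidas - janeiro/2018" is lowercased, matched by
-     # r"redações corrigidas - \w+/\d+", and "2018" is then pulled out
-     # by the four-digit year pattern.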
-
-     def _clean_title(self, title):
-         if self.sourceA:
-             smaller_index = title.find("[")
-             if smaller_index == -1:
-                 return title
-             else:
-                 bigger_index = title.find("]")
-                 new_title = title[:smaller_index] + title[bigger_index + 1 :]
-                 return self._clean_title(new_title.replace("  ", " "))
-         elif self.sourceB:
-             return title
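-
-     # Hypothetical example: _clean_title('Frase título [trecho do corretor] final')
-     # recursively drops every bracketed span and collapses the leftover double
-     # space, returning 'Frase título final'.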
-
-     def _clean_list(self, items):
-         if items == []:
-             return []
-         else:
-             new_list = []
-             for phrase in items:
-                 phrase = (
-                     phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".")
-                 )
-                 while phrase.find("  ") != -1:
-                     phrase = phrase.replace("  ", " ")
-                 if len(phrase) > 1:
-                     new_list.append(phrase)
-             return new_list
-
-     def _clean_string(self, sentence):
-         sentence = sentence.replace("\xa0", "").replace("\u200b", "")
-         sentence = (
-             sentence.replace(".", ". ")
-             .replace("?", "? ")
-             .replace("!", "! ")
-             .replace(")", ") ")
-             .replace(":", ": ")
-             .replace("”", "” ")
-         )
-         sentence = sentence.replace("  ", " ").replace(". . . ", "...")
-         sentence = sentence.replace("(editado)", "").replace("(Editado)", "")
-         sentence = sentence.replace("(editado e adaptado)", "").replace(
-             "(Editado e adaptado)", ""
-         )
-         sentence = sentence.replace(". com. br", ".com.br")
-         sentence = sentence.replace("[Veja o texto completo aqui]", "")
-         return sentence
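-
-     # Hypothetical before/after for _clean_string:
-     #   "Fim.Começo (editado)" -> "Fim. Começo " (modulo trailing whitespace)
-     # (punctuation gets a trailing space and editorial "(editado)" markers
-     # are dropped).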
-
-     def _get_supporting_text(self, soup):
-         if self.sourceA:
-             textos = soup.find_all("ul", class_="article-wording-item")
-             resposta = []
-             for t in textos[:-1]:
-                 resposta.append(
-                     t.find("h3", class_="item-titulo").get_text().replace("\xa0", "")
-                 )
-                 resposta.append(
-                     self._clean_string(
-                         t.find("div", class_="item-descricao").get_text()
-                     )
-                 )
-             return resposta
-         else:
-             return ""
-
-     def _get_prompt(self, soup):
-         if self.sourceA:
-             prompt = soup.find("div", class_="text").find_all("p")
-             if len(prompt[0].get_text()) < 2:
-                 return [prompt[1].get_text().replace("\xa0", "")]
-             else:
-                 return [prompt[0].get_text().replace("\xa0", "")]
-         else:
-             return ""
-
-     def _process_all_prompts(self, sub_folders, file_dir, reference, prompts_to_ignore):
-         """
-         Process all prompt folders in parallel and return all rows to write.
-
-         Args:
-             sub_folders (list): List of prompt folder names (or Paths).
-             file_dir (str): Base directory where prompts are located.
-             reference: Reference info to include in each row.
-             prompts_to_ignore (collection): Prompts to be ignored.
-
-         Returns:
-             list: A list of all rows to write to the CSV.
-         """
-
-         args_list = [
-             (prompt_folder, file_dir, reference, prompts_to_ignore, self)
-             for prompt_folder in sub_folders
-         ]
-
-         all_rows = []
-         # Use a Pool to parallelize processing.
-         with Pool(processes=cpu_count()) as pool:
-             # Using imap allows us to update the progress bar.
-             for rows in tqdm(
-                 pool.imap(HTMLParser._process_prompt_folder, args_list),
-                 total=len(args_list),
-                 desc="Processing prompts",
-             ):
-                 all_rows.extend(rows)
-         return all_rows
-
-     def parse(self, config_name: str):
-         for key, filepath in self.paths_dict.items():
-             if key != config_name:
-                 continue  # TODO improve later; we only support a single config at a time
-             if "sourceA" in config_name:
-                 self.sourceA = f"{filepath}/sourceA/sourceA.csv"
-             elif config_name == "sourceB":
-                 self.sourceB = f"{filepath}/sourceB/sourceB.csv"
-             file = self.sourceA if self.sourceA else self.sourceB
-             file_path = Path(file)
-             file_dir = file_path.parent
-             sorted_files = sorted(file_dir.iterdir(), key=lambda p: p.name)
-             sub_folders = [name for name in sorted_files if name.suffix != ".csv"]
-             reference = "crawled_from_web"
-             all_rows = self._process_all_prompts(
-                 sub_folders, file_dir, reference, PROMPTS_TO_IGNORE
-             )
-             with open(file_path, "w", newline="", encoding="utf8") as final_file:
-                 writer = csv.writer(final_file)
-                 writer.writerow(CSV_HEADER)
-                 for row in all_rows:
-                     writer.writerow(row)
-
-     @staticmethod
-     def _process_prompt_folder(args):
-         """
-         Process one prompt folder and return a list of rows to write to CSV.
-         Args:
-             args (tuple): Contains:
-                 - prompt_folder: The folder name (or Path object) for the prompt.
-                 - file_dir: The base directory.
-                 - reference: Reference info to include in each row.
-                 - prompts_to_ignore: A collection of prompts to skip.
-                 - instance: An instance of the class that contains the parsing methods.
-         Returns:
-             list: A list of rows (each row is a list) to write to CSV.
-         """
-         prompt_folder, file_dir, reference, prompts_to_ignore, instance = args
-         rows = []
-         # Skip folders that should be ignored.
-         if prompt_folder in prompts_to_ignore:
-             return rows
-         # Build the full path for the prompt folder.
-         prompt = os.path.join(file_dir, prompt_folder)
-         # List and sort the HTML files.
-         try:
-             sorted_prompts = sorted(os.listdir(prompt))
-         except Exception as e:
-             print(f"Error listing directory {prompt}: {e}")
-             return rows
-         # Process the common "Prompt.html" once.
-         soup_prompt = instance.apply_soup(prompt, "Prompt.html")
-         essay_year = instance._get_essay_year(soup_prompt)
-         essay_supporting_text = "\n".join(instance._get_supporting_text(soup_prompt))
-         essay_prompt = "\n".join(instance._get_prompt(soup_prompt))
-         # Process each essay file except the prompt itself.
-         for essay_filename in sorted_prompts:
-             if essay_filename == "Prompt.html":
-                 continue
-             soup_text = instance.apply_soup(prompt, essay_filename)
-             essay_title = instance._clean_title(instance._get_title(soup_text))
-             essay_grades = instance._get_grades(soup_text)
-             essay_text = instance._get_essay(soup_text)
-             general_comment = instance._get_general_comment(soup_text).strip()
-             specific_comment = instance._get_specific_comment(
-                 soup_text, general_comment
-             )
-             # Create a row with all the information.
-             row = [
-                 essay_filename,
-                 prompt_folder
-                 if not hasattr(prompt_folder, "name")
-                 else prompt_folder.name,
-                 essay_prompt,
-                 essay_supporting_text,
-                 essay_title,
-                 essay_text,
-                 essay_grades,
-                 general_comment,
-                 specific_comment,
-                 essay_year,
-                 reference,
-             ]
-             rows.append(row)
-         return rows