Andre Barbosa committed on
Commit
cc1b79b
·
1 Parent(s): efa4705

update gradesThousand and add it as a reference

Browse files
Files changed (2) hide show
  1. .gitattributes +1 -0
  2. aes_enem_dataset.py +75 -2
.gitattributes CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
56
  sourceA.tar.gz filter=lfs diff=lfs merge=lfs -text
57
  sourceB.tar.gz filter=lfs diff=lfs merge=lfs -text
58
  sourceAWithGraders.tar.gz filter=lfs diff=lfs merge=lfs -text
 
 
56
  sourceA.tar.gz filter=lfs diff=lfs merge=lfs -text
57
  sourceB.tar.gz filter=lfs diff=lfs merge=lfs -text
58
  sourceAWithGraders.tar.gz filter=lfs diff=lfs merge=lfs -text
59
+ scrapedGradesThousand.tar.gz filter=lfs diff=lfs merge=lfs -text
aes_enem_dataset.py CHANGED
@@ -77,7 +77,8 @@ _URLS = {
77
  "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
78
  "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
79
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/blob/main/sourceB.tar.gz",
80
- "PROPOR2024": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/propor2024.tar.gz"
 
81
  }
82
 
83
 
@@ -120,6 +121,17 @@ CSV_HEADERPROPOR = [
120
  "reference"
121
  ]
122
 
 
 
 
 
 
 
 
 
 
 
 
123
  SOURCE_A_DESC = """
124
  SourceA have 860 essays available from August 2015 to March 2020.
125
  For each month of that period, a new prompt together with supporting texts were given,
@@ -166,6 +178,10 @@ fixed in the sourceAWithGraders configuration, this split preserves the original
166
  distribution of prompts and scores as used in the paper.
167
  """
168
 
 
 
 
 
169
 
170
  class AesEnemDataset(datasets.GeneratorBasedBuilder):
171
  """
@@ -175,7 +191,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
175
  To reproduce results from PROPOR paper, please refer to "PROPOR2024" config. Other configs are reproducible now.
176
  """
177
 
178
- VERSION = datasets.Version("0.1.0")
179
 
180
  # You will be able to load one or the other configurations in the following list with
181
  BUILDER_CONFIGS = [
@@ -189,6 +205,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
189
  description=SOURCE_B_DESC,
190
  ),
191
  datasets.BuilderConfig(name="PROPOR2024", version=VERSION, description=PROPOR2024),
 
192
  ]
193
 
194
  def _info(self):
@@ -204,6 +221,18 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
204
  "reference": datasets.Value("string"),
205
  }
206
  )
 
 
 
 
 
 
 
 
 
 
 
 
207
  else:
208
  features = datasets.Features(
209
  {
@@ -335,6 +364,33 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
335
  },
336
  ),
337
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  html_parser = self._process_html_files(extracted_files)
339
  if "sourceA" in self.config.name:
340
  self._post_process_dataframe(html_parser.sourceA)
@@ -522,6 +578,23 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
522
  "essay_year": row["essay_year"],
523
  "reference": row["reference"]
524
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
525
  else:
526
  with open(filepath, encoding="utf-8") as csvfile:
527
  next(csvfile)
 
77
  "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
78
  "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz",
79
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/blob/main/sourceB.tar.gz",
80
+ "PROPOR2024": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/propor2024.tar.gz",
81
+ "gradesThousand": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/scrapedGradesThousand.tar.gz"
82
  }
83
 
84
 
 
121
  "reference"
122
  ]
123
 
124
+ CSV_HEADERTHOUSAND = [
125
+ "id",
126
+ "author",
127
+ "id_prompt",
128
+ "essay_year",
129
+ "grades",
130
+ "essay",
131
+ "source",
132
+ "supporting_text",
133
+ ]
134
+
135
  SOURCE_A_DESC = """
136
  SourceA have 860 essays available from August 2015 to March 2020.
137
  For each month of that period, a new prompt together with supporting texts were given,
 
178
  distribution of prompts and scores as used in the paper.
179
  """
180
 
181
+ GRADES_THOUSAND = """
182
+ TODO
183
+ """
184
+
185
 
186
  class AesEnemDataset(datasets.GeneratorBasedBuilder):
187
  """
 
191
  To reproduce results from PROPOR paper, please refer to "PROPOR2024" config. Other configs are reproducible now.
192
  """
193
 
194
+ VERSION = datasets.Version("0.2.0")
195
 
196
  # You will be able to load one or the other configurations in the following list with
197
  BUILDER_CONFIGS = [
 
205
  description=SOURCE_B_DESC,
206
  ),
207
  datasets.BuilderConfig(name="PROPOR2024", version=VERSION, description=PROPOR2024),
208
+ datasets.BuilderConfig(name="gradesThousand", version=VERSION, description=GRADES_THOUSAND),
209
  ]
210
 
211
  def _info(self):
 
221
  "reference": datasets.Value("string"),
222
  }
223
  )
224
+ elif self.config.name=="gradesThousand":
225
+ features = datasets.Features(
226
+ {
227
+ "id": datasets.Value("string"),
228
+ "id_prompt": datasets.Value("string"),
229
+ "supporting_text": datasets.Value("string"),
230
+ "essay_text": datasets.Value("string"),
231
+ "grades": datasets.Sequence(datasets.Value("int16")),
232
+ "essay_year": datasets.Value("int16"),
233
+ "source": datasets.Value("string"),
234
+ }
235
+ )
236
  else:
237
  features = datasets.Features(
238
  {
 
364
  },
365
  ),
366
  ]
367
+ if "gradesThousand" == self.config.name:
368
+ base_path = f"{extracted_files['gradesThousand']}/scrapedGradesThousand"
369
+ return [
370
+ datasets.SplitGenerator(
371
+ name=datasets.Split.TRAIN,
372
+ # These kwargs will be passed to _generate_examples
373
+ gen_kwargs={
374
+ "filepath": os.path.join(base_path, "train.csv"),
375
+ "split": "train",
376
+ },
377
+ ),
378
+ datasets.SplitGenerator(
379
+ name=datasets.Split.VALIDATION,
380
+ # These kwargs will be passed to _generate_examples
381
+ gen_kwargs={
382
+ "filepath": os.path.join(base_path, "validation.csv"),
383
+ "split": "validation",
384
+ },
385
+ ),
386
+ datasets.SplitGenerator(
387
+ name=datasets.Split.TEST,
388
+ gen_kwargs={
389
+ "filepath": os.path.join(base_path, "test.csv"),
390
+ "split": "test",
391
+ },
392
+ ),
393
+ ]
394
  html_parser = self._process_html_files(extracted_files)
395
  if "sourceA" in self.config.name:
396
  self._post_process_dataframe(html_parser.sourceA)
 
578
  "essay_year": row["essay_year"],
579
  "reference": row["reference"]
580
  }
581
+ elif self.config.name == "gradesThousand":
582
+ with open(filepath, encoding="utf-8") as csvfile:
583
+ next(csvfile)
584
+ csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADERTHOUSAND)
585
+ for i, row in enumerate(csv_reader):
586
+ grades = row["grades"].strip("[]")
587
+ grades = grades.split(", ")
588
+ yield i, {
589
+ "id": row["id"],
590
+ "id_prompt": row["id_prompt"],
591
+ "supporting_text": row["supporting_text"],
592
+ "essay_text": row["essay"],
593
+ "grades": grades,
594
+ "essay_year": row["essay_year"],
595
+ "author": row["author"],
596
+ "source": row["source"]
597
+ }
598
  else:
599
  with open(filepath, encoding="utf-8") as csvfile:
600
  next(csvfile)