Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
parquet
Languages:
Portuguese
Size:
1K - 10K
License:
Andre Barbosa
committed on
Commit
·
3cf44d9
1
Parent(s):
1fdeff2
add reference columns to all splits
Browse files- aes_enem_dataset.py +61 -15
aes_enem_dataset.py
CHANGED
@@ -117,6 +117,7 @@ CSV_HEADERPROPOR = [
|
|
117 |
"essay",
|
118 |
"grades",
|
119 |
"essay_year",
|
|
|
120 |
]
|
121 |
|
122 |
SOURCE_A_DESC = """
|
@@ -191,21 +192,34 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
|
|
191 |
]
|
192 |
|
193 |
def _info(self):
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
209 |
|
210 |
return datasets.DatasetInfo(
|
211 |
# This is the description that will appear on the datasets page.
|
@@ -260,11 +274,42 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
|
|
260 |
] # arbitrary removal of zero graded essays
|
261 |
df.to_csv(filepath, index=False)
|
262 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
263 |
def _split_generators(self, dl_manager):
|
264 |
urls = _URLS[self.config.name]
|
265 |
extracted_files = dl_manager.download_and_extract({self.config.name: urls})
|
266 |
if "PROPOR2024" == self.config.name:
|
267 |
base_path = extracted_files["PROPOR2024"]
|
|
|
268 |
return [
|
269 |
datasets.SplitGenerator(
|
270 |
name=datasets.Split.TRAIN,
|
@@ -475,6 +520,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
|
|
475 |
"essay_text": row["essay"],
|
476 |
"grades": grades,
|
477 |
"essay_year": row["essay_year"],
|
|
|
478 |
}
|
479 |
else:
|
480 |
with open(filepath, encoding="utf-8") as csvfile:
|
|
|
117 |
"essay",
|
118 |
"grades",
|
119 |
"essay_year",
|
120 |
+
"reference"
|
121 |
]
|
122 |
|
123 |
SOURCE_A_DESC = """
|
|
|
192 |
]
|
193 |
|
194 |
def _info(self):
|
195 |
+
if self.config.name=="PROPOR2024":
|
196 |
+
features = datasets.Features(
|
197 |
+
{
|
198 |
+
"id": datasets.Value("string"),
|
199 |
+
"id_prompt": datasets.Value("string"),
|
200 |
+
"essay_title": datasets.Value("string"),
|
201 |
+
"essay_text": datasets.Value("string"),
|
202 |
+
"grades": datasets.Sequence(datasets.Value("int16")),
|
203 |
+
"essay_year": datasets.Value("int16"),
|
204 |
+
"reference": datasets.Value("string"),
|
205 |
+
}
|
206 |
+
)
|
207 |
+
else:
|
208 |
+
features = datasets.Features(
|
209 |
+
{
|
210 |
+
"id": datasets.Value("string"),
|
211 |
+
"id_prompt": datasets.Value("string"),
|
212 |
+
"prompt": datasets.Value("string"),
|
213 |
+
"supporting_text": datasets.Value("string"),
|
214 |
+
"essay_title": datasets.Value("string"),
|
215 |
+
"essay_text": datasets.Value("string"),
|
216 |
+
"grades": datasets.Sequence(datasets.Value("int16")),
|
217 |
+
"essay_year": datasets.Value("int16"),
|
218 |
+
"general_comment": datasets.Value("string"),
|
219 |
+
"specific_comment": datasets.Value("string"),
|
220 |
+
"reference": datasets.Value("string"),
|
221 |
+
}
|
222 |
+
)
|
223 |
|
224 |
return datasets.DatasetInfo(
|
225 |
# This is the description that will appear on the datasets page.
|
|
|
274 |
] # arbitrary removal of zero graded essays
|
275 |
df.to_csv(filepath, index=False)
|
276 |
|
277 |
+
def _preprocess_propor2024(self, base_path: str):
    """Add a ``reference`` column to each PROPOR2024 split CSV, in place.

    Each ``(id, id_prompt)`` pair may appear several times within a split;
    the order of appearance encodes the provenance of the grades:
    occurrence 0 -> "crawled_from_web", 1 -> "grader_a", 2 -> "grader_b".
    Any further duplicate is labeled "unknown".

    Args:
        base_path: extraction directory that contains
            ``propor2024/train.csv``, ``propor2024/validation.csv`` and
            ``propor2024/test.csv``. Each file is rewritten with the extra
            column; the row order is left untouched.
    """
    occurrence_to_reference = {
        0: "crawled_from_web",
        1: "grader_a",
        2: "grader_b",
    }
    for split_case in ["train.csv", "validation.csv", "test.csv"]:
        filepath = f"{base_path}/propor2024/{split_case}"
        df = pd.read_csv(filepath)
        # Vectorized replacement for a per-row iterrows() counting loop:
        # cumcount() numbers each row's occurrence inside its
        # (id, id_prompt) group following the original row order, which is
        # exactly the manual dict-based counter's behavior. dropna=False
        # keeps rows with missing keys countable instead of silently
        # excluded from the grouping.
        occurrence = df.groupby(
            ["id", "id_prompt"], sort=False, dropna=False
        ).cumcount()
        df["reference"] = occurrence.map(occurrence_to_reference).fillna("unknown")
        df.to_csv(filepath, index=False)
|
306 |
+
|
307 |
def _split_generators(self, dl_manager):
|
308 |
urls = _URLS[self.config.name]
|
309 |
extracted_files = dl_manager.download_and_extract({self.config.name: urls})
|
310 |
if "PROPOR2024" == self.config.name:
|
311 |
base_path = extracted_files["PROPOR2024"]
|
312 |
+
self._preprocess_propor2024(base_path)
|
313 |
return [
|
314 |
datasets.SplitGenerator(
|
315 |
name=datasets.Split.TRAIN,
|
|
|
520 |
"essay_text": row["essay"],
|
521 |
"grades": grades,
|
522 |
"essay_year": row["essay_year"],
|
523 |
+
"reference": row["reference"]
|
524 |
}
|
525 |
else:
|
526 |
with open(filepath, encoding="utf-8") as csvfile:
|