|
import re |
|
import string |
|
from pathlib import Path |
|
import logging |
|
|
|
import pandas as pd |
|
|
|
import datasets |
|
from datasets import DatasetInfo, SplitDict, SplitInfo, load_dataset |
|
|
|
# Lowercase letters a-z used to generate placeholder suffixes: temp_a, temp_b, ...
# (silently caps the number of distinct placeholders at 26).
ALPHABET = string.ascii_lowercase
|
|
|
def temp_list(num_list):
    """Yield placeholder names temp_a, temp_b, ... — one per element of *num_list*.

    Returns a lazy iterator; callers materialize it with list()/zip().
    Only the first 26 elements can receive a placeholder (a-z).
    """
    suffixes = string.ascii_lowercase[: len(num_list)]
    return ("temp_" + letter for letter in suffixes)
|
|
|
|
|
def extract_placeholders(text):
    """Return the contents of every <<...>> placeholder in *text*, in order.

    Uses a non-greedy match so adjacent placeholders are split correctly.
    """
    return [m.group(1) for m in re.finditer(r"<<(.*?)>>", text)]
|
|
|
|
|
def multiple_replace(text, replacement_dict):
    """Apply every old->new substitution from *replacement_dict* to *text*.

    Substitutions run sequentially in dict insertion order, so an earlier
    replacement's output can itself be rewritten by a later entry.
    """
    result = text
    for old, new in replacement_dict.items():
        result = result.replace(old, new)
    return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def solution_human(solution, num_list):
    """Rewrite *solution* so each <<...>> equation carries its computed result.

    temp_a/temp_b/... placeholders inside each equation are substituted with
    the corresponding numbers from *num_list*; an equation that evaluates
    successfully and contains at least one arithmetic operator is rendered
    as "expr=result", otherwise it is left as the substituted text.

    NOTE(review): uses eval() on dataset-derived text — trusted input assumed.
    """
    eqs = extract_placeholders(solution)
    # Map temp_a, temp_b, ... -> the corresponding number rendered as a string.
    num_list = {key: str(value) for key, value in zip(temp_list(num_list), num_list)}

    modified = []
    cached = {}  # substituted equation text -> its evaluated result (as str)
    for eq in eqs:
        eq = multiple_replace(eq, num_list)
        # Earlier results may appear verbatim inside later equations; inline them.
        eq = multiple_replace(eq, cached)
        try:
            res = eval(eq)
            be_eval = True
        except Exception:
            # Not an evaluable expression; keep the raw substituted text.
            res = eq
            be_eval = False
        cached[eq] = str(res)
        # Count arithmetic operators; a bare number gets no "=result" suffix.
        num_ops = sum([1 for char in eq if char in "+-*/"])
        if num_ops and be_eval:
            modified.append(f"{eq}={cached[eq]}")
        else:
            modified.append(f"{eq}")

    # Splice each rewritten equation back over its original (inner) text,
    # replacing only the first occurrence so duplicates stay aligned with eqs.
    text = solution
    for t, rt in zip(eqs, modified):
        text = text.replace(t, rt, 1)

    return text
|
|
|
|
|
def get_expre(example):
    """Turn the token list in example["target_template"] into an eval-able
    format string, e.g. ["x", "=", "temp_a", "+", "temp_b"] -> "{temp_a}+{temp_b}".

    The first two tokens (the "x =" prefix) are dropped; "PI" and "^" are
    rewritten to Python-evaluable equivalents; temp_* tokens become
    str.format placeholders.
    """
    literal_tokens = {"PI": "3.14", "^": "**"}
    parts = []
    for token in example["target_template"][2:]:
        if token.startswith("temp"):
            parts.append("{" + token + "}")
        else:
            parts.append(literal_tokens.get(token, token))
    return {"expression": "".join(parts)}
|
|
|
|
|
|
|
def regular(example):
    """Return True iff substituting the numbers into the expression and
    evaluating it reproduces example["answer"].

    Example id "17520" is a known bad record and is always rejected.
    NOTE(review): uses eval() on dataset-derived text — trusted input assumed.
    """
    if example["id"] in {"17520"}:
        return False
    placeholders = list(temp_list(example["num_list"]))
    bindings = dict(zip(placeholders, example["num_list"]))
    concrete = example["expression"].format(**bindings)
    return eval(concrete) == example["answer"]
|
|
|
|
|
# CSV with the human-written solutions ("answers" column); fetched by
# dl_manager.download() in DatasetBuilder._download_and_prepare.
_DATA_FILES = ["data/math23k.csv"]
|
|
|
|
|
class DatasetBuilder(datasets.DatasetBuilder):
    """Builds a Math23K question/answer dataset by joining the Gxg/Math23K
    hub dataset with the locally shipped human-written solutions CSV
    (_DATA_FILES), producing "question"/"answer" columns."""

    def _info(self):
        # Minimal metadata; features are left for the library to infer.
        return DatasetInfo()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _download_and_prepare(
        self, dl_manager, verification_mode, **prepare_split_kwargs
    ):
        """Download the solutions CSV and register a single "train" split."""
        downloaded_files = dl_manager.download(_DATA_FILES)
        split_dict = SplitDict(dataset_name=self.name)
        # HACK: shard_lengths is repurposed to carry the downloaded CSV path
        # so as_dataset() can retrieve it — not the field's documented meaning.
        split_info = SplitInfo(name="train", shard_lengths=downloaded_files[0])
        split_dict.add(split_info)
        self.info.splits = split_dict
        self.info.download_size = dl_manager.downloaded_size

    def as_dataset(self, split, **kwargs):
        """Load the hub dataset, validate/augment it, and return it with
        only "question" and "answer" columns."""
        # Path stashed in shard_lengths by _download_and_prepare above.
        df_file=self.info.splits[split].shard_lengths
        logging.info("Loading dataset %s split %s from %s", self.name, split, df_file)
        df = pd.read_csv(df_file)
        ds = load_dataset("Gxg/Math23K", self.config.name, split=split)
        # Build evaluable expressions, then keep only rows whose expression
        # reproduces the recorded answer.
        ds = ds.map(get_expre).filter(regular)
        # NOTE(review): assumes df rows align one-to-one (same order/length)
        # with the filtered dataset — TODO confirm; add_column requires it.
        ds = ds.add_column("solution", df["answers"])
        ds = ds.map(
            lambda exa: {
                "solution_human": solution_human(exa["solution"], exa["num_list"])
            }
        )
        ds = ds.select_columns(["original_text", "solution_human"])

        ds = ds.rename_columns(
            {"original_text": "question", "solution_human": "answer"}
        )
        return ds
|
|