File size: 3,879 Bytes
23a1b7b 5bc70a2 23a1b7b 687c1c3 23a1b7b 5bc70a2 5081572 5bc70a2 23a1b7b 5bc70a2 23a1b7b 5bc70a2 23a1b7b 5bc70a2 23a1b7b 5081572 23a1b7b 5bc70a2 687c1c3 5bc70a2 23a1b7b 5081572 5bc70a2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 |
import re
import string
from pathlib import Path
import logging
import pandas as pd
import datasets
from datasets import DatasetInfo, SplitDict, SplitInfo, load_dataset
# Lowercase letters used to build the temp_a, temp_b, ... placeholder names.
ALPHABET = string.ascii_lowercase
def temp_list(num_list):
    """Return an iterator of placeholder names temp_a, temp_b, ...

    One name is produced per element of *num_list*, in alphabetical order.
    """
    letters = string.ascii_lowercase[: len(num_list)]
    return ("temp_" + letter for letter in letters)
def extract_placeholders(text):
    """Return the inner text of every ``<<...>>`` placeholder, in order.

    The match is non-greedy, so adjacent placeholders are split correctly.
    """
    return re.findall(r"<<(.*?)>>", text)
def multiple_replace(text, replacement_dict):
    """Sequentially apply every old -> new substitution to *text*.

    Substitutions run in dict insertion order, each operating on the result
    of the previous one, so an earlier replacement's output can itself be
    rewritten by a later key.
    """
    result = text
    for old in replacement_dict:
        result = result.replace(old, replacement_dict[old])
    return result
def solution_human(solution, num_list):
    """Inline numeric results into the ``<<...>>`` expressions of *solution*.

    Each placeholder's expression is rewritten with concrete numbers
    (temp_a, temp_b, ... mapped onto *num_list*) and with the results of
    earlier placeholders, then evaluated; genuine arithmetic expressions
    are annotated as ``expr=value`` in the returned text.

    Args:
        solution: Text containing ``<<expr>>`` placeholders; expressions may
            reference ``temp_<letter>`` variables and earlier expressions.
        num_list: Numbers substituted for temp_a, temp_b, ... in order.

    Returns:
        The solution text with each placeholder's inner expression replaced
        by its substituted form (plus ``=result`` when it evaluated).
    """
    eqs = extract_placeholders(solution)
    # Rebind num_list as a mapping: temp_<letter> -> string form of the number.
    num_list = {key: str(value) for key, value in zip(temp_list(num_list), num_list)}
    modified = []
    # cached maps an already-substituted expression to its evaluated result,
    # so later expressions can reuse earlier intermediate values.
    cached = {}
    for eq in eqs:
        # Substitute numbers first, then results of earlier expressions.
        eq = multiple_replace(eq, num_list)
        eq = multiple_replace(eq, cached)
        try:
            # NOTE(review): eval on dataset-derived text — acceptable only if
            # the data source is trusted; confirm upstream provenance.
            res = eval(eq)
            be_eval = True
        except Exception:
            # Not evaluable (e.g. leftover non-arithmetic text): keep as-is.
            res = eq
            be_eval = False
        cached[eq] = str(res)
        num_ops = sum([1 for char in eq if char in "+-*/"])
        # Only annotate real arithmetic: it must contain an operator AND
        # have evaluated successfully.
        if num_ops and be_eval:
            modified.append(f"{eq}={cached[eq]}")
        else:
            modified.append(f"{eq}")
    text = solution
    # Replace each original placeholder body once, in order of appearance
    # (the surrounding << >> markers are left in place).
    for t, rt in zip(eqs, modified):
        text = text.replace(t, rt, 1)
    return text
def get_expre(example):
    """Turn the ``target_template`` token list into a format-string expression.

    The first two tokens are skipped (presumably a leading "x =" prefix —
    confirm against the dataset), ``temp_*`` tokens become ``{temp_*}``
    format fields, ``PI`` becomes the literal 3.14, and ``^`` becomes
    Python's ``**`` operator.
    """
    token_map = {"PI": "3.14", "^": "**"}
    pieces = []
    for token in example["target_template"][2:]:
        if token.startswith("temp"):
            pieces.append("{" + token + "}")
        else:
            pieces.append(token_map.get(token, token))
    return {"expression": "".join(pieces)}
# Get the alphabet
def regular(example):
    """Check that the rendered expression evaluates to the recorded answer.

    Example id "17520" is rejected outright (presumably a known-bad sample
    — confirm against the dataset). Used as a `datasets` filter predicate.
    """
    if example["id"] in ["17520"]:
        return False
    count = len(example["num_list"])
    bindings = {
        "temp_" + letter: value
        for letter, value in zip(string.ascii_lowercase[:count], example["num_list"])
    }
    # NOTE(review): eval on dataset text — acceptable only for trusted data.
    return eval(example["expression"].format(**bindings)) == example["answer"]
# Repo-relative CSV file(s) holding the human-written solution texts.
_DATA_FILES = ["data/math23k.csv"]
class DatasetBuilder(datasets.DatasetBuilder):
    """Builder pairing Math23K questions with human-readable solution texts.

    NOTE(review): this bends the normal `datasets` builder contract —
    `SplitInfo.shard_lengths` is repurposed to carry the downloaded CSV
    path, and `as_dataset` re-loads the upstream "Gxg/Math23K" dataset
    instead of reading prepared Arrow files. Confirm this works with the
    pinned `datasets` version before upgrading it.
    """

    def _info(self):
        # Empty DatasetInfo: features/splits are filled in later by
        # _download_and_prepare and inferred downstream.
        return DatasetInfo()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    # def download_and_prepare(self, *args, **kwargs):
    #     return self
    def _download_and_prepare(
        self, dl_manager, verification_mode, **prepare_split_kwargs
    ):
        """Download the solutions CSV and record its local path on the split.

        Does not write Arrow shards; it only stores bookkeeping on
        ``self.info`` for ``as_dataset`` to pick up.
        """
        downloaded_files = dl_manager.download(_DATA_FILES)
        split_dict = SplitDict(dataset_name=self.name)
        # HACK: shard_lengths normally holds per-shard row counts; here it
        # smuggles the downloaded CSV path through to as_dataset.
        split_info = SplitInfo(name="train", shard_lengths=downloaded_files[0])
        split_dict.add(split_info)
        self.info.splits = split_dict
        self.info.download_size = dl_manager.downloaded_size

    def as_dataset(self, split, **kwargs):
        """Build the final question/answer dataset for *split*.

        Loads upstream Math23K, derives/validates expressions, attaches the
        CSV's solution texts, humanizes them, and renames columns to the
        question/answer schema.
        """
        # CSV path stashed by _download_and_prepare (see HACK note there).
        df_file=self.info.splits[split].shard_lengths
        logging.info("Loading dataset %s split %s from %s", self.name, split, df_file)
        df = pd.read_csv(df_file)
        ds = load_dataset("Gxg/Math23K", self.config.name, split=split)
        ds = ds.map(get_expre).filter(regular)
        # NOTE(review): assumes df["answers"] rows align 1:1 with the
        # filtered dataset rows — verify row order/length against the CSV.
        ds = ds.add_column("solution", df["answers"])
        ds = ds.map(
            lambda exa: {
                "solution_human": solution_human(exa["solution"], exa["num_list"])
            }
        )
        ds = ds.select_columns(["original_text", "solution_human"])
        ds = ds.rename_columns(
            {"original_text": "question", "solution_human": "answer"}
        )
        return ds
|