text
stringlengths 0
93.6k
|
---|
denominator = match.group().split('/')[1]
|
numerator = match.group().split('/')[0]
|
if is_number(denominator) == True and is_number(numerator) == True:
|
if denominator == '0':
|
return round(float(numerator.replace(',', '')))
|
else:
|
frac = Fraction(match.group().replace(',', ''))
|
num_numerator = frac.numerator
|
num_denominator = frac.denominator
|
return round(float(num_numerator / num_denominator))
|
else:
|
return None
|
else:
|
if float(match.group().replace(',', '')) == float('inf'):
|
return None
|
return round(float(match.group().replace(',', '')))
|
else:
|
return None
|
else:
|
return None
|
def batch_data(data_list, batch_size=1):
    """Split *data_list* into batches of *batch_size* items.

    The first ``n - 1`` batches (``n = len(data_list) // batch_size``) are
    exactly ``batch_size`` long; the final batch absorbs everything left,
    including the remainder when ``len(data_list)`` is not a multiple of
    ``batch_size``.  When ``len(data_list) < batch_size`` a single batch
    containing the whole list is returned (matching the original negative-index
    slice behavior).

    Args:
        data_list: Sequence to split.
        batch_size: Target size of each batch (default 1).

    Returns:
        list: List of slices of ``data_list``.
    """
    n = len(data_list) // batch_size
    # Renamed from `batch_data`: the original shadowed the function's own name.
    batches = []
    for i in range(n - 1):
        batches.append(data_list[i * batch_size:(i + 1) * batch_size])
    # Open-ended slice replaces the original `MAX_INT` sentinel end index;
    # slicing past the end clamps, so behavior is identical.
    batches.append(data_list[(n - 1) * batch_size:])
    return batches
|
def gsm8k_test(model, data_path, start=0, end=MAX_INT, batch_size=1, tensor_parallel_size=1, filepath_output=None):
    """Evaluate *model* on a GSM8k-style jsonlines file and write the accuracy.

    Args:
        model: Path to the model checkpoint (also used to derive the default
            output path: ``<model dir>/result_gsm8k.txt``).
        data_path: Jsonlines file; each record needs a ``"query"`` field (the
            problem) and a ``"response"`` field ending in ``"#### <answer>"``.
        start, end: Slice of the dataset to evaluate.
        batch_size: Number of prompts per generation call.
        tensor_parallel_size: Forwarded to the LLM engine constructor.
        filepath_output: Where to write the accuracy; defaults next to *model*.

    Side effects: loads the model, generates completions, prints progress, and
    writes the accuracy (5 decimal places) to *filepath_output*.
    """
    if filepath_output is None:
        # Default: drop the result file into the model's parent directory.
        filepath_output = '/'.join(model.split('/')[:-1]) + "/" + "result_gsm8k.txt"
    print(f"Result file will be dumped to {filepath_output}")
    gsm8k_ins = []
    gsm8k_answers = []
    problem_prompt = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response: Let's think step by step."
    )
    print('prompt =====', problem_prompt)
    # Fixed: open read-only ("r") — the original used "r+" (read/write) but
    # never writes to the dataset file.
    with open(data_path, "r", encoding="utf8") as f:
        for idx, item in enumerate(jsonlines.Reader(f)):
            temp_instr = problem_prompt.format(instruction=item["query"])
            gsm8k_ins.append(temp_instr)
            # Gold answer follows the GSM8k "#### <number>" convention;
            # strip thousands separators before parsing.
            temp_ans = item['response'].split('#### ')[1]
            temp_ans = int(temp_ans.replace(',', ''))
            gsm8k_answers.append(temp_ans)
    gsm8k_ins = gsm8k_ins[start:end]
    gsm8k_answers = gsm8k_answers[start:end]
    print('length ====', len(gsm8k_ins))
    batch_gsm8k_ins = batch_data(gsm8k_ins, batch_size=batch_size)

    stop_tokens = ["Question:", "Question", "USER:", "USER", "ASSISTANT:", "ASSISTANT", "Instruction:", "Instruction", "Response:", "Response"]
    # temperature=0.0 → greedy decoding, so the evaluation is deterministic.
    sampling_params = SamplingParams(temperature=0.0, top_p=1, max_tokens=512, stop=stop_tokens)
    print('sampling =====', sampling_params)
    llm = LLM(model=model, tensor_parallel_size=tensor_parallel_size)

    result = []
    res_completions = []
    for idx in trange(len(batch_gsm8k_ins), desc='Predicting on GSM8k'):
        prompt = batch_gsm8k_ins[idx]
        # Generation expects a list of prompts; wrap a lone string.
        if not isinstance(prompt, list):
            prompt = [prompt]
        completions = llm.generate(prompt, sampling_params, use_tqdm=False)
        for output in completions:
            generated_text = output.outputs[0].text
            res_completions.append(generated_text)

    invalid_outputs = []
    for idx, (prompt, completion, prompt_answer) in enumerate(zip(gsm8k_ins, res_completions, gsm8k_answers)):
        y_pred = extract_answer_number(completion)
        # Fixed: identity comparison with None (was `y_pred != None`).
        if y_pred is not None:
            result.append(float(y_pred) == float(prompt_answer))
        else:
            # No parseable number in the completion counts as wrong.
            result.append(False)
            temp = {'question': prompt, 'output': completion, 'answer': prompt_answer}
            invalid_outputs.append(temp)
    # Fixed: guard against an empty evaluation slice (was an unconditional
    # division that raised ZeroDivisionError when start >= end).
    acc = sum(result) / len(result) if result else 0.0
    # Fixed: label said 'valid_outputs' while printing the invalid ones.
    print('len invalid outputs ====', len(invalid_outputs), ', invalid_outputs===', invalid_outputs)
    print('start===', start, ', end====', end)
    print('gsm8k length====', len(result), ', gsm8k acc====', acc)
    with open(filepath_output, "w") as f:
        f.write(f"{acc:.5f}")
|
def parse_args():
|
parser = argparse.ArgumentParser()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.