update import
utils.py
ADDED
@@ -0,0 +1,188 @@
import itertools
import numpy as np
from typing import Dict
from datasets import load_dataset
import testing_util as test_util


DATASET = "codeparrot/apps"


def evaluate_generations(generations: list, level: str = "all", debug: bool = False):
    """We take the list of code generations, try to compile them,
    and then run their corresponding unit tests, which are retrieved from the APPS dataset.

    Args:
        generations: list of code generations (same order as the samples in the APPS dataset)
        level: difficulty level used in the generation, can be "all", "introductory", "interview" or "competition"
        debug: whether to print debug information

    Returns:
        results: dictionary of results, key is the problem index, value is a list of results for each generation
        [-2] = compile error, [-1] = runtime error, [False] = failed test case, [True] = passed test case
    """

    # generations are code generations in the same order as the dataset
    apps_eval = load_dataset(DATASET, split="test", difficulties=[level])
    results = {}
    for index in range(len(generations)):
        # code generations for problem (index)
        problem_generations = generations[index]
        # get corresponding samples from APPS dataset
        sample = apps_eval[index]
        res = []
        # loop over the generations
        for o_idx, o in enumerate(problem_generations):
            curr_res = [-2]
            try:
                curr_res = test_util.run_test(sample, test=o, debug=debug)
                if debug:
                    print(f"\nSuccessful compilation of task {index}!")
                fixed = []
                for e in curr_res:
                    if isinstance(e, np.ndarray):
                        e = e.item(0)
                    if isinstance(e, np.bool_):
                        e = bool(e)
                    fixed.append(e)
                curr_res = fixed
                if not np.all(curr_res):
                    if debug:
                        print("Results were not True for all test cases")
            except Exception as e:
                if debug:
                    print(f"Compilation failed, test framework exception = {repr(e)}{e}\n")
                break
            finally:
                assert isinstance(curr_res, list)
                res.append(curr_res)
        results[index] = res
    return results
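
# Illustration (hypothetical code strings, not part of the original file):
# `generations` is a list over problems, each entry being a list of candidate
# programs for that problem, e.g.
#     example_generations = [
#         ["print(input())", "s = input()\nprint(s)"],  # two candidates for problem 0
#         ["print(42)"],                                # one candidate for problem 1
#     ]
# evaluate_generations(example_generations) then maps each problem index to one
# result list per candidate.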


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])

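
# Worked example (illustrative numbers, not from the original file): with n=5
# generations for a problem and c=2 of them passing all tests, the unbiased
# pass@1 estimate is 1 - C(5-2, 1) / C(5, 1) = 1 - 3/5 = 0.4, so
#     estimate_pass_at_k([5, 5], [2, 0], k=1)
# should return approximately array([0.4, 0.0]).
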
def get_results(results: Dict[int, list], count_errors: bool = False, k_list: list = [1, 10, 100]):
    """
    Given the results evaluated against the testcases, we output some statistics.
    For single generations:
    >>> example_results = {0: [[-2]], 1: [[False,False]], 2: [[True,True]], 3: [[False,True,False,True]], 4: [[-1,-1]]}
    >>> get_results(example_results, count_errors=True)
    Computing accuracy metrics...
    number of compile errors = 1 avg = 0.2
    number of runtime errors = 1 avg = 0.2
    number of problems evaluated = 5
    Average Accuracy : 0.3
    Strict Accuracy : 0.2
    {'avg_accuracy': 0.3, 'strict_accuracy': 0.2, 'pass_at_k': None}

    For multiple generations:
    >>> example_results = {0: [[-2], [True, True, True]], 1: [[-1,-1, -1], [True, False, True]]}
    >>> get_results(example_results, k_list=[1, 2])
    Computing pass@k metric for multiple generations...
    {'pass@1': 0.25, 'pass@2': 0.5}
    {'avg_accuracy': None, 'strict_accuracy': None, 'pass_at_k': {'pass@1': 0.25, 'pass@2': 0.5}}
    """

    metrics = {"avg_accuracy": None, "strict_accuracy": None, "pass_at_k": None}

    if len(results[0]) == 1:
        # for single generations we compute average accuracy and strict accuracy: the original APPS metrics
        print("Computing accuracy metrics...")
        res = []
        per_prob_res = []
        all_correct = []
        for index in results:
            problem_results = np.asarray(results[index])
            res.extend(problem_results)
            per_prob_res.append(np.mean(problem_results > 0))
            all_correct.append(np.all(problem_results > 0))
        # we count compilation and runtime errors once per problem
        compile_errors = len([e for e in res if -2 in e])
        runtime_errors = len([e for e in res if -1 in e])
        total_testcases = len(res)
        if count_errors:
            print(f"number of compile errors = {compile_errors} avg = {compile_errors / total_testcases}")
            print(f"number of runtime errors = {runtime_errors} avg = {runtime_errors / total_testcases}")
            print(f"number of problems evaluated = {total_testcases}")

        print(f"Average Accuracy : {np.mean(per_prob_res)}")
        print(f"Strict Accuracy : {np.mean(all_correct)}")
        metrics["avg_accuracy"] = np.mean(per_prob_res)
        metrics["strict_accuracy"] = np.mean(all_correct)

    else:
        # for multiple generations we use the pass@k metric from the HumanEval benchmark
        # we use strict accuracy: a generation is valid only if it passes all the tests
        print("Computing pass@k metric for multiple generations...")
        # total is a list with the number of generations per task (task=index)
        # correct is the number of generations that passed all tests per task
        total = []
        correct = []
        for index in results:
            all_correct = []
            for generation in results[index]:
                gen = np.array(generation)
                all_correct.append(np.all(gen > 0))
            total.append(len(all_correct))
            correct.append(sum(all_correct))
        total = np.array(total)
        correct = np.array(correct)
        ks = k_list
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        print(pass_at_k)
        metrics["pass_at_k"] = pass_at_k
    return metrics

def compute_metrics(generations, level="all", k_list=[1, 10, 100], count_errors=True, debug=False):
    """Return metrics for the given generations.
    Args:
        generations: list of code generations for each problem (each element is itself a list of generations for that problem)
        k_list: list of k values to compute pass@k when using multiple generations
        count_errors: whether to count compilation and runtime errors when using single generations
        level: difficulty level in the APPS dataset that was used for the given generations (one of "all", "introductory", "interview", "competition")
    Returns:
        metrics: dict of metrics

    Examples:

    >>> import json
    >>> # lists of solutions to the two first APPS problems (note that not all solutions pass all tests)
    >>> solution_sample1 = json.load(open("test_examples/solutions_problem_1.json", "r"))
    >>> solution_sample2 = json.load(open("test_examples/solutions_problem_2.json", "r"))
    >>> single_solutions = [solution_sample1[:1], solution_sample2[:1]]
    >>> compute_metrics(single_solutions, level="all")
    Computing accuracy metrics...
    number of compile errors = 0 avg = 0.0
    number of runtime errors = 0 avg = 0.0
    number of problems evaluated = 2
    Average Accuracy : 1.0
    Strict Accuracy : 1.0
    {'avg_accuracy': 1.0, 'strict_accuracy': 1.0, 'pass_at_k': None}
    >>> multiple_solutions = [solution_sample1[:3], solution_sample2[:3]]
    >>> compute_metrics(multiple_solutions, level="all", k_list=[1, 2, 3])
    Computing pass@k metric for multiple generations...
    {'pass@1': 1.0, 'pass@2': 1.0, 'pass@3': 1.0}
    {'avg_accuracy': None, 'strict_accuracy': None, 'pass_at_k': {'pass@1': 1.0, 'pass@2': 1.0, 'pass@3': 1.0}}
    """
    results = evaluate_generations(generations, level=level, debug=debug)
    metrics = get_results(results, count_errors=count_errors, k_list=k_list)
    return metrics


# import doctest
# doctest.testmod()
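

# Minimal usage sketch (hypothetical: "generations.json" is an illustrative file
# name, assumed to hold one list of candidate solutions per APPS test problem;
# it is not part of the original repository).
if __name__ == "__main__":
    import json

    with open("generations.json", "r") as f:
        generations = json.load(f)
    metrics = compute_metrics(generations, level="all", k_list=[1, 10, 100])
    print(metrics)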