def cook_refs(refs, n=4):
    '''Takes the list of reference sentences for one segment and returns the
    pair (reference lengths, maximum n-gram counts over the references) that
    cook_test expects.'''
    refs = [normalize(ref) for ref in refs]
    maxcounts = {}
    for ref in refs:
        counts = count_ngrams(ref, n)
        for (ngram, count) in counts.items():
            maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
    return ([len(ref) for ref in refs], maxcounts)

def cook_test(test, item, n=4):
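    '''Takes a hypothesis sentence and the (reference lengths, max reference
    n-gram counts) pair produced by cook_refs, and returns the per-segment
    statistics BLEU needs: hypothesis length, effective reference length, and
    per-order n-gram guesses and clipped correct counts.'''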
    (reflens, refmaxcounts) = item
    test = normalize(test)
    result: Dict[str, Any] = {}
    result['testlen'] = len(test)
    if eff_ref_len == 'shortest':
        result['reflen'] = min(reflens)
    elif eff_ref_len == 'average':
        result['reflen'] = float(sum(reflens)) / len(reflens)
    elif eff_ref_len == 'closest':
        min_diff: Optional[int] = None
        for reflen in reflens:
            if min_diff is None or abs(reflen - len(test)) < min_diff:
                min_diff = abs(reflen - len(test))
                result['reflen'] = reflen
    result['guess'] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)]
    result['correct'] = [0] * n
    counts = count_ngrams(test, n)
    for (ngram, count) in counts.items():
        result['correct'][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
    return result

def score_cooked(allcomps, n=4, ground=0, smooth=1):
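    '''Sums the per-segment statistics in allcomps and returns a list
    [overall BLEU, 1-gram precision, ..., n-gram precision]. With smooth=1,
    add-one smoothing is applied to the higher-order precisions of the overall
    score, which also includes the brevity penalty; ground is unused.'''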
    totalcomps: Dict[str, Any] = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
    for comps in allcomps:
        for key in ['testlen', 'reflen']:
            totalcomps[key] += comps[key]
        for key in ['guess', 'correct']:
            for k in range(n):
                totalcomps[key][k] += comps[key][k]
    logbleu = 0.0
    all_bleus: List[float] = []
    for k in range(n):
        correct = totalcomps['correct'][k]
        guess = totalcomps['guess'][k]
        addsmooth = 0
        if smooth == 1 and k > 0:
            addsmooth = 1
        logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log(guess + addsmooth + sys.float_info.min)
        if guess == 0:
            all_bleus.append(-10000000.0)
        else:
            all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess))
    logbleu /= float(n)
    all_bleus.insert(0, logbleu)
    brevPenalty = min(0, 1 - float(totalcomps['reflen'] + 1) / (totalcomps['testlen'] + 1))
    for i in range(len(all_bleus)):
        if i == 0:
            all_bleus[i] += brevPenalty
        all_bleus[i] = math.exp(all_bleus[i])
    return all_bleus

def bleu(refs, candidate, ground=0, smooth=1):
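    '''Scores a single candidate string against its list of reference strings
    by cooking both and scoring the resulting one-segment corpus.'''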
    refs = cook_refs(refs)
    test = cook_test(candidate, refs)
    return score_cooked([test], ground=ground, smooth=smooth)

def splitPuncts(line):
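    '''Separates a line into word-character runs and individual punctuation
    marks, joined by single spaces.'''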
    return ' '.join(re.findall(r'\w+|[^\s\w]', line))

def computeMaps(predictions, goldfile):
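    '''Reads tab-separated "id<TAB>text" rows from the in-memory predictions
    and from the gold file, lowercases and tokenizes them, and returns
    (goldMap, predictionMap) keyed by id; gold rows whose id has no prediction
    are skipped.'''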
    predictionMap: Dict[str, list] = {}
    goldMap: Dict[str, list] = {}
    for row in predictions:
        cols = row.strip().split('\t')
        if len(cols) == 1:
            (rid, pred) = (cols[0], '')
        else:
            (rid, pred) = (cols[0], cols[1])
        predictionMap[rid] = [splitPuncts(pred.strip().lower())]
    with open(goldfile, 'r', encoding='utf-8') as gf:
        for row in gf:
            (rid, pred) = row.split('\t')
            if rid in predictionMap:
                if rid not in goldMap:
                    goldMap[rid] = []
                goldMap[rid].append(splitPuncts(pred.strip().lower()))
    sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
    return (goldMap, predictionMap)

def bleuFromMaps(m1, m2):
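    '''Averages segment-level smoothed BLEU over every id present in both
    maps and returns the scores scaled to percentages.'''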
    score = [0] * 5
    num = 0.0
    for key in m1:
        if key in m2:
            bl = bleu(m1[key], m2[key][0])
            score = [score[i] + bl[i] for i in range(0, len(bl))]
            num += 1
    return [s * 100.0 / num for s in score]

def smoothed_bleu_4(references, predictions, **kwargs):
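    '''Metric entry point: pairs references and predictions by position,
    tokenizes them, and returns the corpus-level smoothed BLEU-4 overall
    score as a percentage.'''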
    predictionMap = {}
    goldMap = {}
    for (rid, pred) in enumerate(predictions):
        predictionMap[rid] = [splitPuncts(pred.strip().lower())]
    for (rid, row) in enumerate(references):
        goldMap[rid] = [splitPuncts(row.strip().lower())]
    return bleuFromMaps(goldMap, predictionMap)[0]
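

# A minimal usage sketch, assuming the module is run with the helper
# functions it relies on (normalize, count_ngrams, eff_ref_len) defined
# earlier in the file. The strings below are made-up illustrative examples,
# not data from any evaluation set.
if __name__ == '__main__':
    example_references = ['returns the sum of two numbers .']
    example_predictions = ['return the sum of two numbers .']
    print('smoothed BLEU-4: %.2f' % smoothed_bleu_4(example_references, example_predictions))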