"""LogMetric: an evaluation module that scores generated log output against reference logs."""
|
|
|
import evaluate |
|
import datasets |
|
import re |
|
import dateutil.parser |
|
import numpy as np |
|
|
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:module, |
|
title = {A great new module}, |
|
authors={huggingface, Inc.}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
LogMetric compares generated log output against reference logs. It scores line counts, line content (sacreBLEU, with and without explicit numbers), and timestamp structure (count, format consistency, monotonicity, and deltas between consecutive timestamps). All scores lie in [0, 1]; higher is better.
|
""" |
|
|
|
|
|
|
|
_KWARGS_DESCRIPTION = """
Compares predicted logs against reference logs and returns a set of similarity scores.
Args:
    predictions: list of generated logs to score. Each prediction is a single string
        containing one or more log lines, each optionally prefixed by a timestamp.
    references: list of reference logs, one per prediction, in the same format.
Returns:
    linecount_difference_SMAPE_score: SMAPE-based similarity of the number of lines.
    linecontentlength_difference_SMAPE_score: SMAPE-based similarity of the log-message lengths.
    linecontent_sacrebleu_score: sacreBLEU score of the log messages (timestamps removed).
    linecontent_sacrebleu_withoutExplicitNumbers_score: sacreBLEU score of the log messages with
        explicit numbers replaced by placeholder tokens.
    timestamps_SMAPE_difference_score: SMAPE-based similarity of the number of timestamps.
    timestamps_formatConsistency_score: 1.0 if all predicted timestamps follow the format of the
        first predicted timestamp, else 0.0.
    timestamps_monotonicity_score: 1.0 if the predicted timestamps are monotonically increasing, else 0.0.
    timestamps_delta_SMAPE_score: SMAPE-based similarity of the deltas between consecutive timestamps.
    All scores lie in [0, 1]; higher is better. With several prediction/reference pairs, the mean
    over all pairs is returned for each score.
Examples:
    >>> logmetric = evaluate.load("logmetric")  # illustrative load path; use the actual path of this module
    >>> results = logmetric.compute(
    ...     predictions=["[2024-01-01 12:00:00] job started"],
    ...     references=["[2024-01-01 12:00:00] job started"])
    >>> print(results["linecount_difference_SMAPE_score"])
    1.0
"""
|
|
|
|
|
|
|
|
|
|
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) |
|
class LogMetric(evaluate.Metric):
    """Evaluation module that scores generated logs against reference logs (line counts, content, and timestamps)."""
|
|
|
|
|
timestamp_regex = r'^\s*\[?\s*(\d{4}[-/.]\d{2}[-/.]\d{2}(?:[ T]\d{2}[:]\d{2}(?:[:]\d{2}(?:[.,]\d+)?)?(?:Z|[+-]\d{2}[:]\d{2})?)?)\s*\]?\s*' |
|
timestamp_pattern = re.compile(timestamp_regex, re.MULTILINE) |
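    # The pattern matches an optionally bracketed timestamp at the start of a line: a date
    # (YYYY-MM-DD, YYYY/MM/DD or YYYY.MM.DD), optionally followed by a time with optional
    # fractional seconds and timezone. The timestamp is the only capturing group, which matters
    # for how the logs are split in getLogMetric below.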
|
|
|
int_regex = r'(-?\d+)' |
|
int_pattern = re.compile(int_regex) |
|
|
|
float_regex = r'(-?\d+\.\d+)' |
|
float_pattern = re.compile(float_regex) |
|
|
|
    # Loaded once when the class is defined; requires the `sacrebleu` package to be installed.
    sacrebleu_metric = evaluate.load("evaluate-metric/sacrebleu")
|
|
|
|
|
def _info(self): |
|
|
|
return evaluate.MetricInfo( |
|
|
|
module_type="metric", |
|
description=_DESCRIPTION, |
|
citation=_CITATION, |
|
inputs_description=_KWARGS_DESCRIPTION, |
|
|
|
|
|
features=datasets.Features({ |
|
"predictions": datasets.Value("string", id="sequence"), |
|
"references": datasets.Value("string", id="sequence"), |
|
}), |
|
|
|
homepage="http://module.homepage", |
|
|
|
codebase_urls=["http://github.com/path/to/codebase/of/new_module"], |
|
reference_urls=["http://path.to.reference.url/new_module"] |
|
) |
|
|
|
def _download_and_prepare(self, dl_manager): |
|
"""Optional: download external resources useful to compute the scores""" |
|
|
|
pass |
|
|
|
|
|
    def get_jaccard_similarity(self, set1, set2):
        """Jaccard similarity of two sets, defined as 1.0 when both sets are empty.

        Note: currently not used by getLogMetric.
        """
        intersection = set1.intersection(set2)
        union = set1.union(set2)
        if len(union) == 0:
            return 1.0

        return len(intersection) / len(union)
|
|
|
|
|
    def get_length_score(self, preds_split, refs_split):
        """SMAPE-based similarity of the character lengths of the individual log messages."""
        pred_content_lengths = np.vectorize(len)(preds_split)
        ref_content_lengths = np.vectorize(len)(refs_split)

        return self.smapeScore(pred_content_lengths, ref_content_lengths)
|
|
|
|
|
    def smapeScore(self, P, R):
        """Symmetric similarity in [0, 1], computed as 1 - SMAPE(P, R).

        P and R are either two numbers or two equal-length sequences of numbers.
        Identical inputs score 1.0.
        """
        P_isnumber = isinstance(P, (int, float))
        R_isnumber = isinstance(R, (int, float))

        # Either both inputs are numbers or both are sequences.
        assert P_isnumber == R_isnumber

        if not P_isnumber:
            assert len(P) == len(R)

        if P_isnumber and R_isnumber:
            if P == 0 and R == 0:
                return 1.0
            return 1 - (np.abs(R - P) / (np.abs(R) + np.abs(P)))
        else:
            if len(P) == 0 and len(R) == 0:
                return 1.0
            n = len(P)
            P = np.array(P)
            R = np.array(R)
            denominator = np.abs(R) + np.abs(P)

            # Avoid division by zero; these entries contribute 0 anyway because |R - P| is 0 there too.
            denominator[denominator == 0] = 1

            return 1 - (1.0 / n * np.sum(np.abs(R - P) / denominator))
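
    # Worked example (illustrative): smapeScore([3, 0], [1, 0])
    #   = 1 - (1/2) * (|1 - 3| / (|1| + |3|) + 0) = 0.75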
|
|
|
|
|
    def getLineCountScore(self, pred, ref):
        """SMAPE-based similarity of the number of lines in prediction and reference."""
        pred_lines_amt = len(pred.splitlines())
        ref_lines_amt = len(ref.splitlines())

        return self.smapeScore(pred_lines_amt, ref_lines_amt)
|
|
|
    def replaceNumbers(self, text: str):
        """Replace explicit numbers with placeholder tokens so content can be compared independently of numeric values."""
        # Replace floats before ints; otherwise "3.14" would become "<|INT|>.<|INT|>" and the float pattern would never match.
        text = self.float_pattern.sub(r'<|FLOAT|>', text)
        text = self.int_pattern.sub(r'<|INT|>', text)
        return text
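
    # Example (illustrative): replaceNumbers("error 404 after 3.5s")
    # -> "error <|INT|> after <|FLOAT|>s"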
|
|
|
|
|
    def getLineContentScore(self, pred_logMessages, ref_logMessages):
        """Compare the log messages (timestamps already stripped).

        Returns (sacreBLEU score, sacreBLEU score with explicit numbers masked, SMAPE similarity of message lengths).
        """
        # sacreBLEU needs at least one segment, so two empty logs are represented by a single empty line each.
        if pred_logMessages == [] and ref_logMessages == []:
            pred_logMessages = [""]
            ref_logMessages = [""]
        sacrebleu_score = self.sacrebleu_metric.compute(predictions=pred_logMessages, references=ref_logMessages)["score"] / 100.0
|
|
|
smape_length_score = self.get_length_score(pred_logMessages, ref_logMessages) |
|
|
|
vectorized_replaceNumbers = np.vectorize(self.replaceNumbers) |
|
|
|
cleaned_pred_logMessages = vectorized_replaceNumbers(pred_logMessages) |
|
cleaned_ref_logMessages = vectorized_replaceNumbers(ref_logMessages) |
|
|
|
sacrebleu_withoutExplicitNumbers_score = self.sacrebleu_metric.compute(predictions=cleaned_pred_logMessages, references=cleaned_ref_logMessages)["score"] / 100.0 |
|
|
|
|
|
return sacrebleu_score, sacrebleu_withoutExplicitNumbers_score, smape_length_score |
|
|
|
|
|
    def getTimestampsScore(self, pred_timestamps, ref_timestamps):
        """Score the predicted timestamps against the reference timestamps.

        Returns a 4-tuple of scores in [0, 1]:
        - SMAPE-based similarity of the number of timestamps,
        - 1.0 if all predicted timestamps follow the format of the first predicted timestamp, else 0.0,
        - 1.0 if the predicted timestamps are monotonically increasing, else 0.0,
        - SMAPE-based similarity of the deltas between consecutive timestamps.
        """
        timestamp_amt_score = self.smapeScore(len(pred_timestamps), len(ref_timestamps))
|
|
|
        # Neither log contains timestamps: every timestamp-related aspect is trivially satisfied.
        if (len(pred_timestamps) == 0) and (len(ref_timestamps) == 0):
            return timestamp_amt_score, 1.0, 1.0, 1.0

        # The prediction has no timestamps but the reference does: format and monotonicity are vacuously
        # satisfied for the (empty) prediction, but the timestamp deltas cannot match.
        if (len(pred_timestamps) == 0) and (len(ref_timestamps) != 0):
            return timestamp_amt_score, 1.0, 1.0, 0.0
|
|
|
|
|
|
|
        # Build a digit-agnostic format template from the first predicted timestamp: escape it,
        # then let every digit position match any digit.
        pred_timestring_pattern = re.sub(r'\d', r'\\d', re.escape(pred_timestamps[0]))
|
|
|
matchesPatternScore = 1.0 |
|
monotonicallyIncreasingScore = 1.0 |
|
pred_timedeltas = [] |
|
|
|
|
|
prev_datetime = None |
|
|
|
|
|
        for ts in pred_timestamps:
|
try: |
|
|
|
|
|
matchesPattern = re.fullmatch(pred_timestring_pattern, ts) is not None |
|
|
|
cur_datetime = dateutil.parser.parse(ts) |
|
                if prev_datetime is None:
|
monotonicallyIncreasing = True |
|
else: |
|
monotonicallyIncreasing = prev_datetime <= cur_datetime |
|
pred_timedeltas.append((cur_datetime - prev_datetime).total_seconds()) |
|
|
|
prev_datetime = cur_datetime |
|
|
|
|
|
                # Once a single timestamp breaks the format or the ordering, the corresponding score drops to 0.
                if not matchesPattern:
                    matchesPatternScore = 0.0
                if not monotonicallyIncreasing:
                    monotonicallyIncreasingScore = 0.0
|
|
|
|
|
except Exception as e: |
|
|
|
matchesPatternScore = 0.0 |
|
monotonicallyIncreasingScore = 0.0 |
|
pred_timedeltas.append(-1) |
|
|
|
|
|
if (len(pred_timestamps) != 0) and (len(ref_timestamps) == 0): |
|
return timestamp_amt_score, matchesPatternScore, monotonicallyIncreasingScore, 0.0 |
|
|
|
|
|
ref_timedeltas = [] |
|
prev_datetime = None |
|
        for ts in ref_timestamps:
|
try: |
|
cur_datetime = dateutil.parser.parse(ts) |
|
                if prev_datetime is not None:
                    ref_timedeltas.append((cur_datetime - prev_datetime).total_seconds())
|
|
|
prev_datetime = cur_datetime |
|
|
|
except Exception as e: |
|
ref_timedeltas.append(-1) |
|
|
|
        # Compare only as many consecutive-timestamp deltas as exist in both logs.
        minlength = min(len(pred_timedeltas), len(ref_timedeltas))

        pred_timedeltas = pred_timedeltas[:minlength]
        ref_timedeltas = ref_timedeltas[:minlength]

        timestampDeltaScore = self.smapeScore(pred_timedeltas, ref_timedeltas)
|
|
|
return timestamp_amt_score, matchesPatternScore, monotonicallyIncreasingScore, timestampDeltaScore |
|
|
|
|
|
|
|
    def getLogMetric(self, pred: str, ref: str):
        """Compute all log-similarity scores for a single prediction/reference pair."""
        ref = ref.strip(' \t\n\r')
        pred = pred.strip(' \t\n\r')
|
|
|
linecount_difference_SMAPE = self.getLineCountScore(pred, ref) |
|
|
|
|
|
|
|
        # Split each log into [text-before-first-timestamp, timestamp_1, message_1, timestamp_2, message_2, ...].
        pred_split_log = self.timestamp_pattern.split(pred)
        ref_split_log = self.timestamp_pattern.split(ref)
|
|
|
|
|
|
|
|
|
|
|
pred_timestamps = [] |
|
pred_logMessages = [] |
|
|
|
ref_timestamps = [] |
|
ref_logMessages = [] |
|
|
|
        # After the split, odd indices hold timestamps and the element right after each timestamp
        # holds the corresponding log message.
        for i in range(1, len(pred_split_log), 2):
            pred_timestamps.append(pred_split_log[i])
            pred_logMessages.append(pred_split_log[i+1])
|
|
|
|
|
for i in range(1, len(ref_split_log), 2): |
|
|
|
ref_timestamps.append(ref_split_log[i]) |
|
ref_logMessages.append(ref_split_log[i+1]) |
|
|
|
|
|
        # Pad the shorter message list with single spaces so sacreBLEU compares lists of equal length.
        max_logentries = max(len(pred_logMessages), len(ref_logMessages))

        pred_logMessages += (max_logentries - len(pred_logMessages)) * [" "]
        ref_logMessages += (max_logentries - len(ref_logMessages)) * [" "]
|
|
|
linecontent_sacrebleu, linecontent_sacrebleu_withoutExplicitNumbers, linecontentlength_difference_SMAPE = self.getLineContentScore(pred_logMessages, ref_logMessages) |
|
|
|
        timestamps_difference_SMAPE, timestamps_formatConsistency_absolute, timestamps_monotonicity_absolute, timestamps_delta_SMAPE = self.getTimestampsScore(pred_timestamps, ref_timestamps)
|
|
|
|
|
|
|
        return {"linecount_difference_SMAPE_score": linecount_difference_SMAPE,
                "linecontentlength_difference_SMAPE_score": linecontentlength_difference_SMAPE,
                "linecontent_sacrebleu_score": linecontent_sacrebleu,
                "linecontent_sacrebleu_withoutExplicitNumbers_score": linecontent_sacrebleu_withoutExplicitNumbers,
                "timestamps_SMAPE_difference_score": timestamps_difference_SMAPE,
                "timestamps_formatConsistency_score": timestamps_formatConsistency_absolute,
                "timestamps_monotonicity_score": timestamps_monotonicity_absolute,
                "timestamps_delta_SMAPE_score": timestamps_delta_SMAPE
                }
|
|
|
    def _compute(self, predictions, references):
        """Compute every score for each prediction/reference pair and return the mean of each score over all pairs."""
        metric_dicts = [self.getLogMetric(p, r) for p, r in zip(predictions, references)]
|
|
|
        # Average each score over all prediction/reference pairs.
        keys = metric_dicts[0].keys()
        values = np.array([list(d.values()) for d in metric_dicts])
        mean_values = np.mean(values, axis=0)

        metric_result = dict(zip(keys, mean_values))
|
|
|
|
|
|
|
|
return metric_result |
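

if __name__ == "__main__":
    # Minimal usage sketch with illustrative log strings (not part of the metric itself).
    # It assumes the module's dependencies (evaluate, datasets, sacrebleu, numpy, python-dateutil)
    # are installed; in practice the module would usually be loaded via evaluate.load(<path to this module>).
    metric = LogMetric()
    example_predictions = [
        "[2024-01-01 12:00:00] job started\n[2024-01-01 12:00:05] job finished",
    ]
    example_references = [
        "[2024-01-01 12:00:00] job started\n[2024-01-01 12:00:04] job finished",
    ]
    print(metric.compute(predictions=example_predictions, references=example_references))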
|
|