File size: 14,833 Bytes
02fde7e 422e946 9ad5350 a23e88d 02fde7e f81df75 02fde7e e9f8d41 02fde7e 422e946 b7d851e 3484514 81740ff b8bf7c3 81740ff b8bf7c3 81740ff a23e88d 422e946 02fde7e d0d47f8 02fde7e 422e946 02fde7e 0b41e7e f07bac5 0b41e7e 009c9e0 0b41e7e 37ee0fe 0b41e7e 37ee0fe 0b41e7e 37ee0fe 0b41e7e 37ee0fe aa85c6a 37ee0fe 0b41e7e 37ee0fe 279e629 37ee0fe 781f224 37ee0fe 279e629 37ee0fe 81740ff b8bf7c3 81740ff 37ee0fe 23b4a0b e0b17bb 23b4a0b 422e946 37ee0fe e25b5dd 81740ff e25b5dd 81740ff 37ee0fe 4843304 a50fad5 37ee0fe a50fad5 e25b5dd 422e946 37ee0fe 422e946 37ee0fe a50fad5 e25b5dd 422e946 cfb39ec 37ee0fe 422e946 e25b5dd 422e946 a50fad5 422e946 e25b5dd 796bd91 422e946 e25b5dd a50fad5 37ee0fe a50fad5 37ee0fe a50fad5 37ee0fe 59248ff 37ee0fe b705ef3 b6570bd 81740ff 37ee0fe a50fad5 37ee0fe e25b5dd 796bd91 37ee0fe 81740ff 37ee0fe a50fad5 37ee0fe 422e946 838a5f0 02fde7e 422e946 e25b5dd ac839ac 37ee0fe ac839ac 37ee0fe aa85c6a f81df75 37ee0fe 5832d50 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import evaluate
import datasets
import re
import dateutil.parser
import numpy as np
from difflib import SequenceMatcher
import sacrebleu
import time
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""
# TODO: Add description of the module here
_DESCRIPTION = """\
This new module is designed to solve this great ML task and is crafted with a lot of care.
"""
# TODO: Add description of the arguments of the module here
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of predictions to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Returns:
accuracy: description of the first score,
another_score: description of the second score,
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> my_new_module = evaluate.load("my_new_module")
>>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
>>> print(results)
{'accuracy': 1.0}
"""
# TODO: Define external resources urls if needed
BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LogMetric(evaluate.Metric):
"""TODO: Short description of my evaluation module."""
# Constant regex to get timestrings
timestamp_regex = r'^\s*\[?\s*(\d{4}[-/.]\d{2}[-/.]\d{2}(?:[ T]\d{2}[:]\d{2}(?:[:]\d{2}(?:[.,]\d+)?)?(?:Z|[+-]\d{2}[:]\d{2})?)?)\s*\]?\s*'
timestamp_pattern = re.compile(timestamp_regex, re.MULTILINE)
int_regex = r'(-?\d+)'
int_pattern = re.compile(int_regex)
float_regex = r'(-?\d+\.\d+)'
float_pattern = re.compile(float_regex)
sacrebleu_metric = evaluate.load("evaluate-metric/sacrebleu")
def _info(self):
# TODO: Specifies the evaluate.EvaluationModuleInfo object
return evaluate.MetricInfo(
# This is the description that will appear on the modules page.
module_type="metric",
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
# This defines the format of each prediction and reference
# Both prediction and reference are strings
features=datasets.Features({
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}),
# Homepage of the module for documentation
homepage="http://module.homepage",
# Additional links to the codebase or references
codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
reference_urls=["http://path.to.reference.url/new_module"]
)
def _download_and_prepare(self, dl_manager):
"""Optional: download external resources useful to compute the scores"""
# TODO: Download external resources if needed
pass
# Jaccard Similarity to measure closeness of two log-messages
def get_jaccard_similarity(self, set1, set2):
intersection = set1.intersection(set2)
union = set1.union(set2)
if (len(union) == 0):
return 1.0
return len(intersection) / len(union)
# A score depending on the difference in length of two sentences
def get_length_score(self, preds_split, refs_split):
pred_content_lengths = np.vectorize(len)(preds_split)
ref_content_lengths = np.vectorize(len)(refs_split)
return self.smapeScore(pred_content_lengths, ref_content_lengths)
# helper function that computes the smape_score either between two numbers or two lists of numbers (must be the same length)
def smapeScore(self, P, R):
P_isnumber = isinstance(P, (int, float))
R_isnumber = isinstance(R, (int, float))
# either both must be numbers or both must be no number
assert P_isnumber == R_isnumber
if not P_isnumber:
assert(len(P) == len(R))
if P_isnumber and R_isnumber:
if P == 0 and R == 0: return 1.0 # since this leads to (|R| + |P|) = 0
return 1 - (np.sum(np.abs(R - P) / (np.abs(R) + np.abs(P)))) # (n = 1)
else:
if len(P) == 0 and len(R) == 0: return 1.0 # since this leads to n = 0
n = len(P)
P = np.array(P)
R = np.array(R)
denominator = np.abs(R) + np.abs(P)
# Replace zeros in the denominator with 1 to avoid division by zero.
# the denominator[i] = 0 is only possible if R[i] == P[i] == 0, hence we can set denominator[i] = 1 and still achieve the result of 0 after division at index i
denominator[denominator == 0] = 1
return 1 - (1.0/n * np.sum(np.abs(R - P) / denominator))
# splits both strings at \n and then computes the smape_score of their lengths
def getLineCountScore(self, pred, ref):
pred_lines_amt = len(pred.splitlines())
ref_lines_amt = len(ref.splitlines())
# print("#pred_lines:", pred_lines_amt)
# print("#ref_lines:", ref_lines_amt)
return self.smapeScore(pred_lines_amt, ref_lines_amt)
def replaceNumbers(self, text:str):
text = self.int_pattern.sub(r'<|INT|>', text)
text = self.float_pattern.sub(r'<|FLOAT|>', text)
return text
# Get differenct scores regarding the content of a log-message
def getLineContentScore(self, pred_logMessages, ref_logMessages):
if pred_logMessages == [] and ref_logMessages == []:
pred_logMessages = [""]
ref_logMessages = [""]
sacrebleu_score = self.sacrebleu_metric.compute(predictions=pred_logMessages, references=ref_logMessages)["score"] / 100.0
smape_length_score = self.get_length_score(pred_logMessages, ref_logMessages)
vectorized_replaceNumbers = np.vectorize(self.replaceNumbers)
cleaned_pred_logMessages = vectorized_replaceNumbers(pred_logMessages)
cleaned_ref_logMessages = vectorized_replaceNumbers(ref_logMessages)
sacrebleu_withoutExplicitNumbers_score = self.sacrebleu_metric.compute(predictions=cleaned_pred_logMessages, references=cleaned_ref_logMessages)["score"] / 100.0
return sacrebleu_score, sacrebleu_withoutExplicitNumbers_score, smape_length_score
# get different scores regarding the timestamp
def getTimestampsScore(self, pred_timestamps, ref_timestamps):
timestamp_amt_score = self.smapeScore(len(pred_timestamps), len(ref_timestamps))
if (len(pred_timestamps) == 0) and (len(ref_timestamps) == 0):
return timestamp_amt_score, 1.0, 1.0, 1.0
# if there are no predicted timestamps, return early. It is still consistent and monotonic.
if (len(pred_timestamps) == 0) and (len(ref_timestamps) != 0):
return timestamp_amt_score, 1.0, 1.0, 0.0
# replace all digits in the reference timestamp (first timestamp) with '/d' to get
# a regex that describes the format
pred_timestring_pattern = re.sub(r'\d', r'\\d', re.escape(pred_timestamps[0]))
matchesPatternScore = 1.0
monotonicallyIncreasingScore = 1.0
pred_timedeltas = []
# A variable to save the previous timestamp (as datetime obj) to check monotonicity
prev_datetime = None
# Convert matches to datetime objects
for i in range(len(pred_timestamps)):
ts = pred_timestamps[i]
try:
# Check if the format matches with the format of the first timestamp
# TODO!! Check this later, maybe it is too restricting for training a llm
matchesPattern = re.fullmatch(pred_timestring_pattern, ts) is not None
# Check if the timestamps are monotonically increasing
cur_datetime = dateutil.parser.parse(ts)
if prev_datetime == None:
monotonicallyIncreasing = True
else:
monotonicallyIncreasing = prev_datetime <= cur_datetime
pred_timedeltas.append((cur_datetime - prev_datetime).total_seconds())
prev_datetime = cur_datetime
# If one entry doesn't fulfill the matching pattern property or the monotinicity property, set to 0 for whole log
matchesPatternScore = 0.0 if (not matchesPattern) else matchesPatternScore
monotonicallyIncreasingScore = 0.0 if (not monotonicallyIncreasing) else monotonicallyIncreasingScore
except Exception as e:
# e.g. date format not parsable by dateutil.parser
matchesPatternScore = 0.0
monotonicallyIncreasingScore = 0.0
pred_timedeltas.append(-1)
if (len(pred_timestamps) != 0) and (len(ref_timestamps) == 0):
return timestamp_amt_score, matchesPatternScore, monotonicallyIncreasingScore, 0.0
ref_timedeltas = []
prev_datetime = None
for i in range(len(ref_timestamps)):
ts = ref_timestamps[i]
try:
cur_datetime = dateutil.parser.parse(ts)
if prev_datetime == None:
pass
else:
ref_timedeltas.append((cur_datetime - prev_datetime).total_seconds())
prev_datetime = cur_datetime
except Exception as e:
ref_timedeltas.append(-1)
minlength = min(len(pred_timedeltas), len(ref_timedeltas))
pred_timedeltas = pred_timedeltas[:minlength]
ref_timedeltas = ref_timedeltas[:minlength]
print("pred_timedeltas:", pred_timedeltas)
print("ref_timedeltas:", ref_timedeltas)
timestampDeltaScore = self.smapeScore(pred_timedeltas, ref_timedeltas)
print("timestampDeltaScore:", timestampDeltaScore)
# matchesPatternScore and monotonicallyIncreasingScore are in {0,1}
return timestamp_amt_score, matchesPatternScore, monotonicallyIncreasingScore, timestampDeltaScore
def getLogMetric(self, pred : str, ref : str):
ref = ref.strip(' \t\n\r')
pred = pred.strip(' \t\n\r')
linecount_difference_SMAPE = self.getLineCountScore(pred, ref)
# Split log on timestamps
pred_split_log = self.timestamp_pattern.split(pred)
ref_split_log = self.timestamp_pattern.split(ref)
# One logentry always consists of timestamp + log-message
# pred_logentries = []
# ref_logentries = []
pred_timestamps = []
pred_logMessages = []
ref_timestamps = []
ref_logMessages = []
# reorganize log into logentry-tuples, consisting of timestamp + log-message
for i in range(1, len(pred_split_log), 2):
# pred_logentries.append((pred_split_log[i],pred_split_log[i+1]))
pred_timestamps.append(pred_split_log[i])
pred_logMessages.append(pred_split_log[i+1])
for i in range(1, len(ref_split_log), 2):
# ref_logentries.append((ref_split_log[i],ref_split_log[i+1]))
ref_timestamps.append(ref_split_log[i])
ref_logMessages.append(ref_split_log[i+1])
# We extend the shorter list to the length of the longer one
max_logentries = max(len(pred_logMessages), len(ref_logMessages))
pred_logMessages += (max_logentries - len(pred_logMessages)) * [" "]
ref_logMessages += (max_logentries- len(ref_logMessages)) * [" "]
linecontent_sacrebleu, linecontent_sacrebleu_withoutExplicitNumbers, linecontentlength_difference_SMAPE = self.getLineContentScore(pred_logMessages, ref_logMessages)
timestamps_difference_SMAPE, timestamps_formatConsistency_absolute, timestamps_monotinicity_absolute, timestamps_delta_SMAPE = self.getTimestampsScore(pred_timestamps, ref_timestamps)
# return weighted overall score of all the different scores
return {"linecount_difference_SMAPE_score": linecount_difference_SMAPE,
"linecontentlength_difference_SMAPE_score": linecontentlength_difference_SMAPE,
"linecontent_sacrebleu_score": linecontent_sacrebleu,
"linecontent_sacrebleu_withoutExplicitNumbers_score": linecontent_sacrebleu_withoutExplicitNumbers,
"timestamps_SMAPE_difference_score": timestamps_difference_SMAPE,
"timestamps_formatConsistency_score": timestamps_formatConsistency_absolute,
"timestamps_monotinicity_score": timestamps_monotinicity_absolute,
"timestamps_delta_SMAPE_score" : timestamps_delta_SMAPE
}
def _compute(self, predictions, references):
"""Returns the scores"""
# TODO: get separate log entries (split before timestamps), replace timestamps with token and compare the log entry with BLEU
t_before_logmetric = time.perf_counter()
metric_dicts = [self.getLogMetric(p,r) for p,r in zip(predictions,references)]
# Extract keys (assuming all dictionaries have the same keys)
keys = metric_dicts[0].keys()
# Convert list of dictionaries into a 2D numpy array
values = np.array([list(d.values()) for d in metric_dicts])
# Calculate the mean along the vertical axis (axis=0)
mean_values = np.mean(values, axis=0)
# a dictionary, matching the keys with their corresponding mean values
metric_result = dict(zip(keys, mean_values))
t_after_logmetric = time.perf_counter()
logmetric_duration = f"{t_after_logmetric - t_before_logmetric:0.10f}"
return metric_result
|