# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""

import re
import time

import datasets
import dateutil.parser
import evaluate
import numpy as np


# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

# TODO: Add description of the module here
_DESCRIPTION = """\
This module checks the consistency of timestamps in predicted logs against reference logs.
"""

# TODO: Add description of the arguments of the module here
_KWARGS_DESCRIPTION = """
Calculates how well predicted logs match reference logs, based on the timestamps they contain.
Args:
    predictions: list of predictions to score. Each prediction should be a
        string containing a log, with timestamps at the start of lines.
    references: list of references, one per prediction. Each reference should be a
        string containing a log, with timestamps at the start of lines.
Returns:
    score: mean per-pair timestamp consistency score, between 0.0 and 1.0,
    duration: wall-clock time spent computing the score, in seconds.
Examples:
    Examples should be written in doctest format, and should illustrate how
    to use the function.

    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(
    ...     references=["2020-01-01 12:00:00 job started"],
    ...     predictions=["2020-01-01 12:00:00 job started"],
    ... )
    >>> print(results["score"])
    1.0
"""

# TODO: Define external resources urls if needed
BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LogMetric(evaluate.Metric):
    """TODO: Short description of my evaluation module."""

    # Regex matching a timestamp at the start of a line: a date such as
    # 2020-01-01 (also with '/' or '.' as separators), optionally followed by
    # a time with optional seconds, fractional seconds, and timezone offset.
    timestamp_regex = r'(^\d{4}[-/.]\d{2}[-/.]\d{2}(?:[ T]\d{2}[:]\d{2}(?:[:]\d{2}(?:[.,]\d+)?)?(?:Z|[+-]\d{2}[:]\d{2})?)?)'
    timestamp_pattern = re.compile(timestamp_regex, re.MULTILINE)

    def _info(self):
        # TODO: Specifies the evaluate.EvaluationModuleInfo object
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference.
            # Both predictions and references are plain strings.
            features=datasets.Features({
                "predictions": datasets.Value("string", id="sequence"),
                "references": datasets.Value("string", id="sequence"),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def get_log_metric(self, pred: str, ref: str) -> float:
        """Scores a single prediction/reference pair on timestamp consistency."""
        pred = pred.strip()
        ref = ref.strip()

        # Find all timestamps in both logs.
        pred_timestrings = self.timestamp_pattern.findall(pred)
        ref_timestrings = self.timestamp_pattern.findall(ref)

        # The prediction must contain exactly as many timestamps as the reference.
        if len(pred_timestrings) != len(ref_timestrings):
            return 0.0

        # If there are no timestamps, there is nothing left to check.
        if len(pred_timestrings) == 0:
            return 1.0

        # Replace every digit in the first *predicted* timestamp with '\d' to
        # get a regex describing its format; all other predicted timestamps
        # must then match this format.
        pred_timestring_pattern = re.sub(r'\d', r'\\d', re.escape(pred_timestrings[0]))

        # The previous timestamp (as a datetime object), used to check monotonicity.
        prev_datetime = None

        # Convert matches to datetime objects and validate them.
        for ts in pred_timestrings:
            try:
                # Check that the format matches the format of the first timestamp.
                matches_pattern = re.fullmatch(pred_timestring_pattern, ts) is not None

                # Check that the timestamps are monotonically increasing.
                cur_datetime = dateutil.parser.parse(ts)
                monotonically_increasing = prev_datetime is None or prev_datetime <= cur_datetime
                prev_datetime = cur_datetime

                if not (matches_pattern and monotonically_increasing):
                    # Timestamps are not consistent.
                    return 0.0
            except Exception:
                # e.g. the date format is not parsable by dateutil.parser.
                return 0.0

        # Correct number of timestamps, consistent format, parsable by
        # dateutil.parser, and monotonically increasing.
        return 1.0

    def _compute(self, predictions, references):
        """Returns the scores"""
        t_before_logmetric = time.perf_counter()
        timestamp_score = float(np.mean([self.get_log_metric(p, r) for p, r in zip(predictions, references)]))
        t_after_logmetric = time.perf_counter()

        # Wall-clock time spent computing the metric, in seconds.
        logmetric_duration = t_after_logmetric - t_before_logmetric

        return {
            "score": timestamp_score,
            "duration": logmetric_duration,
        }
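

# A minimal local smoke test (a sketch, not part of the metric template).
# Instantiating the module class directly and calling `compute` works with the
# `evaluate` library, though loading this script via `evaluate.load("<path to
# this script>")` is the usual route. The log lines below are made-up examples.
if __name__ == "__main__":
    metric = LogMetric()
    results = metric.compute(
        predictions=["2020-01-01 12:00:00 job started\n2020-01-01 12:00:05 job finished"],
        references=["2020-01-01 12:00:00 job started\n2020-01-01 12:00:05 job finished"],
    )
    print(results)  # expected: a score of 1.0, plus the measured duration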