# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""

import re
import time

import dateutil.parser
import numpy as np

import datasets
import evaluate


# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
LogMetric compares machine-generated log files against reference logs. It combines
three signals: whether the predicted timestamps are monotonically increasing, whether
they follow a consistent format, and how close the log messages are to the references
as measured by sacreBLEU.
"""


# TODO: Add description of the arguments of the module here
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of predictions to score. Each predictions
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Returns:
    accuracy: description of the first score,
    another_score: description of the second score,
Examples:
    Examples should be written in doctest format, and should illustrate how
    to use the function.

    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
    >>> print(results)
    {'accuracy': 1.0}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LogMetric(evaluate.Metric):
    """TODO: Short description of my evaluation module."""

    # Regex matching one timestamp (date, optional time, optional timezone) at the start of a line
    timestamp_regex = r'^\s*(\d{4}[-/.]\d{2}[-/.]\d{2}(?:[ T]\d{2}[:]\d{2}(?:[:]\d{2}(?:[.,]\d+)?)?(?:Z|[+-]\d{2}[:]\d{2})?)?)\s*'
    timestamp_pattern = re.compile(timestamp_regex, re.MULTILINE)
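    # Illustrative timestamps the pattern accepts (assumed formats, not an exhaustive list):
    #   "2024-01-01"
    #   "2024/01/01 12:00"
    #   "2024-01-01T12:00:00.123Z"
    #   "2024-01-01 12:00:00+02:00"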
    # sacreBLEU is loaded once at class definition time and reused for all log-message comparisons
    sacrebleu = evaluate.load("sacrebleu")

    def _info(self):
        # Specifies the evaluate.MetricInfo object
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            # Both prediction and reference are strings
            features=datasets.Features({
                "predictions": datasets.Value("string", id="sequence"),
                "references": datasets.Value("string", id="sequence"),
            }),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"]
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def get_log_metric(self, pred: str, ref: str, sacrebleu):
        """Score a single predicted log against its reference log."""
        pred = pred.strip()
        ref = ref.strip()

        # Split each log on the timestamps; since the pattern contains one
        # capturing group, re.split returns timestamps interleaved with messages
        pred_split_log = self.timestamp_pattern.split(pred)
        ref_split_log = self.timestamp_pattern.split(ref)

        # re.split with a single capturing group always yields an odd-length list
        # (the leading chunk plus n timestamp/message pairs), so this always holds
        assert len(pred_split_log) % 2 == len(ref_split_log) % 2 == 1
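        # Illustrative example of the split (hypothetical input):
        #   "2024-01-01 12:00 started\n2024-01-01 12:01 done"
        # yields
        #   ["", "2024-01-01 12:00", "started\n", "2024-01-01 12:01", "done"]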

        # One log entry always consists of a timestamp plus a log message
        pred_logentries = []
        ref_logentries = []

        # Reorganize each log into (timestamp, log message) tuples
        for i in range(1, len(pred_split_log), 2):
            pred_logentries.append((pred_split_log[i], pred_split_log[i + 1]))

        for i in range(1, len(ref_split_log), 2):
            ref_logentries.append((ref_split_log[i], ref_split_log[i + 1]))

        # Entry counts of the log with more entries and the log with fewer entries
        max_logentries = max(len(pred_logentries), len(ref_logentries))
        min_logentries = min(len(pred_logentries), len(ref_logentries))

        # Case: neither the reference nor the prediction contains a timestamp.
        # Any sensible log is empty if it has no timestamps, so an exact string
        # match suffices for the message part, and the timestamp sub-scores are
        # trivially perfect.
        if len(pred_logentries) == 0 and len(ref_logentries) == 0:
            logmsg_score = 100.0 if pred == ref else 0.0
            return 0.3 * 100.0 + 0.7 * logmsg_score

        # Case: one log has no timestamps while the other has at least one;
        # there is nothing meaningful to compare, so the score is 0
        if len(pred_logentries) == 0 or len(ref_logentries) == 0:
            return 0.0

        # Replace every digit in the prediction's first timestamp with '\d' to
        # derive a regex describing that timestamp's format
        pred_timestring_pattern = re.sub(r'\d', r'\\d', re.escape(pred_logentries[0][0]))
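        # Illustrative example (hypothetical timestamp): "2024-01-01 12:00" becomes
        # the pattern "\d\d\d\d\-\d\d\-\d\d\ \d\d:\d\d" (on Python >= 3.7,
        # re.escape also escapes '-' and ' ')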
        
        matches_pattern_score = 100.0
        monotonically_increasing_score = 100.0

        # Per-entry sacreBLEU scores of the log messages
        logmessage_scores = []
        # TODO: idea to penalize too long/short logs -> append
        # (max_logentries - min_logentries) entries with score 0 at the end
        # The previous timestamp (as a datetime object), used to check monotonicity
        prev_datetime = None
        for i in range(min_logentries):
            ts, pred_lm = pred_logentries[i]
            _, ref_lm = ref_logentries[i]
            try:
                # Check whether this timestamp's format matches the format of the
                # first predicted timestamp
                # TODO: full-format matching may be too restrictive for training
                # an LLM; revisit this check
                matches_pattern = re.fullmatch(pred_timestring_pattern, ts) is not None
                # Check whether the timestamps are monotonically increasing
                cur_datetime = dateutil.parser.parse(ts)
                monotonically_increasing = prev_datetime is None or prev_datetime <= cur_datetime
                prev_datetime = cur_datetime

                # A single entry violating the format or the monotonicity property
                # zeroes the respective score for the whole log
                if not matches_pattern:
                    matches_pattern_score = 0.0
                if not monotonically_increasing:
                    monotonically_increasing_score = 0.0
            except Exception:
                # e.g. the timestamp is not parsable by dateutil.parser
                matches_pattern_score = 0.0
                monotonically_increasing_score = 0.0

            logmessage_scores.append(sacrebleu.compute(predictions=[pred_lm], references=[ref_lm])["score"])

        # Aggregate the per-message sacreBLEU scores, weighting the difference
        # in entry counts with a score of 0
        logmessage_aggregated_score = (min_logentries / max_logentries) * np.mean(logmessage_scores)
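        # Example: if the shorter log has 2 entries, the longer has 4, and the
        # mean sacreBLEU score is 50.0, this term is (2 / 4) * 50.0 = 25.0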
        # Weighted sum: monotonically increasing timestamps, consistent (and
        # dateutil-parsable) timestamp format, and log-message similarity
        return 0.2 * monotonically_increasing_score + 0.1 * matches_pattern_score + 0.7 * logmessage_aggregated_score

    def _compute(self, predictions, references):
        """Computes the log metric for each prediction/reference pair and returns the mean."""
        t_before_logmetric = time.perf_counter()
        score = np.mean([self.get_log_metric(p, r, self.sacrebleu) for p, r in zip(predictions, references)])
        t_after_logmetric = time.perf_counter()

        return {
            "score": score,
            "duration": t_after_logmetric - t_before_logmetric,
        }
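

# Minimal usage sketch (illustrative, not part of the metric itself). Instantiating
# the class directly assumes this file is run as a standalone script; when the
# script is hosted, evaluate.load("path/to/this/script") is the usual entry point.
if __name__ == "__main__":
    metric = LogMetric()
    predictions = ["2024-01-01 12:00:00 job started\n2024-01-01 12:00:05 job finished"]
    references = ["2024-01-01 12:00:00 job started\n2024-01-01 12:00:04 job finished"]
    print(metric.compute(predictions=predictions, references=references))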