# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Add test cases. Remove tokenize_sentences flag since it can be determined from the input itself.
"""Sem-F1 metric"""

import abc
from typing import List, Optional, Tuple, Union

import datasets
import evaluate
import nltk
import numpy as np
import torch
from numpy.typing import NDArray
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

from utils import Scores, flatten_list, is_list_of_strings_at_depth, slice_embeddings

_CITATION = """\
@inproceedings{bansal-etal-2022-sem,
    title = "{SEM}-F1: an Automatic Way for Semantic Evaluation of Multi-Narrative Overlap Summaries at Scale",
    author = "Bansal, Naman  and
      Akter, Mousumi  and
      Karmaker Santu, Shubhra Kanti",
    editor = "Goldberg, Yoav  and
      Kozareva, Zornitsa  and
      Zhang, Yue",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.49",
    doi = "10.18653/v1/2022.emnlp-main.49",
    pages = "780--792",
    abstract = "Recent work has introduced an important yet relatively under-explored NLP task called Semantic Overlap Summarization (SOS) that entails generating a summary from multiple alternative narratives which conveys the common information provided by those narratives. Previous work also published a benchmark dataset for this task by collecting 2,925 alternative narrative pairs from the web and manually annotating 411 different reference summaries by engaging human annotators. In this paper, we exclusively focus on the automated evaluation of the SOS task using the benchmark dataset. More specifically, we first use the popular ROUGE metric from text-summarization literature and conduct a systematic study to evaluate the SOS task. Our experiments discover that ROUGE is not suitable for this novel task and therefore, we propose a new sentence-level precision-recall style automated evaluation metric, called SEM-F1 (Semantic F1). It is inspired by the benefits of the sentence-wise annotation technique using overlap labels reported by the previous work. Our experiments show that the proposed SEM-F1 metric yields a higher correlation with human judgment and higher inter-rater agreement compared to the ROUGE metric.",
}
"""

_DESCRIPTION = """\
Sem-F1 leverages pre-trained contextual embeddings to evaluate a model-generated semantic overlap summary
against a reference overlap summary. It scores the summary at the sentence level and computes precision,
recall, and F1.
"""

_KWARGS_DESCRIPTION = """
Sem-F1 compares the system-generated overlap summary with the ground-truth reference overlap summary.

Args:
    predictions: list - List of predictions (detailed format below).
    references: list - List of references (detailed format below).
    model_type: str - Model to use. [pv1, stsb, use]
        Options:
            pv1 - paraphrase-distilroberta-base-v1 (Default)
            stsb - stsb-roberta-large
            use - Universal Sentence Encoder
    tokenize_sentences: bool - Sentence-tokenize the input documents (predictions/references). Default: True.
    multi_references: bool - Whether each prediction is paired with multiple references. Default: False.
    gpu: Union[bool, int] - Whether to use GPU or CPU.
        Options:
            False - CPU (Default)
            True - GPU, device 0
            n: int - GPU, device n
    batch_size: int - Batch size for encoding. Default: 32.

Returns:
    precision: Precision.
    recall: Recall.
    f1: F1 score.

There are 4 possible cases for the predictions and references arguments:

Case 1: Multi-Ref = False, tokenize_sentences = False
    predictions: List[List[str]] - List of predictions where each prediction is a list of sentences.
    references: List[List[str]] - List of references where each reference is a list of sentences.

Case 2: Multi-Ref = False, tokenize_sentences = True
    predictions: List[str] - List of predictions where each prediction is a document.
    references: List[str] - List of references where each reference is a document.

Case 3: Multi-Ref = True, tokenize_sentences = False
    predictions: List[List[str]] - List of predictions where each prediction is a list of sentences.
    references: List[List[List[str]]] - List of multi-references, i.e. [[r11, r12, ...], [r21, r22, ...], ...],
        where each rij is itself a list of sentences.

Case 4: Multi-Ref = True, tokenize_sentences = True
    predictions: List[str] - List of predictions where each prediction is a document.
    references: List[List[str]] - List of multi-references, i.e. [[r11, r12, ...], [r21, r22, ...], ...],
        where each rij is a document.

As a truth table:

    Case | Multi-Ref | tokenize_sentences | predictions     | references
    -----|-----------|--------------------|-----------------|----------------------
      1  |     0     |         0          | List[List[str]] | List[List[str]]
      2  |     0     |         1          | List[str]       | List[str]
      3  |     1     |         0          | List[List[str]] | List[List[List[str]]]
      4  |     1     |         1          | List[str]       | List[List[str]]

The Multi-Ref vs. Single-Ref case is selected with the `multi_references` flag (default: False); it is not
inferred from the inputs.

Examples:

    >>> import evaluate
    >>> predictions = [["I go to School.", "You are stupid."], ["I love adventure sports."]]
    >>> references = [["I go to School.", "You are stupid."], ["I love adventure sports."]]
    >>> metric = evaluate.load("semf1")
    >>> results = metric.compute(predictions=predictions, references=references, tokenize_sentences=False)
    >>> print([round(v, 2) for v in results["f1"]])
    [0.77, 0.56]
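
    A multi-reference call (Case 4) is sketched below purely for illustration; the `mr_*` variable names are
    only for this example, and the exact scores depend on the chosen encoder, so no expected output is shown.

    >>> mr_predictions = ["I go to School. You are stupid."]
    >>> mr_references = [["I go to School. You are stupid.", "I love adventure sports."]]
    >>> mr_results = metric.compute(predictions=mr_predictions, references=mr_references,
    ...                             multi_references=True, tokenize_sentences=True)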
"""

_PREDICTION_TYPE = Union[List[str], List[List[str]]]
_REFERENCE_TYPE = Union[List[str], List[List[str]], List[List[List[str]]]]


class Encoder(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def encode(self, prediction: List[str]) -> NDArray:
        """Encode a list of sentences into an array of shape (num_sentences, embedding_dim)."""
        pass


class USE(Encoder):
    """Universal Sentence Encoder. Placeholder, not implemented yet (see the TODO in `_get_encoder`)."""

    def __init__(self):
        pass

    def encode(self, prediction: List[str]) -> NDArray:
        pass


class SBertEncoder(Encoder):
    def __init__(self, model_name: str, device: Union[str, int], batch_size: int):
        self.model = SentenceTransformer(model_name)
        self.device = device
        self.batch_size = batch_size

    def encode(self, prediction: List[str]) -> NDArray:
        """Returns sentence embeddings of dim: Batch x Dim"""
        # SBert output is always Batch x Dim
        return self.model.encode(prediction, device=self.device, batch_size=self.batch_size)


def _get_encoder(model_name: str, device: Union[str, int], batch_size: int) -> Encoder:
    if model_name == "use":
        return SBertEncoder(model_name, device, batch_size)
        # return USE()  # TODO: This will change depending on PyTorch USE vs. TF USE model
    else:
        return SBertEncoder(model_name, device, batch_size)


def _compute_cosine_similarity(pred_embeds: NDArray, ref_embeds: NDArray) -> Tuple[float, float]:
    """
    Compute sentence-level precision and recall from the pairwise cosine-similarity matrix.
    Precision averages, over prediction sentences, the similarity to the best-matching reference sentence;
    recall averages, over reference sentences, the similarity to the best-matching prediction sentence.
    """
    cosine_scores = cosine_similarity(pred_embeds, ref_embeds)
    precision_per_sentence_sim = np.max(cosine_scores, axis=-1)
    recall_per_sentence_sim = np.max(cosine_scores, axis=0)
    return np.mean(precision_per_sentence_sim).item(), np.mean(recall_per_sentence_sim).item()


def _get_gpu(gpu: Union[bool, int]) -> Union[str, int]:
    gpu_available = torch.cuda.is_available()

    # Ensure a gpu index is within the range of available gpus (booleans are excluded from this check)
    if gpu_available and isinstance(gpu, int) and not isinstance(gpu, bool):
        gpu_count = torch.cuda.device_count()
        if gpu >= gpu_count:
            raise ValueError(
                f"There are {gpu_count} gpus available. Provide the correct gpu index. You provided: {gpu}"
            )

    # Get the device (fall back to CPU when no GPU is available)
    if gpu is False or not gpu_available:
        device = "cpu"
    elif gpu is True:
        device = 0  # by default run on device 0
    elif isinstance(gpu, int):
        device = gpu
    else:  # This will never happen
        raise ValueError(f"gpu must be bool or int. Provided value: {gpu}")

    return device


def _validate_input_format(
        tokenize_sentences: bool,
        multi_references: bool,
        predictions: _PREDICTION_TYPE,
        references: _REFERENCE_TYPE,
):
    """Ensure predictions and references match the shape implied by the tokenize_sentences/multi_references flags."""
    if tokenize_sentences and multi_references:
        condition = is_list_of_strings_at_depth(predictions, 1) and is_list_of_strings_at_depth(references, 2)
    elif not tokenize_sentences and multi_references:
        condition = is_list_of_strings_at_depth(predictions, 2) and is_list_of_strings_at_depth(references, 3)
    elif tokenize_sentences and not multi_references:
        condition = is_list_of_strings_at_depth(predictions, 1) and is_list_of_strings_at_depth(references, 1)
    else:
        condition = is_list_of_strings_at_depth(predictions, 2) and is_list_of_strings_at_depth(references, 2)

    if not condition:
        raise ValueError("Predictions and/or references are not in a valid input format. Refer to the documentation.")
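

# Illustrative note on `_compute_cosine_similarity` (defined above), kept as a comment so that importing this
# module stays side-effect free. With 2 prediction sentences and 3 reference sentences, the cosine-similarity
# matrix has shape (2, 3): precision averages the row-wise maxima (best-matching reference sentence for each
# prediction sentence) and recall averages the column-wise maxima (best-matching prediction sentence for each
# reference sentence). For example, with the toy embeddings below:
#
#     pred_embeds = np.asarray([[1.0, 0.0], [0.0, 1.0]])             # 2 prediction sentences
#     ref_embeds = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # 3 reference sentences
#     precision, recall = _compute_cosine_similarity(pred_embeds, ref_embeds)
#
# precision == 1.0 (every prediction sentence has an exact match among the reference sentences) and
# recall == (1.0 + 1.0 + 1/sqrt(2)) / 3 ≈ 0.90 (the third reference sentence is only partially covered).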


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SemF1(evaluate.Metric):
    _MODEL_TYPE_TO_NAME = {
        "pv1": "paraphrase-distilroberta-base-v1",
        "stsb": "stsb-roberta-large",
        "use": "sentence-transformers/use-cmlm-multilingual",  # TODO: check PyTorch USE vs. TF USE
    }

    def _info(self):
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=[
                # Multi References: False, Tokenize_Sentences = False
                datasets.Features(
                    {
                        # predictions: List[List[str]] - each prediction is a list of sentences
                        "predictions": datasets.Sequence(datasets.Value("string", id="sequence"), id="predictions"),
                        # references: List[List[str]] - each reference is a list of sentences
                        "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                    }
                ),
                # Multi References: False, Tokenize_Sentences = True
                datasets.Features(
                    {
                        # predictions: List[str] - each prediction is a document
                        "predictions": datasets.Value("string", id="sequence"),
                        # references: List[str] - each reference is a document
                        "references": datasets.Value("string", id="sequence"),
                    }
                ),
                # Multi References: True, Tokenize_Sentences = False
                datasets.Features(
                    {
                        # predictions: List[List[str]] - each prediction is a list of sentences
                        "predictions": datasets.Sequence(datasets.Value("string", id="sequence"), id="predictions"),
                        # references: List[List[List[str]]] - list of multi-references,
                        # i.e. each "reference" is itself a list (r1, r2, ...) and each ri is a list of sentences
                        "references": datasets.Sequence(
                            datasets.Sequence(datasets.Value("string", id="sequence"), id="ref"), id="references"
                        ),
                    }
                ),
                # Multi References: True, Tokenize_Sentences = True
                datasets.Features(
                    {
                        # predictions: List[str] - each prediction is a document
                        "predictions": datasets.Value("string", id="sequence"),
                        # references: List[List[str]] - list of multi-references,
                        # i.e. each "reference" is itself a list (r1, r2, ...) of documents
                        "references": datasets.Sequence(datasets.Value("string", id="ref"), id="references"),
                    }
                ),
            ],
            # Additional links to the codebase or references
            reference_urls=["https://aclanthology.org/2022.emnlp-main.49/"],
        )

    def _get_model_name(self, model_type: Optional[str] = None) -> str:
        if model_type is None:
            model_type = "pv1"  # TODO: Change it to use

        if model_type not in self._MODEL_TYPE_TO_NAME:
            raise ValueError(f"Provide a correct model_type.\n"
                             f"Options: {list(self._MODEL_TYPE_TO_NAME.keys())}\n"
                             f"Currently provided: {model_type}")

        return self._MODEL_TYPE_TO_NAME[model_type]

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        nltk.download("punkt", quiet=True)
        # if not nltk.data.find("tokenizers/punkt"):  # TODO: check why it is not working
        #     pass

    def _compute(
            self,
            predictions,
            references,
            model_type: Optional[str] = None,
            tokenize_sentences: bool = True,
            multi_references: bool = False,
            gpu: Union[bool, int] = False,
            batch_size: int = 32,
    ) -> List[Scores]:
        """
        Compute precision, recall, and F1 scores for the given predictions and references.

        :param predictions: Predictions (see the module docstring for the accepted formats).
        :param references: References (see the module docstring for the accepted formats).
        :param model_type: Type of model to use for encoding.
        :param tokenize_sentences: Flag to sentence-tokenize the documents.
        :param multi_references: Flag to indicate that each prediction has multiple references.
        :param gpu: GPU device to use (False for CPU, True for GPU 0, or an int GPU index).
        :param batch_size: Batch size for encoding.
        :return: List of Scores dataclasses with precision, recall, and F1 scores.
        """
        # Validate that the inputs match the format implied by the flags
        _validate_input_format(tokenize_sentences, multi_references, predictions, references)

        # Get the device to run on
        device = _get_gpu(gpu)

        # Get the encoder model
        model_name = self._get_model_name(model_type)
        encoder = _get_encoder(model_name, device=device, batch_size=batch_size)

        # Treat the single-reference case as a multi-reference case with exactly one reference per prediction,
        # so both cases can be handled uniformly below.
        if not multi_references:
            references = [[ref] for ref in references]

        # Tokenize sentences if required
        if tokenize_sentences:
            predictions = [nltk.tokenize.sent_tokenize(pred) for pred in predictions]
            references = [[nltk.tokenize.sent_tokenize(ref) for ref in refs] for refs in references]

        # Flatten the data so that all sentences can be encoded in a single batched pass
        all_sentences = flatten_list(predictions) + flatten_list(references)

        # Keep the sentence counts so the flat embedding matrix can be sliced back per prediction/reference
        prediction_sentences_count = [len(pred) for pred in predictions]
        reference_sentences_count = [[len(ref) for ref in refs] for refs in references]

        # Encode all sentences in one go; this is more efficient than encoding each document separately
        embeddings = encoder.encode(all_sentences)

        # Get the embeddings corresponding to predictions and references
        pred_embeddings = slice_embeddings(embeddings, prediction_sentences_count)
        ref_embeddings = slice_embeddings(embeddings[sum(prediction_sentences_count):], reference_sentences_count)

        # Init output scores
        results = []

        # Compute scores
        for preds, refs in zip(pred_embeddings, ref_embeddings):
            # Precision: compute against all sentences of all references concatenated together
            concat_refs = np.concatenate(refs, axis=0)
            precision, _ = _compute_cosine_similarity(preds, concat_refs)

            # Recall: compute individually for each reference
            # (passing the reference embeddings first makes the first returned value the recall w.r.t. that reference)
            recall_scores = [_compute_cosine_similarity(r_embeds, preds) for r_embeds in refs]
            recall_scores = [r_score for (r_score, _) in recall_scores]

            results.append(Scores(precision, recall_scores))

        return results
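

# Minimal usage sketch, assuming this script is executed directly with its dependencies installed; it is not
# part of the metric itself. It exercises the default single-reference, sentence-tokenized path (Case 2).
# The printed Scores values depend on the encoder, so none are asserted here.
if __name__ == "__main__":
    _metric = SemF1()
    _metric.download_and_prepare()  # fetches the NLTK punkt tokenizer (normally triggered by evaluate.load)
    _predictions = ["I go to School. You are stupid.", "I love adventure sports."]
    _references = ["I go to School. You are stupid.", "I love adventure sports."]
    for _score in _metric.compute(predictions=_predictions, references=_references):
        print(_score)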