|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T12:45:19.315677Z" |
|
}, |
|
"title": "XplaiNLI: Explainable Natural Language Inference through Visual Analytics", |
|
"authors": [ |
|
{ |
|
"first": "Aikaterini-Lida", |
|
"middle": [], |
|
"last": "Kalouli", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rita", |
|
"middle": [], |
|
"last": "Sevastjanova", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Konstanz", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Valeria", |
|
"middle": [], |
|
"last": "De Paiva", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Topos Institute", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Crouch", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mennatallah", |
|
"middle": [], |
|
"last": "El-Assady", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Konstanz", |
|
"location": {} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Advances in Natural Language Inference (NLI) have helped us understand what state-of-the-art models really learn and what their generalization power is. Recent research has revealed some heuristics and biases of these models. However, to date, there is no systematic effort to capitalize on those insights through a system that uses these to explain the NLI decisions. To this end, we propose XplaiNLI, an eXplainable, interactive, visualization interface that computes NLI with different methods and provides explanations for the decisions made by the different approaches.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Advances in Natural Language Inference (NLI) have helped us understand what state-of-the-art models really learn and what their generalization power is. Recent research has revealed some heuristics and biases of these models. However, to date, there is no systematic effort to capitalize on those insights through a system that uses these to explain the NLI decisions. To this end, we propose XplaiNLI, an eXplainable, interactive, visualization interface that computes NLI with different methods and provides explanations for the decisions made by the different approaches.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "We present XplaiNLI, an interactive visualization, web-based interface that computes Natural Language Inference (NLI) with three different approaches and provides sketches of explanations for the decision made by each approach. 1 An overview of XplaiNLI is found in Figure 1 . The user on the frontend (right) inputs a premise (P) and a hypothesis (H). The pair is passed to the backend (left) where it goes through a symbolic and a deep learning (DL) component, which compute an inference label each. Each component also determines the rules and features that lead to the decision: for the symbolic one, we use Natural Logic (Valencia, 1991) inference rules to explain the inference label, while for the DL approach, we use insights gained from relevant work (Naik et al., 2018; Gururangan et al., 2018; Dasgupta et al., 2018; McCoy et al., 2019) to account for the decision. The complete output enters the hybrid component, which combines the strengths of the symbolic NLI engine and the DL model and determines which approach's label should be trusted based on semantic characteristics of the sentences. All output is forwarded to the frontend, where an intuitive visualization encodes the inference labels of the three approaches as well the corresponding explanations. The user can interact further with the interface by adding her own heuristics and by providing feedback on the inference label, which is used for improving the separate components.", |
|
"cite_spans": [ |
|
{ |
|
"start": 228, |
|
"end": 229, |
|
"text": "1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 642, |
|
"text": "(Valencia, 1991)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 779, |
|
"text": "(Naik et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 780, |
|
"end": 804, |
|
"text": "Gururangan et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 805, |
|
"end": 827, |
|
"text": "Dasgupta et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 847, |
|
"text": "McCoy et al., 2019)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 266, |
|
"end": 274, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Work on interpretability for NLI is still at an early stage. One strand of research explains the models by \"stress-testing\" them and revealing the phenomena that the models cannot handle or by detecting bias in the training data (Gururangan et al., 2018; Dasgupta et al., 2018; McCoy et al., 2019, inter alia) . Another strand of research has approached the task by directly learning natural language explanations along with the inference decision (Camburu et al., 2018) or creating distributional representations of syntactic and semantic inference rules (Zanzotto and Ferrone, 2017) and training machine-learning models on them. Although all these approaches shed light on the processes behind the reasoning task, the insights gained have not yet been used in their full potential; XplaiNLI seeks to fill this gap.", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 254, |
|
"text": "(Gururangan et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 255, |
|
"end": 277, |
|
"text": "Dasgupta et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 309, |
|
"text": "McCoy et al., 2019, inter alia)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 448, |
|
"end": 470, |
|
"text": "(Camburu et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 556, |
|
"end": 584, |
|
"text": "(Zanzotto and Ferrone, 2017)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The backend outputs the inference relation for a given pair, as well as the features that lead to that decision, based on each of the following three approaches. The exact backend implementation and the performance Figure 1 : The high-level architecture of XplaiNLI: on the left, the three NLI approaches providing an inference label and explainable features, and on the right, the interactive, explainable, visual frontend. of each of the approaches is detailed in Kalouli et al. (2020) ; this paper focuses on explainability.", |
|
"cite_spans": [ |
|
{ |
|
"start": 466, |
|
"end": 487, |
|
"text": "Kalouli et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 215, |
|
"end": 223, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "XplaiNLI Backend Model", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "For the DL component we use BERT-base (Devlin et al., 2018) , one of the state-of-the-art models for NLI, which we fine-tune for our task. For fine-tuning, we use the SemEval 2014 version of SICK (Marelli et al., 2014) . We utilize a corrected version of the corpus 2 to mitigate some of the shortcomings of the original corpus, e.g., event and entity coreference issues. We do not fine-tune on other commonly-used benchmarks, such as MNLI (Williams et al., 2017) , as these corpora suffer from similar problems. For fine-tuning, we use the HuggingFace implementation 3 and we fine-tune the parameters suggested by the authors: batch size, learning rate and number of epochs. Our best performing model uses a batch size of 32, learning rate of 2e-5 and 3 epochs. The trained model classifies an input pair into E(ntailment), C(ontradiction) or N(eutral).", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 59, |
|
"text": "(Devlin et al., 2018)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 218, |
|
"text": "(Marelli et al., 2014)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 440, |
|
"end": 463, |
|
"text": "(Williams et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Deep Learning Component", |
|
"sec_num": "3.1" |
|
}, |
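
{

"text": "To make this setup concrete, the following is a minimal sketch of such a fine-tuning run, assuming the HuggingFace transformers and datasets APIs; the toy training pairs are illustrative placeholders for the corrected SICK data we actually use:\n\nfrom datasets import Dataset\nfrom transformers import (AutoTokenizer, AutoModelForSequenceClassification,\n                          TrainingArguments, Trainer)\n\n# Label scheme: 0 = E(ntailment), 1 = C(ontradiction), 2 = N(eutral).\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\nmodel = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)\n\ndef encode(example):\n    # P and H are encoded together as one sequence pair.\n    return tokenizer(example['premise'], example['hypothesis'], truncation=True, padding='max_length', max_length=128)\n\n# Illustrative placeholder pairs; XplaiNLI uses the corrected SICK train set.\ntrain_data = Dataset.from_list([\n    {'premise': 'A woman is walking.', 'hypothesis': 'A person is walking.', 'label': 0},\n    {'premise': 'A woman is walking.', 'hypothesis': 'No woman is walking.', 'label': 1},\n]).map(encode)\n\n# The hyperparameters reported above: batch size 32, learning rate 2e-5, 3 epochs.\nargs = TrainingArguments(output_dir='bert-nli', per_device_train_batch_size=32, learning_rate=2e-5, num_train_epochs=3)\nTrainer(model=model, args=args, train_dataset=train_data).train()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Deep Learning Component",

"sec_num": "3.1"

},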
|
{ |
|
"text": "To provide potential explanations for the model's decision, we implement the findings of Naik et al. (2018) , Gururangan et al. (2018), Dasgupta et al. (2018) and McCoy et al. (2019) . Their work has revealed specific heuristics and artifacts that arguably appear in the training sets of these models and can thus explain to some extent the way the models label a pair. Particularly, we implement four kinds of heuristics/explanations. First, the presence of negation. As observed by Naik et al. (2018) , Dasgupta et al. (2018) and McCoy et al. (2019) , negation words such as no, not, don't, nobody, etc. make the model predict C, consistent with the heuristic found in the SNLI training set. Second, we follow Dasgupta et al. (2018) , Naik et al. (2018) and McCoy et al. (2019) and compute the lexical overlap of the two sentences. It is argued that whenever H is completely contained in P, the models tend to predict E, no matter the word order or other constraints. The third heuristic of sentence length is similar (Naik et al., 2018; Gururangan et al., 2018) : Hs that are much longer than their Ps tend to be neutral, while Hs that are shorter than their Ps tend to be entailed. Last, we add relation-specific word heuristics. According to the findings of Gururangan et al. (2018), specific words being present in H or/and P are characteristic for a specific inference relation. So, generic words like animal, instrument, outdoors are mostly found in the Hs of entailments, while modifiers and superlatives like sad, tall, best, first are mostly found in neutral pairs.", |
|
"cite_spans": [ |
|
{ |
|
"start": 89, |
|
"end": 107, |
|
"text": "Naik et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 158, |
|
"text": "Dasgupta et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 182, |
|
"text": "McCoy et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 502, |
|
"text": "Naik et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 505, |
|
"end": 527, |
|
"text": "Dasgupta et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 532, |
|
"end": 551, |
|
"text": "McCoy et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 712, |
|
"end": 734, |
|
"text": "Dasgupta et al. (2018)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 755, |
|
"text": "Naik et al. (2018)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 779, |
|
"text": "McCoy et al. (2019)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1039, |
|
"text": "(Naik et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 1040, |
|
"end": 1064, |
|
"text": "Gururangan et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Deep Learning Component", |
|
"sec_num": "3.1" |
|
}, |
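
{

"text": "As an illustration of how these surface heuristics can be operationalized, the following sketch extracts the four kinds of features from a pair; the word lists and the length threshold are illustrative assumptions rather than XplaiNLI's exact values:\n\nNEGATION = {'no', 'not', \"don't\", 'nobody', 'never'}     # negation words -> C\nENTAILMENT_WORDS = {'animal', 'instrument', 'outdoors'}  # generic words in H -> E\nNEUTRAL_WORDS = {'sad', 'tall', 'best', 'first'}         # modifiers/superlatives -> N\n\ndef heuristics(premise, hypothesis):\n    p, h = premise.lower().split(), hypothesis.lower().split()\n    return {\n        # 1. Negation words tend to make the model predict C.\n        'negation': any(w in NEGATION for w in p + h),\n        # 2. Lexical overlap: H fully contained in P tends to yield E.\n        'lexical_overlap': set(h) <= set(p),\n        # 3. Sentence length: much longer Hs -> N, shorter Hs -> E (threshold assumed).\n        'long_hypothesis': len(h) > len(p) + 3,\n        'short_hypothesis': len(h) < len(p),\n        # 4. Relation-specific words from the annotation-artifact literature.\n        'entailment_words': any(w in ENTAILMENT_WORDS for w in h),\n        'neutral_words': any(w in NEUTRAL_WORDS for w in h),\n    }\n\nprint(heuristics('A woman is walking.', 'A person is not walking.'))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Deep Learning Component",

"sec_num": "3.1"

},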
|
{ |
|
"text": "The symbolic component implements a version of Natural Logic (NL) (Valencia, 1991) . NL attempts to explain inferences through monotonicity, i.e., by whether the concepts expressed in a sentence can become \"more general\" or \"more specific\" salva veritate. For example, in the sentence a woman is walking, woman can be replaced by the more general person while preserving truth. The symbolic component is based on an improved version of the Graphical Knowledge Representation (GKR) by Kalouli and Crouch (2018) -GKR allows for the kind of inference mechanism we require. In the first stage of the process, P and H are parsed to their GKR representations, each producing six default GKR graphs: a dependency graph, a conceptual graph, a contextual graph, a lexical graph, a properties graph and a coreference graph. In the next stage, the lexical graphs, which contain, for each content word, the WordNet (Fellbaum, 1998) senses, synonyms, antonyms, hypernyms, hyponyms and the SUMO (Niles and Pease, 2001 ) concepts, superconcepts and subconcepts are used to determine matches between H and P and their specificity. For example, person in H can be matched to woman in P and be assigned the specificity superclass: person is a hypernym of woman. One of the four specificity markers (equal, subclass, superclass, disjoint) can be assigned. In the next stage, the determined specificities are updated based on the predicate-argument structure of each sentence, captured in the concept graph. For instance, woman is a subclass of person but it is not a subclass of tall person (not all women are tall). For the two terms of a match, the system considers if both, none or only one of them have dependents (modifiers/arguments) in their respective concept graph. Based on that, different update rules apply. For example, if person in H has additional dependents such as tall but woman in P does not, then the match becomes more specific: since H (person) was already more general than P (woman) (specificity superclass), then making this match more specific leads to the specificity becoming undetermined (none). After updating all H-P matches, the exact inference relation is determined based on the GKR context graphs, the instantiabilities they contain and the specificities of the matches. For example, if the H-term is instantiated and more or equally specific than the uninstantiated P-term (a womanno woman), there is a contradiction. If the H-term is instantiated and more general (a personno woman) than the P-term, we cannot determine the relation. Similarly for entailments: if the match is equally or more specific and both terms are instantiated, there is an entailment (a woman -a woman. See Kalouli et al. (2020) for more details on the symbolic engine.", |
|
"cite_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 82, |
|
"text": "(Valencia, 1991)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 509, |
|
"text": "Kalouli and Crouch (2018)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 903, |
|
"end": 919, |
|
"text": "(Fellbaum, 1998)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 981, |
|
"end": 1003, |
|
"text": "(Niles and Pease, 2001", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 2699, |
|
"end": 2720, |
|
"text": "Kalouli et al. (2020)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Symbolic Component", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "These rules, i.e. the exact combinations of specificity relations and contexts, can be used straightforwardly to explain the decision made by the symbolic component.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Symbolic Component", |
|
"sec_num": "3.2" |
|
}, |
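
{

"text": "As a schematic rendering of these rules, the following sketch reduces the decision to the match specificities and instantiability flags described above; the full GKR graph machinery is abstracted away, and the exact update mappings and fall-through behavior are assumptions:\n\nEQUAL, SUBCLASS, SUPERCLASS, DISJOINT, NONE = 'equal', 'subclass', 'superclass', 'disjoint', 'none'\n\ndef update_specificity(spec, h_has_extra_dependents, p_has_extra_dependents):\n    # Extra dependents on the H-term make the match more specific, e.g. the\n    # superclass match person-woman with an extra modifier on person becomes\n    # undetermined (none).\n    if h_has_extra_dependents and not p_has_extra_dependents:\n        return {SUPERCLASS: NONE, EQUAL: SUBCLASS}.get(spec, spec)\n    if p_has_extra_dependents and not h_has_extra_dependents:\n        return {SUBCLASS: NONE, EQUAL: SUPERCLASS}.get(spec, spec)\n    return spec\n\ndef infer(spec, h_instantiated, p_instantiated):\n    # Contradiction: instantiated H-term, equally or more specific than an\n    # uninstantiated P-term (a woman vs. no woman).\n    if h_instantiated and not p_instantiated and spec in (EQUAL, SUBCLASS):\n        return 'C'\n    # Entailment: equally or more specific match, both terms instantiated\n    # (a woman vs. a woman).\n    if h_instantiated and p_instantiated and spec in (EQUAL, SUBCLASS):\n        return 'E'\n    # Otherwise (e.g. a person vs. no woman) the relation is undetermined.\n    return 'N'\n\nprint(infer(EQUAL, True, False))  # C, as in 'A woman walks.' vs. 'No woman walks.'",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Symbolic Component",

"sec_num": "3.2"

},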
|
{ |
|
"text": "The hybrid approach is based on the fact that distributional features are suitable for dealing with conceptual aspects of the meanings of words, phrases, and sentences, but struggle with Boolean and contextual phenomena like modals, quantifiers, negation, implicatives, propositional attitudes, conditionals, etc. (Dasgupta et al., 2018; Naik et al., 2018; McCoy et al., 2019 , to name only a few). These are phenomena to which more symbolic/structural approaches are well suited. Thus, we expect that \"easy\" cases which do not involve such phenomena will be best handled by the DL approach, while hard linguistic phenomena like the ones mentioned will be best handled by the symbolic approach. Thus, the hybrid component determines whether to use the symbolic or the DL label as its own inference label, based on specific semantic characteristics of the pair.", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 337, |
|
"text": "(Dasgupta et al., 2018;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 356, |
|
"text": "Naik et al., 2018;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 375, |
|
"text": "McCoy et al., 2019", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Hybrid Component", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "During training, the hybrid classifier learns for each pair which of the components delivers the right label (again based on the SICK-train corpus): the symbolic one (S), the DL one (DL) or both of them (B). 4 With this, the classifier indirectly learns whether the pair is \"easy\" or hard: if S is right, the pair is probably hard; if DL is right, the pair is probably easier; if both are right, we cannot make any claims about the nature of the pair. The learning is based on the implemented rules of the symbolic component (cf. Section 3.2), which are converted to features, e.g., the pair P: The woman is walking. H: The person is not walking would be assigned the features veridical, antiveridical, superclass because the match person-woman has the superclass specificity and the highest match walk-walk is instantiated in P and uninstantiated in H. These features (rules) capture the effects of hard linguistic phenomena like modals, negation, quantifiers, implicatives, factives, etc. To target explainability and as decision trees have been shown to be one of the most interpretable models (Guidotti et al., 2018) , we train a Random Forest classifier (Gini impurity) with 30 estimators: 5 each pair is classified as one of S, DL or B, and then mapped to the respective label: if classified as S or DL, the symbolic or the DL inference label are used, respectively; if classified as B, then either one of S or DL can be chosen but we use the DL label for higher robustness.", |
|
"cite_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 209, |
|
"text": "4", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1097, |
|
"end": 1120, |
|
"text": "(Guidotti et al., 2018)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Hybrid Component", |
|
"sec_num": "3.3" |
|
}, |
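
{

"text": "A minimal sketch of this classifier, assuming scikit-learn, is given below; the binary rule-indicator features and the toy training rows are schematic stand-ins for the features derived from the symbolic rules:\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Each row holds binary indicators for symbolic rule features, e.g.\n# [veridical, antiveridical, equal, subclass, superclass, disjoint].\nX_train = [[1, 1, 0, 0, 1, 0],   # e.g. 'The woman is walking.' / 'The person is not walking.'\n           [1, 0, 1, 0, 0, 0]]\ny_train = ['S', 'DL']            # which component delivered the right label ('B' = both)\n\nclf = RandomForestClassifier(n_estimators=30, criterion='gini')  # 30 estimators, Gini impurity\nclf.fit(X_train, y_train)\n\ndef hybrid_label(features, symbolic_label, dl_label):\n    choice = clf.predict([features])[0]\n    # For 'B' (both right) we fall back to the DL label for robustness.\n    return symbolic_label if choice == 'S' else dl_label\n\n# The per-feature importances can serve as explanation weights in the UI.\nprint(clf.feature_importances_)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The Hybrid Component",

"sec_num": "3.3"

},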
|
{ |
|
"text": "The features used for prediction are also used for explainability purposes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Hybrid Component", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The user interface (Figure 1, right) features three main components, all emphasizing the role of the human-in-the-loop. Two text fields (for P and H) allow users to insert the inference pair to be computed.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 19, |
|
"end": 36, |
|
"text": "(Figure 1, right)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Explainable Visual Interface", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Visualizing Explanations With the submission of the input pair, the system on the backend computes one inference label for each approach as well as explanations for each label. The results are visualized with an intuitive visualization schema (Figure 1, right) : each sentence of the pair is presented along with all features that could lead to a certain inference label. On the left side, the user can find the features (rules) of the symbolic approach and on the right, the features of the DL model. The features that are relevant for this pair are colored and contain , if the feature's value is true, or no , if the value is false. The color of the features encodes the inference relation that each approach predicted: green is for E, red for C and grey for N. Some DL features might have lower opacity: this means that they should -according to the literature -lead to a different label than the one actually predicted by the model. In this way, the user can verify previous literature findings or discover new patterns. The colored features are then linked with the predicted inference label, also encoded by color. No link between the DL features and the label means that the prediction is not based on any of these features. In the middle of the visualization, the user can find the label of the hybrid approach, marked with bold text. Again, links visualize the behavior of the approach: if there is a link between the symbolic decision and the hybrid one, the hybrid approach chose the symbolic label; if the link is between the DL label and the hybrid one, the hybrid approach chose the DL label. If both links exist, then the labels of symbolic and DL were the same and so the hybrid approach just chose one of them. In terms of visualization, all features used for the hybrid decision are marked with a grey H in increasing opacity: the darker the color, the more weight this feature had for the decision.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 260, |
|
"text": "(Figure 1, right)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Explainable Visual Interface", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "User-defined Heuristics Along with the input pair, users can also input words -also words not found in P or H -that are expected to act as heuristics for a certain inference relation. The option of input words is available for both P and H and for all three inference relations. For instance, the user can insert the word asleep in the Contradiction field of H to check the artifact that hypotheses containing the word asleep are bound to be labeled as C by a DL model. Due to the system's architecture (see Section 3), only the DL model might get explained by additional heuristics; the symbolic approach is based on predefined inference rules and the hybrid approach uses semantic features to make its decision, independently from surface heuristics. The current version of the system only supports the search for specific words as heuristics; future versions will extend to further user-defined heuristics, e.g. Part-Of-Speech tags.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Visual Interface", |
|
"sec_num": "4" |
|
}, |
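
{

"text": "To illustrate the mechanics of such a user-defined heuristic, a minimal sketch of the underlying check might look as follows; the function name and signature are hypothetical:\n\ndef check_user_heuristic(word, expected_label, sentence, predicted_label):\n    # The heuristic fires if the user's word occurs in the chosen sentence (P or H);\n    # it is confirmed when the DL model indeed predicted the associated relation.\n    tokens = [w.strip('.,!?') for w in sentence.lower().split()]\n    return word.lower() in tokens and predicted_label == expected_label\n\n# E.g. checking the asleep -> C artifact on a hypothesis:\nprint(check_user_heuristic('asleep', 'C', 'The dog is asleep.', 'C'))  # True",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Explainable Visual Interface",

"sec_num": "4"

},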
|
{ |
|
"text": "Learning from User Feedback The labels of the hybrid decision are at the same time clickable buttons for users to provide their annotation of the pair. With this annotation, an (offline) learning process is initiated: the pair and the user's annotation are added to the training pool of the DL model so that the model can be re-trained on increasingly large data. Whenever enough data has been collected, the model is re-trained; this re-training also triggers the re-training of the hybrid model, leading to improved results.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Explainable Visual Interface", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "This paper presented an interactive visualization interface for explainable NLI. The interface uses three different approaches to compute inference and visualizes the features that lead to each decision. In contrast to black-box machine-learning models, this approach enables users to get intuitions of the decision-making process (Spinner et al., 2020) , as well as to distill linguistic knowledge about the analyzed phenomena. The options for user-defined heuristics and user-driven learning can help refine the used models and components and optimize them to the users' intuition and domain understanding. To increase explainability and comparability, future work will allow the user to a) choose between different DL models for training, b) choose between hybrid models trained on different datasets, c) define their own rules for the hybrid classifier, and d) display the decision tree of the hybrid classifier for better exploration.", |
|
"cite_spans": [ |
|
{ |
|
"start": 331, |
|
"end": 353, |
|
"text": "(Spinner et al., 2020)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Available under github.com/kkalouli/SICK-processing 3 Available under github.com/huggingface/transformers", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If none of them delivers the right label, then we cannot make any claims about the nature of the pair. 5 This classifier is different from the one inKalouli et al. (2020), where the focus is on performance rather than explainability.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "e-snli: Natural language inference with natural language explanations", |
|
"authors": [ |
|
{ |
|
"first": "Oana-Maria", |
|
"middle": [], |
|
"last": "Camburu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lukasiewicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Phil", |
|
"middle": [], |
|
"last": "Blunsom", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "9539--9549", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Oana-Maria Camburu, Tim Rockt\u00e4schel, Thomas Lukasiewicz, and Phil Blunsom. 2018. e-snli: Natural language inference with natural language explanations. In Advances in Neural Information Processing Systems, pages 9539-9549.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Evaluating Compositionality in Sentence Embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Ishita", |
|
"middle": [], |
|
"last": "Dasgupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Demi", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Stuhlm\u00fcller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Gershman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Goodman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "CoRR", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ishita Dasgupta, Demi Guo, Andreas Stuhlm\u00fcller, Samuel J. Gershman, and Noah D. Goodman. 2018. Evaluating Compositionality in Sentence Embeddings. CoRR, abs/1802.04302.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of Deep Bidirec- tional Transformers for Language understanding. CoRR, abs/1810.04805.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "WordNet: An Electronic Lexical Database (Language, Speech, and Communication)", |
|
"authors": [ |
|
{ |
|
"first": "Christiane", |
|
"middle": [], |
|
"last": "Fellbaum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christiane Fellbaum. 1998. WordNet: An Electronic Lexical Database (Language, Speech, and Communication). The MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "A Survey of Methods for Explaining Black Box Models", |
|
"authors": [ |
|
{ |
|
"first": "Riccardo", |
|
"middle": [], |
|
"last": "Guidotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Monreale", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salvatore", |
|
"middle": [], |
|
"last": "Ruggieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franco", |
|
"middle": [], |
|
"last": "Turini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fosca", |
|
"middle": [], |
|
"last": "Giannotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dino", |
|
"middle": [], |
|
"last": "Pedreschi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Comput. Surv", |
|
"volume": "51", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Riccardo Guidotti, Anna Monreale, Salvatore Ruggieri, Franco Turini, Fosca Giannotti, and Dino Pedreschi. 2018. A Survey of Methods for Explaining Black Box Models. ACM Comput. Surv., 51(5), August.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Annotation Artifacts in Natural Language Inference Data", |
|
"authors": [ |
|
{

"first": "Suchin",

"middle": [],

"last": "Gururangan",

"suffix": ""

},

{

"first": "Swabha",

"middle": [],

"last": "Swayamdipta",

"suffix": ""

},

{

"first": "Omer",

"middle": [],

"last": "Levy",

"suffix": ""

},

{

"first": "Roy",

"middle": [],

"last": "Schwartz",

"suffix": ""

},

{

"first": "Samuel",

"middle": [],

"last": "Bowman",

"suffix": ""

},

{

"first": "Noah",

"middle": [

"A"

],

"last": "Smith",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "107--112", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. 2018. Annotation Artifacts in Natural Language Inference Data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 107-112. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "GKR: the Graphical Knowledge Representation for semantic parsing", |
|
"authors": [ |
|
{

"first": "Aikaterini-Lida",

"middle": [],

"last": "Kalouli",

"suffix": ""

},

{

"first": "Richard",

"middle": [],

"last": "Crouch",

"suffix": ""

}
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Workshop on Computational Semantics beyond Events and Roles", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "27--37", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aikaterini-Lida Kalouli and Richard Crouch. 2018. GKR: the Graphical Knowledge Representation for semantic parsing. In Proceedings of the Workshop on Computational Semantics beyond Events and Roles, pages 27-37, New Orleans, Louisiana. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "WordNet for \"Easy\" Textual Inferences", |
|
"authors": [ |
|
{ |
|
"first": "Aikaterini-Lida", |
|
"middle": [], |
|
"last": "Kalouli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Livy", |
|
"middle": [], |
|
"last": "Real", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valeria", |
|
"middle": [], |
|
"last": "De Paiva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aikaterini-Lida Kalouli, Livy Real, and Valeria de Paiva. 2018. WordNet for \"Easy\" Textual Inferences. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Paris, France, may. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Hy-NLI: a Hybrid system for Natural Language Inference", |
|
"authors": [ |
|
{ |
|
"first": "Aikaterini-Lida", |
|
"middle": [], |
|
"last": "Kalouli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Crouch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Valeria", |
|
"middle": [], |
|
"last": "De Paiva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics, COLING '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aikaterini-Lida Kalouli, Richard Crouch, and Valeria de Paiva. 2020. Hy-NLI: a Hybrid system for Natural Lan- guage Inference. In Proceedings of the 28th International Conference on Computational Linguistics, COLING '20. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "SemEval-2014 Task 1: Evaluation of Compositional Distributional Semantic Models on Full Sentences through Semantic Relatedness and Textual Entailment", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Marelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Menini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Zamparelli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Marelli, Luisa Bentivogli, Marco Baroni, Raffaella Bernardi, Stefano Menini, and Roberto Zamparelli. 2014. SemEval-2014 Task 1: Evaluation of Compositional Distributional Semantic Models on Full Sentences through Semantic Relatedness and Textual Entailment. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 1-8, Dublin, Ireland, August. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Mccoy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ellie", |
|
"middle": [], |
|
"last": "Pavlick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tal", |
|
"middle": [], |
|
"last": "Linzen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3428--3448", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom McCoy, Ellie Pavlick, and Tal Linzen. 2019. Right for the wrong reasons: Diagnosing syntactic heuristics in natural language inference. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3428-3448, Florence, Italy, July. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Stress Test Evaluation for Natural Language Inference", |
|
"authors": [ |
|
{ |
|
"first": "Aakanksha", |
|
"middle": [], |
|
"last": "Naik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhilasha", |
|
"middle": [], |
|
"last": "Ravichander", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Norman", |
|
"middle": [], |
|
"last": "Sadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carolyn", |
|
"middle": [], |
|
"last": "Rose", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Graham", |
|
"middle": [], |
|
"last": "Neubig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2340--2353", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aakanksha Naik, Abhilasha Ravichander, Norman Sadeh, Carolyn Rose, and Graham Neubig. 2018. Stress Test Evaluation for Natural Language Inference. In Proceedings of the 27th International Conference on Compu- tational Linguistics, pages 2340-2353, Santa Fe, New Mexico, USA, August. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Toward a Standard Upper Ontology", |
|
"authors": [ |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Niles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Pease", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the 2nd International Conference on Formal Ontology in Information Systems (FOIS-2001)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ian Niles and Adam Pease. 2001. Toward a Standard Upper Ontology. In Chris Welty and Barry Smith, editors, Proceedings of the 2nd International Conference on Formal Ontology in Information Systems (FOIS-2001), pages 2-9.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "explAIner: A Visual Analytics Framework for Interactive and Explainable Machine Learning. IEEE Transactions on Visualization and Computer Graphics", |
|
"authors": [ |
|
{ |
|
"first": "Thilo", |
|
"middle": [], |
|
"last": "Spinner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Udo", |
|
"middle": [], |
|
"last": "Schlegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Sch\u00e4fer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Menna", |
|
"middle": [], |
|
"last": "El-Assady", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "26", |
|
"issue": "", |
|
"pages": "1064--1074", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thilo Spinner, Udo Schlegel, Hannah Sch\u00e4fer, and Menna El-Assady. 2020. explAIner: A Visual Analytics Framework for Interactive and Explainable Machine Learning. IEEE Transactions on Visualization and Com- puter Graphics, 26(1):1064-1074, Jan.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Studies on Natural Logic and Categorial Grammar", |
|
"authors": [ |
|
{ |
|
"first": "Valencia", |
|
"middle": [], |
|
"last": "Victor S\u00e1nchez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1991, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor S\u00e1nchez Valencia. 1991. Studies on Natural Logic and Categorial Grammar. Ph.D. thesis, University of Amsterdam.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference", |
|
"authors": [ |
|
{ |
|
"first": "Adina", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikita", |
|
"middle": [], |
|
"last": "Nangia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adina Williams, Nikita Nangia, and Samuel R. Bowman. 2017. A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference. CoRR, abs/1704.05426.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Can we explain natural language inference decisions taken with neural networks? Inference rules in distributed representations", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Zanzotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Ferrone", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "2017 International Joint Conference on Neural Networks (IJCNN)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3680--3687", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. M. Zanzotto and L. Ferrone. 2017. Can we explain natural language inference decisions taken with neural networks? Inference rules in distributed representations. In 2017 International Joint Conference on Neural Networks (IJCNN), pages 3680-3687, May.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |