|
{ |
|
"paper_id": "E17-1007", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:52:57.412252Z" |
|
}, |
|
"title": "Hypernyms under Siege: Linguistically-motivated Artillery for Hypernymy Detection", |
|
"authors": [ |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Bar-Ilan University", |
|
"location": { |
|
"settlement": "Ramat-Gan", |
|
"country": "Israel" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Singapore University of Technology and Design", |
|
"location": { |
|
"country": "Singapore" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Dominik", |
|
"middle": [], |
|
"last": "Schlechtweg", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Stuttgart", |
|
"location": { |
|
"settlement": "Stuttgart", |
|
"country": "Germany" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "The fundamental role of hypernymy in NLP has motivated the development of many methods for the automatic identification of this relation, most of which rely on word distribution. We investigate an extensive number of such unsupervised measures, using several distributional semantic models that differ by context type and feature weighting. We analyze the performance of the different methods based on their linguistic motivation. Comparison to the state-of-the-art supervised methods shows that while supervised methods generally outperform the unsupervised ones, the former are sensitive to the distribution of training instances, hurting their reliability. Being based on general linguistic hypotheses and independent from training data, unsupervised measures are more robust, and therefore are still useful artillery for hypernymy detection.", |
|
"pdf_parse": { |
|
"paper_id": "E17-1007", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "The fundamental role of hypernymy in NLP has motivated the development of many methods for the automatic identification of this relation, most of which rely on word distribution. We investigate an extensive number of such unsupervised measures, using several distributional semantic models that differ by context type and feature weighting. We analyze the performance of the different methods based on their linguistic motivation. Comparison to the state-of-the-art supervised methods shows that while supervised methods generally outperform the unsupervised ones, the former are sensitive to the distribution of training instances, hurting their reliability. Being based on general linguistic hypotheses and independent from training data, unsupervised measures are more robust, and therefore are still useful artillery for hypernymy detection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "In the last two decades, the NLP community has invested a consistent effort in developing automated methods to recognize hypernymy. Such effort is motivated by the role this semantic relation plays in a large number of tasks, such as taxonomy creation (Snow et al., 2006; Navigli et al., 2011) and recognizing textual entailment (Dagan et al., 2013) . The task has appeared to be, however, a challenging one, and the numerous approaches proposed to tackle it have often shown limitations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 252, |
|
"end": 271, |
|
"text": "(Snow et al., 2006;", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 272, |
|
"end": 293, |
|
"text": "Navigli et al., 2011)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 329, |
|
"end": 349, |
|
"text": "(Dagan et al., 2013)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Early corpus-based methods have exploited patterns that may indicate hypernymy (e.g. \"animals such as dogs\") (Hearst, 1992; Snow et al., 2005) , but the recall limitation of this approach, requiring both words to co-occur in a sentence, motivated the development of methods that rely on adaptations of the distributional hypothesis (Harris, 1954) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 109, |
|
"end": 123, |
|
"text": "(Hearst, 1992;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 124, |
|
"end": 142, |
|
"text": "Snow et al., 2005)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 332, |
|
"end": 346, |
|
"text": "(Harris, 1954)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The first distributional approaches were unsupervised, assigning a score for each (x, y) wordpair, which is expected to be higher for hypernym pairs than for negative instances. Evaluation is performed using ranking metrics inherited from information retrieval, such as Average Precision (AP) and Mean Average Precision (MAP). Each measure exploits a certain linguistic hypothesis such as the distributional inclusion hypothesis (Weeds and Weir, 2003; Kotlerman et al., 2010) and the distributional informativeness hypothesis (Santus et al., 2014; Rimell, 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 429, |
|
"end": 451, |
|
"text": "(Weeds and Weir, 2003;", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 452, |
|
"end": 475, |
|
"text": "Kotlerman et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 526, |
|
"end": 547, |
|
"text": "(Santus et al., 2014;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 548, |
|
"end": 561, |
|
"text": "Rimell, 2014)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In the last couple of years, the focus of the research community shifted to supervised distributional methods, in which each (x, y) word-pair is represented by a combination of x and y's word vectors (e.g. concatenation or difference), and a classifier is trained on these resulting vectors to predict hypernymy (Baroni et al., 2012; Roller et al., 2014; Weeds et al., 2014) . While the original methods were based on count-based vectors, in recent years they have been used with word embeddings (Mikolov et al., 2013; Pennington et al., 2014) , and have gained popularity thanks to their ease of use and their high performance on several common datasets. However, there have been doubts on whether they can actually learn to recognize hypernymy (Levy et al., 2015b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 312, |
|
"end": 333, |
|
"text": "(Baroni et al., 2012;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 334, |
|
"end": 354, |
|
"text": "Roller et al., 2014;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 374, |
|
"text": "Weeds et al., 2014)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 518, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 543, |
|
"text": "Pennington et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 766, |
|
"text": "(Levy et al., 2015b)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Additional recent hypernymy detection methods include a multimodal perspective (Kiela et al., 2015) , a supervised method using unsupervised measure scores as features (Santus et al., 2016a) , and a neural method integrating path-based and distributional information (Shwartz et al., 2016) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 99, |
|
"text": "(Kiela et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 190, |
|
"text": "(Santus et al., 2016a)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 289, |
|
"text": "(Shwartz et al., 2016)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper we perform an extensive evaluation of various unsupervised distributional measures for hypernymy detection, using several distributional semantic models that differ by context type and feature weighting. Some measure vari-ants and context-types are tested for the first time. 1 We demonstrate that since each of these measures captures a different aspect of the hypernymy relation, there is no single measure that consistently performs well in discriminating hypernymy from different semantic relations. We analyze the performance of the measures in different settings and suggest a principled way to select the suitable measure, context type and feature weighting according to the task setting, yielding consistent performance across datasets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 291, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also compare the unsupervised measures to the state-of-the-art supervised methods. We show that supervised methods outperform the unsupervised ones, while also being more efficient, computed on top of low-dimensional vectors. At the same time, however, our analysis reassesses previous findings suggesting that supervised methods do not actually learn the relation between the words, but only characteristics of a single word in the pair (Levy et al., 2015b) . Moreover, since the features in embedding-based classifiers are latent, it is difficult to tell what the classifier has learned. We demonstrate that unsupervised methods, on the other hand, do account for the relation between words in a pair, and are easily interpretable, being based on general linguistic hypotheses.", |
|
"cite_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 461, |
|
"text": "(Levy et al., 2015b)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We created multiple distributional semantic spaces that differ in their context type and feature weighting. As an underlying corpus we used a concatenation of the following two corpora: ukWaC (Ferraresi, 2007) , a 2-billion word corpus constructed by crawling the .uk domain, and WaCkypedia EN (Baroni et al., 2009) , a 2009 dump of the English Wikipedia. Both corpora include POS, lemma and dependency parse annotations. Our vocabulary (of target and context words) includes only nouns, verbs and adjectives that occurred at least 100 times in the corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 209, |
|
"text": "(Ferraresi, 2007)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 294, |
|
"end": 315, |
|
"text": "(Baroni et al., 2009)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Distributional Semantic Spaces", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We use several context types:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Window-based contexts: the contexts of a target word w i are the words surrounding it in a ksized window:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "w i\u2212k , ..., w i\u22121 , w i+1 , ..., w i+k .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "If the context-type is directional, words occurring before and after w i are marked differently, i.e.: Figure 1 : An example dependency tree of the sentence cute cats drink milk, with the target word cats. The dependencybased contexts are drink-v:nsubj and cute-a:amod \u22121 . The joint-dependency context is drink-v#milk-n. Differently from Chersoni et al. (2016) , we exclude the dependency tags to mitigate the sparsity of contexts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 339, |
|
"end": 361, |
|
"text": "Chersoni et al. (2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 111, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "w i\u2212k /l, ..., w i\u22121 /l, w i+1 /r, ..., w i+k /r.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Out-of-vocabulary words are filtered out before applying the window. We experimented with window sizes 2 and 5, directional and indirectional (win2, win2d, win5, win5d).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
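
{

"text": "As an illustration of the window-based context extraction described above, the following is a minimal Python sketch (our own illustrative code, not the authors' implementation; the function name window_contexts is ours) that produces plain and directional window contexts from a POS-tagged, lemmatized token list with out-of-vocabulary words already removed:\n\ndef window_contexts(tokens, i, k, directional=False):\n    # tokens: list of lemma-pos strings, OOV words already filtered out\n    # i: index of the target word; k: window size (e.g. 2 or 5)\n    left = tokens[max(0, i - k):i]\n    right = tokens[i + 1:i + 1 + k]\n    if directional:\n        # mark contexts occurring before/after the target differently (win2d, win5d)\n        return [w + '/l' for w in left] + [w + '/r' for w in right]\n    return left + right\n\n# toy example: target word cats-n with a directional window of size 2\nprint(window_contexts(['cute-a', 'cats-n', 'drink-v', 'milk-n'], 1, 2, directional=True))\n# -> ['cute-a/l', 'drink-v/r', 'milk-n/r']",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context Type",

"sec_num": null

},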
|
{ |
|
"text": "\u2022 Dependency-based contexts: rather than adjacent words in a window, we consider neighbors in a dependency parse tree (Pad\u00f3 and Lapata, 2007; Baroni and Lenci, 2010) . The contexts of a target word w i are its parent and daughter nodes in the dependency tree (dep). We also experimented with a joint dependency context inspired by Chersoni et al. (2016) , in which the contexts of a target word are the parent-sister pairs in the dependency tree (joint). See Feature Weighting Each distributional semantic space is spanned by a matrix M in which each row corresponds to a target word while each column corresponds to a context. The value of each cell M i,j represents the association between the target word w i and the context c j . We experimented with two feature weightings:", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 141, |
|
"text": "(Pad\u00f3 and Lapata, 2007;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 142, |
|
"end": 165, |
|
"text": "Baroni and Lenci, 2010)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 353, |
|
"text": "Chersoni et al. (2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Frequency -raw frequency (no weighting): M i,j is the number of co-occurrences of w i and c j in the corpus.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Positive PMI (PPMI) -pointwise mutual information (PMI) (Church and Hanks, 1990) is defined as the log ratio between the joint probability of w and c and the product of their marginal probabilities:", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 82, |
|
"text": "(Church and Hanks, 1990)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P M I(w, c) = logP (w,c) P (w)P (c)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": ", whereP (w),P (c), andP (w, c) are estimated by the relative frequencies of a word w, a context c and a word-context pair (w, c), respectively. To handle unseen pairs (w, c), yielding P M I(w, c) = log(0) = \u2212\u221e, PPMI (Bullinaria and Levy, 2007) assigns zero to negative PMI scores: P P M I(w, c) = max(P M I(w, c), 0).", |
|
"cite_spans": [ |
|
{ |
|
"start": 233, |
|
"end": 244, |
|
"text": "Levy, 2007)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In addition, one of the measures we used (Santus et al., 2014) required a third feature weighting:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Positive LMI (PLMI) -positive local mutual information (PLMI) (Evert, 2005; Evert, 2008) . PPMI was found to have a bias towards rare events. PLMI simply balances PPMI by multiplying it by the co-occurrence frequency of w and c: P LM I(w, c) = f req(w, c) \u2022 P P M I(w, c).", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 77, |
|
"text": "(Evert, 2005;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 78, |
|
"end": 90, |
|
"text": "Evert, 2008)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Type", |
|
"sec_num": null |
|
}, |
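
{

"text": "To make the weighting schemes concrete, here is a minimal NumPy sketch (our own code) that derives PPMI and PLMI from a raw co-occurrence count matrix, under the simplifying assumption that the matrix is small enough to hold as a dense array (the actual spaces are large and sparse):\n\nimport numpy as np\n\ndef ppmi_and_plmi(counts):\n    # counts: (targets x contexts) matrix of raw co-occurrence frequencies\n    total = counts.sum()\n    p_wc = counts / total                              # joint P(w, c)\n    p_w = counts.sum(axis=1, keepdims=True) / total    # marginal P(w)\n    p_c = counts.sum(axis=0, keepdims=True) / total    # marginal P(c)\n    with np.errstate(divide='ignore', invalid='ignore'):\n        pmi = np.log(p_wc / (p_w * p_c))\n    pmi[~np.isfinite(pmi)] = 0.0     # unseen pairs would give log(0) = -inf\n    ppmi = np.maximum(pmi, 0.0)      # clip negative PMI values to zero\n    plmi = counts * ppmi             # PLMI(w, c) = freq(w, c) * PPMI(w, c)\n    return ppmi, plmi\n\ncounts = np.array([[10.0, 0.0, 3.0], [2.0, 5.0, 0.0]])\nppmi, plmi = ppmi_and_plmi(counts)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context Type",

"sec_num": null

},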
|
{ |
|
"text": "We experiment with a large number of unsupervised measures proposed in the literature for distributional hypernymy detection, with some new variants. In the following section, v x and v y denote x and y's word vectors (rows in the matrix M ). We consider the scores as measuring to what extent y is a hypernym of x (x \u2192 y).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Unsupervised Hypernymy Detection Measures", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Following the distributional hypothesis (Harris, 1954), similar words share many contexts, thus have a high similarity score. Although the hypernymy relation is asymmetric, similarity is one of its properties (Santus et al., 2014) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 230, |
|
"text": "(Santus et al., 2014)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 Cosine Similarity (Salton and McGill, 1986 ) A symmetric similarity measure: (Lin, 1998) A symmetric similarity measure that quantifies the ratio of shared contexts to the contexts of each word:", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 44, |
|
"text": "(Salton and McGill, 1986", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 90, |
|
"text": "(Lin, 1998)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "cos(x, y) = v x \u2022 v y v x \u2022 v y \u2022 Lin Similarity", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Lin(x, y) = \u03a3 c\u2208 vx\u2229 vy [ v x [c] + v y [c]] \u03a3 c\u2208 vx v x [c] + \u03a3 c\u2208 vy v y [c]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
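
{

"text": "A minimal sketch of the two similarity measures above, over dense rows of the matrix M (our own illustrative code; vector values are assumed to be non-negative weights such as frequency or PPMI):\n\nimport numpy as np\n\ndef cosine(vx, vy):\n    # symmetric: normalized dot product of the two context vectors\n    return float(vx @ vy) / (np.linalg.norm(vx) * np.linalg.norm(vy))\n\ndef lin(vx, vy):\n    # weight of the shared contexts relative to the total weight of both words\n    shared = (vx > 0) & (vy > 0)\n    return (vx[shared].sum() + vy[shared].sum()) / (vx.sum() + vy.sum())\n\nvx = np.array([3.0, 2.0, 0.0, 1.0])   # hypothetical weighted row for x\nvy = np.array([1.0, 0.0, 4.0, 2.0])   # hypothetical weighted row for y\nprint(cosine(vx, vy), lin(vx, vy))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Similarity Measures",

"sec_num": "3.1"

},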
|
{ |
|
"text": "\u2022 APSyn (Santus et al., 2016b) A symmetric measure that computes the extent of intersection among the N most related contexts of two words, weighted according to the rank of the shared contexts (with N as a hyper-parameter):", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 30, |
|
"text": "(Santus et al., 2016b)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "AP Syn(x, y) = \u03a3 c\u2208N ( vx)\u2229N ( vy) 1 rankx(c)+ranky(c) 2", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Similarity Measures", |
|
"sec_num": "3.1" |
|
}, |
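
{

"text": "A sketch of APSyn under the definition above (our own code; top-N contexts are taken by descending weight and ranks start at 1, with N left as a hyper-parameter):\n\nimport numpy as np\n\ndef top_contexts(v, n):\n    # map from context index to its rank; rank 1 = most related context\n    order = np.argsort(-v)[:n]\n    return {int(c): rank for rank, c in enumerate(order, start=1)}\n\ndef apsyn(vx, vy, n=100):\n    rx, ry = top_contexts(vx, n), top_contexts(vy, n)\n    shared = rx.keys() & ry.keys()\n    # each shared context contributes the inverse of its average rank\n    return sum(1.0 / ((rx[c] + ry[c]) / 2.0) for c in shared)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Similarity Measures",

"sec_num": "3.1"

},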
|
{ |
|
"text": "According to the distributional inclusion hypothesis, the prominent contexts of a hyponym (x) are expected to be included in those of its hypernym (y).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 Weeds Precision (Weeds and Weir, 2003) A directional precision-based similarity measure. This measure quantifies the weighted inclusion of x's contexts by y's contexts:", |
|
"cite_spans": [ |
|
{ |
|
"start": 18, |
|
"end": 40, |
|
"text": "(Weeds and Weir, 2003)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "W eedsP rec(x \u2192 y) = \u03a3 c\u2208 vx\u2229 vy v x [c] \u03a3 c\u2208 vx v x [c]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 cosWeeds (Lenci and Benotto, 2012) Geometric mean of cosine similarity and Weeds precision:", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 36, |
|
"text": "(Lenci and Benotto, 2012)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "cosW eeds(x \u2192 y) = cos(x, y) \u2022 W eedsP rec(x \u2192 y)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
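
{

"text": "A sketch of the two measures above (our own code; cosine is as defined in Section 3.1 and weights are assumed non-negative):\n\nimport numpy as np\n\ndef weeds_prec(vx, vy):\n    # weighted inclusion of x's contexts among y's contexts\n    shared = (vx > 0) & (vy > 0)\n    return vx[shared].sum() / vx.sum()\n\ndef cos_weeds(vx, vy):\n    # geometric mean of cosine similarity and Weeds precision\n    cos = float(vx @ vy) / (np.linalg.norm(vx) * np.linalg.norm(vy))\n    return np.sqrt(cos * weeds_prec(vx, vy))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Inclusion Measures",

"sec_num": "3.2"

},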
|
{ |
|
"text": "\u2022 ClarkeDE (Clarke, 2009) Computes degree of inclusion, by quantifying weighted coverage of the hyponym's contexts by those of the hypernym:", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 25, |
|
"text": "(Clarke, 2009)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "CDE(x \u2192 y) = \u03a3 c\u2208 vx\u2229 vy min( vx[c], vy[c]) \u03a3 c\u2208 vx vx[c]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u2022 balAPinc (Kotlerman et al., 2010) Balanced average precision inclusion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 11, |
|
"end": 35, |
|
"text": "(Kotlerman et al., 2010)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "AP inc(x \u2192 y) = Ny r=1 [P (r) \u2022 rel(cr)] Ny", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is an adaptation of the average precision measure from information retrieval for the inclusion hypothesis. N y is the number of non-zero contexts of y and P (r) is the precision at rank r, defined as the ratio of shared contexts with y among the top r contexts of x. rel(c) is the relevance of a context c, set to 0 if c is not a context of y, and to 1 \u2212", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "ranky(c)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Ny+1 otherwise, where rank y (c) is the rank of the context c in y's sorted vector. Finally,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "balAP inc(x \u2192 y) = Lin(x, y) \u2022 AP inc(x \u2192 y)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the geometric mean of APinc and Lin similarity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
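
{

"text": "A sketch of APinc and balAPinc as described above (our own code; the reading that the sum runs over the top N_y contexts of x is ours, and Lin similarity is inlined to keep the block self-contained):\n\nimport numpy as np\n\ndef apinc(vx, vy):\n    # walk down x's contexts by rank and average precision times relevance\n    x_ranked = [int(c) for c in np.argsort(-vx) if vx[c] > 0]\n    y_ranked = [int(c) for c in np.argsort(-vy) if vy[c] > 0]\n    y_rank = {c: r for r, c in enumerate(y_ranked, start=1)}\n    n_y = len(y_ranked)\n    score, hits = 0.0, 0\n    for r, c in enumerate(x_ranked[:n_y], start=1):\n        if c in y_rank:\n            hits += 1\n            p_at_r = hits / r                      # precision at rank r\n            rel = 1.0 - y_rank[c] / (n_y + 1.0)    # relevance of context c\n            score += p_at_r * rel\n    return score / n_y if n_y else 0.0\n\ndef bal_apinc(vx, vy):\n    # geometric mean of Lin similarity and APinc\n    shared = (vx > 0) & (vy > 0)\n    lin = (vx[shared].sum() + vy[shared].sum()) / (vx.sum() + vy.sum())\n    return np.sqrt(lin * apinc(vx, vy))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Inclusion Measures",

"sec_num": "3.2"

},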
|
{ |
|
"text": "\u2022 invCL (Lenci and Benotto, 2012) Measures both distributional inclusion of x in y and distributional non-inclusion of y in x:", |
|
"cite_spans": [ |
|
{ |
|
"start": 8, |
|
"end": 33, |
|
"text": "(Lenci and Benotto, 2012)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "invCL(x \u2192 y) = CDE(x \u2192 y) \u2022 (1 \u2212 CDE(y \u2192 x))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inclusion Measures", |
|
"sec_num": "3.2" |
|
}, |
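
{

"text": "A sketch of ClarkeDE and invCL (our own code; assuming non-negative feature weights, so contexts missing from either vector contribute zero to the minimum):\n\nimport numpy as np\n\ndef cde(vx, vy):\n    # weighted coverage of x's contexts by y's contexts (ClarkeDE)\n    return np.minimum(vx, vy).sum() / vx.sum()\n\ndef inv_cl(vx, vy):\n    # high when x's contexts are included in y's but y's are not included in x's\n    return np.sqrt(cde(vx, vy) * (1.0 - cde(vy, vx)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Inclusion Measures",

"sec_num": "3.2"

},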
|
{ |
|
"text": "According to the distributional informativeness hypothesis, hypernyms tend to be less informative than hyponyms, as they are likely to occur in more general contexts than their hyponyms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 SLQS (Santus et al., 2014 )", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 27, |
|
"text": "(Santus et al., 2014", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "SLQS(x \u2192 y) = 1 \u2212 E x E y", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "The informativeness of a word x is evaluated as the median entropy of its top N contexts:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "E x = median N i=1 (H(c i ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": ", where H(c) is the entropy of context c.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 SLQS Sub A new variant of SLQS based on the assumption that if y is judged to be a hypernym of x to a certain extent, then x should be judged to be a hyponym of y to the same extent (which is not the case for regular SLQS). This is achieved by subtraction:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "SLQS sub (x \u2192 y) = E y \u2212 E x", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "It is weakly symmetric in the sense that", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "SLQS sub (x \u2192 y) = \u2212SLQS sub (y \u2192 x).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "SLQS and SLQS Sub have 3 hyper-parameters: i) the number of contexts N ; ii) whether to use median or average entropy among the top N contexts; and iii) the feature weighting used to sort the contexts by relevance (i.e., PPMI or PLMI).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
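
{

"text": "A condensed sketch of SLQS and SLQS Sub over a raw count matrix (our own code; dense NumPy for illustration, top-N contexts selected by row weight, and median entropy by default, mirroring the hyper-parameters listed above):\n\nimport numpy as np\n\ndef context_entropies(counts):\n    # Shannon entropy of each context column over the target words it occurs with\n    with np.errstate(divide='ignore', invalid='ignore'):\n        p = counts / np.maximum(counts.sum(axis=0, keepdims=True), 1e-12)\n        h = np.where(p > 0, p * np.log2(p), 0.0)\n    return -h.sum(axis=0)\n\ndef informativeness(row, col_entropy, n=50, use_median=True):\n    # E_x: median (or mean) entropy of the word's top-n contexts\n    top = np.argsort(-row)[:n]\n    return np.median(col_entropy[top]) if use_median else np.mean(col_entropy[top])\n\ndef slqs_scores(counts, x, y, n=50):\n    col_entropy = context_entropies(counts)\n    e_x = informativeness(counts[x], col_entropy, n)\n    e_y = informativeness(counts[y], col_entropy, n)\n    return 1.0 - e_x / e_y, e_y - e_x     # (SLQS, SLQS_sub) for x -> y",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativeness Measures",

"sec_num": "3.3"

},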
|
{ |
|
"text": "\u2022 SLQS Row Differently from SLQS, SLQS Row computes the entropy of the target rather than the average/median entropy of the contexts, as an alternative way to compute the generality of a word. 2 In addition, parallel to SLQS we tested SLQS Row with subtraction, SLQS Row Sub.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 RCTC (Rimell, 2014) Ratio of change in topic coherence:", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 21, |
|
"text": "(Rimell, 2014)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "RCT C(x \u2192 y) = T C(tx)/T C(t x\\y ) T C(ty)/T C(t y\\x )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "where t x are the top N contexts of x, considered as x's topic, and t x\\y are the top N contexts of x which are not contexts of y. T C(A) is the topic coherence of a set of words A, defined as the median pairwise PMI scores between words in A. N is a hyper-parameter. The measure is based on the assumptions that excluding y's contexts from x's increases the coherence of the topic, while excluding x's contexts from y's decreases the coherence of the topic. We include this measure under the informativeness inclusion, as it is based on a similar hypothesis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Informativeness Measures", |
|
"sec_num": "3.3" |
|
}, |
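
{

"text": "A rough sketch of RCTC (our own code, with heavy assumptions: top_x and top_y are precomputed lists of the N most related contexts of x and y, and pmi is a precomputed dictionary mapping frozensets of word pairs to their PMI in the corpus; zero-coherence edge cases are ignored):\n\nimport numpy as np\nfrom itertools import combinations\n\ndef topic_coherence(words, pmi):\n    # median pairwise PMI between the words of a topic\n    scores = [pmi.get(frozenset(p), 0.0) for p in combinations(words, 2)]\n    return np.median(scores) if scores else 0.0\n\ndef rctc(top_x, top_y, pmi):\n    tx_minus_y = [w for w in top_x if w not in set(top_y)]\n    ty_minus_x = [w for w in top_y if w not in set(top_x)]\n    num = topic_coherence(top_x, pmi) / topic_coherence(tx_minus_y, pmi)\n    den = topic_coherence(top_y, pmi) / topic_coherence(ty_minus_x, pmi)\n    return num / den",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Informativeness Measures",

"sec_num": "3.3"

},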
|
{ |
|
"text": "These measures are motivated by the fact that, even though-being more general-hypernyms are expected to occur in a larger set of contexts, sentences like \"the vertebrate barks\" or \"the mammal arrested the thieves\" are not common, since hyponyms are more specialized and are hence more appropriate in such contexts. hand, hyponyms are likely to occur in broad contexts (e.g. eat, live), where hypernyms are also appropriate. In this sense, we can define the reversed inclusion hypothesis: \"hypernym's contexts are likely to be included in the hyponym's contexts\". The following variants are tested for the first time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reversed Inclusion Measures", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "RevW", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Reversed Weeds", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "eeds(x \u2192 y) = W eeds(y \u2192 x) \u2022 Reversed ClarkeDE RevCDE(x \u2192 y) = CDE(y \u2192 x)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "\u2022 Reversed Weeds", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We use four common semantic relation datasets: BLESS (Baroni and Lenci, 2011) , EVALution (Santus et al., 2015) , Lenci/Benotto (Benotto, 2015) , and Weeds (Weeds et al., 2014) . The datasets were constructed either using knowledge resources (e.g. WordNet, Wikipedia), crowdsourcing or both. The semantic relations and the size of each dataset are detailed in Table 1 . In our distributional semantic spaces, a target word is represented by the word and its POS tag. While BLESS and Lenci/Benotto contain this information, we needed to add POS tags to the other datasets. For each pair (x, y), we considered 3 pairs (x-p, y-p) for p \u2208 {noun, adjective, verb}, and added the respective pair to the dataset only if the words were present in the corpus. Table 2 : Best performing unsupervised measures on each dataset in terms of Average Precision (AP) at k = 100, for hypernym vs. all other relations and vs. each single relation. AP for k = all is also reported for completeness. We excluded the experiments of hypernym vs. random-(n, v, j) for brevity; most of the similarity and some of the inclusion measures achieve AP @100 = 1.0 in these experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 77, |
|
"text": "(Baroni and Lenci, 2011)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 90, |
|
"end": 111, |
|
"text": "(Santus et al., 2015)", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 128, |
|
"end": 143, |
|
"text": "(Benotto, 2015)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 176, |
|
"text": "(Weeds et al., 2014)", |
|
"ref_id": "BIBREF39" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 360, |
|
"end": 367, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 751, |
|
"end": 758, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We split each dataset randomly to 90% test and 10% validation. The validation sets are used to tune the hyper-parameters of several measures: SLQS (Sub), APSyn and RCTC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "In order to evaluate the unsupervised measures described in Section 3, we compute the measure scores for each (x, y) pair in each dataset. We first measure the method's ability to discriminate hypernymy from all other relations in the dataset, i.e. by considering hypernyms as positive instances, and other word pairs as negative instances. In addition, we measure the method's ability to discriminate hypernymy from every other relation in the dataset by considering one relation at a time. For a relation R we consider only (x, y) pairs that are annotated as either hypernyms (positive instances) or R (negative instances). We rank the pairs according to the measure score and compute average precision (AP) at k = 100 and k = all. 5 lated differently in each sense. We consider y as a hypernym of x if hypernymy holds in some of the words' senses. Therefore, when a pair is assigned both hypernymy and another relation, we only keep it as hypernymy. 5 We tried several cut-offs and chose the one that seemed to be more informative in distinguishing between the unsupervised measures. Table 2 reports the best performing measure(s), with respect to AP @100, for each relation in each dataset. The first observation is that there is no single combination of measure, context type and feature weighting that performs best in discriminating hypernymy from all other relations. In order to better understand the results, we focus on the second type of evaluation, in which we discriminate hypernyms from each other relation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 953, |
|
"end": 954, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1087, |
|
"end": 1094, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparing Unsupervised Measures", |
|
"sec_num": "5.1" |
|
}, |
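
{

"text": "The ranking evaluation described above amounts to sorting the candidate pairs by measure score and computing average precision at a cut-off; a minimal sketch (our own code, using one common AP@k variant that averages the precision values at the ranks where hypernyms are retrieved):\n\ndef average_precision_at_k(scores, labels, k=100):\n    # scores: measure score per (x, y) pair; labels: 1 if the pair is a hypernym\n    ranked = sorted(zip(scores, labels), key=lambda p: -p[0])[:k]\n    hits, precisions = 0, []\n    for r, (_, is_hypernym) in enumerate(ranked, start=1):\n        if is_hypernym:\n            hits += 1\n            precisions.append(hits / r)\n    return sum(precisions) / len(precisions) if precisions else 0.0\n\n# toy usage: four scored pairs, two of them hypernyms\nprint(average_precision_at_k([0.9, 0.2, 0.7, 0.4], [1, 0, 1, 0], k=100))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparing Unsupervised Measures",

"sec_num": "5.1"

},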
|
{ |
|
"text": "The results show preference to the syntactic context-types (dep and joint), which might be explained by the fact that these contexts are richer (as they contain both proximity and syntactic information) and therefore more discriminative. In feature weighting there is no consistency, but interestingly, raw frequency appears to be successful in hypernymy detection, contrary to previously reported results for word similarity tasks, where PPMI was shown to outperform it (Bullinaria and Levy, 2007; Levy et al., 2015a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 498, |
|
"text": "Levy, 2007;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 499, |
|
"end": 518, |
|
"text": "Levy et al., 2015a)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparing Unsupervised Measures", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The new SLQS variants are on top of the list in many settings. In particular they perform well in discriminating hypernyms from symmetric relations (antonymy, synonymy, coordination).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparing Unsupervised Measures", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The measures based on the reversed inclusion hypothesis performed inconsistently, achieving perfect score in the discrimination of hypernyms from unrelated words, and performing well in few other cases, always in combination with syntactic contexts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparing Unsupervised Measures", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Finally, the results show that there is no single combination of measure and parameters that performs consistently well for all datasets and classification tasks. In the following section we analyze the best combination of measure, context type and feature weighting to distinguish hypernymy from any other relation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparing Unsupervised Measures", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We considered all relations that occurred in two datasets. For such relation, for each dataset, we ranked the measures by their AP@100 score, selecting those with score \u2265 0.8. 6 Table 3 displays the intersection of the datasets' best measures.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 185, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Hypernym vs. Meronym The inclusion hypothesis seems to be most effective in discriminating between hypernyms and meronyms under syntactic contexts. We conjecture that the windowbased contexts are less effective since they capture topical context words, that might be shared also among holonyms and their meronyms (e.g. car will occur with many of the neighbors of wheel). However, since meronyms and holonyms often have different functions, their functional contexts, which are expressed in the syntactic context-types, are less shared. This is where they mostly differ from hyponym-hypernym pairs, which are of the same function (e.g. cat is a type of animal). Table 2 shows that SLQS performs well in this task on BLESS. This is contrary to previous findings that suggested that SLQS is weak in discriminating between hypernyms and meronyms, as in many cases the holonym is more general than the meronym (Shwartz et al., 2016) . 7 The surprising result could be explained by the nature of meronymy in this dataset: most holonyms in BLESS are rather specific words.", |
|
"cite_spans": [ |
|
{ |
|
"start": 906, |
|
"end": 928, |
|
"text": "(Shwartz et al., 2016)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 931, |
|
"end": 932, |
|
"text": "7", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 662, |
|
"end": 669, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "BLESS was built starting from 200 basic level concepts (e.g. goldfish) used as the x words, to which y words in different relations were associated (e.g. eye, for meronymy; animal, for hypernymy). x words represent hyponyms in the hyponym-hypernym pairs, and should therefore not be too general. Indeed, SLQS assigns high scores to hyponym-hypernym pairs. At the same time, in the meronymy relation in BLESS, x is the holonym and y is the meronym. For consistency with EVALution, we switched those pairs in BLESS, placing the meronym in the x slot and the holonym in the y slot. As a consequence, after the switching, holonyms in BLESS are usually rather specific words (e.g., there are no holonyms like animal and vehicle, as these words were originally in the y slot). In most cases, they are not more general than their meronyms ((eye, goldfish)), yielding low SLQS scores which are easy to separate from hypernyms. We note that this is a weakness of the BLESS dataset, rather than a strength of the measure. For instance, on EVALution, SLQS performs worse (ranked only as high as 13th), as this dataset has no such restriction on the basic level concepts, and may contain pairs like (eye, animal).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Hypernym vs. Attribute Symmetric similarity measures computed on syntactic contexts succeed to discriminate between hypernyms and attributes. Since attributes are syntactically different from hypernyms (in attributes, y is an adjective), it is unsurprising that they occur in different syntactic contexts, yielding low similarity scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "nearly 50% of the SLQS false positive pairs were meronymholonym pairs, in many of which the holonym is more general than the meronym by definition, e.g. (mauritius, africa Table 4 : Best performance on the validation set (10%) of each dataset for the supervised and unsupervised measures, in terms of Average Precision (AP) at k = 100, for hypernym vs. each single relation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 172, |
|
"end": 179, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Hypernym vs. Antonym In all our experiments, antonyms were the hardest to distinguish from hypernyms, yielding the lowest performance. We found that SLQS performed reasonably well in this setting. However, the measure variations, context types and feature weightings were not consistent across datasets. SLQS relies on the assumption that y is a more general word than x, which is not true for antonyms, making it the most suitable measure for this setting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Hypernym vs. Synonym SLQS performs well also in discriminating between hypernyms and synonyms, in which y is also not more general than x. We observed that in the joint context type, the difference in SLQS scores between synonyms and hypernyms was the largest. This may stem from the restrictiveness of this context type. For instance, among the most salient contexts we would expect to find informative contexts like drinks milk for cat and less informative ones like drinks water for animal, whereas the nonrestrictive single dependency context drinks would probably be present for both.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Another measure that works well is invCL: interestingly, other inclusion-based measures assign high scores to (x, y) when y includes many of x's contexts, which might be true also for synonyms (e.g. elevator and lift share many contexts). in-vCL, on the other hand, reduces with the ratio of y's contexts included in x, yielding lower scores for synonyms.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Hypernym vs. Coordination We found no consistency among BLESS and Weeds. On Weeds, inclusion-based measures (ClarkeDE, invCL and Weeds) showed the best results. The best performing measures on BLESS, however, were variants of SLQS, that showed to perform well in cases where the negative relation is symmetric (antonym, synonym and coordination). The difference could be explained by the nature of the datasets: the BLESS test set contains 1,185 hypernymy pairs, with only 129 distinct ys, many of which are general words like animal and object. The Weeds test set, on the other hand, was intentionally constructed to contain an overall unique y in each pair, and therefore contains much more specific ys (e.g. (quirk, strangeness)). For this reason, generality-based measures perform well on BLESS, and struggle with Weeds, which is handled better using inclusion-based measures.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 135, |
|
"text": "Weeds, inclusion-based measures (ClarkeDE, invCL and Weeds)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best Measure Per Classification Task", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "For comparison with the state-of-the-art, we evaluated several supervised hypernymy detection methods, based on the word embeddings of x and y: (Weeds et al., 2014) , and ASYM (Roller et al., 2014) . We downloaded several pretrained embeddings (Mikolov et al., 2013; Pennington et al., 2014; Levy and Goldberg, 2014) , and trained a logistic regression classifier to predict hypernymy. We used the 90% portion (originally the test set) as the train set, and the other 10% (originally the validation set) as a test set, reporting the best results among different vectors, method AP@100 original AP@100 switched \u2206 supervised concat, word2vec, L1 0.995 0.575 -0.42 unsupervised cosWeeds, win2d, ppmi 0.818 0.882 +0.064 Table 5 : Average Precision (AP) at k = 100 of the best supervised and unsupervised methods for hypernym vs. random-n, on the original BLESS validation set and the validation set with the artificially added switched hypernym pairs. method and regularization factor. 8 Table 4 displays the performance of the best classifier on each dataset, in a hypernym vs. a single relation setting. We also re-evaluated the unsupervised measures, this time reporting the results on the validation set (10%) for comparison.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 164, |
|
"text": "(Weeds et al., 2014)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 197, |
|
"text": "(Roller et al., 2014)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 266, |
|
"text": "(Mikolov et al., 2013;", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 267, |
|
"end": 291, |
|
"text": "Pennington et al., 2014;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 316, |
|
"text": "Levy and Goldberg, 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 716, |
|
"end": 723, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 984, |
|
"end": 991, |
|
"text": "Table 4", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "concatenation v x \u2295 v y (Baroni et al., 2012), dif- ference v y \u2212 v x", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
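
{

"text": "A minimal sketch of this supervised setup (our own code, assuming emb is a dictionary from word to embedding vector and using scikit-learn's LogisticRegression; dataset loading and the ASYM variant are omitted):\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\ndef pair_features(pairs, emb, mode='concat'):\n    # represent each (x, y) pair by concatenation or difference of its word vectors\n    feats = []\n    for x, y in pairs:\n        vx, vy = emb[x], emb[y]\n        feats.append(np.concatenate([vx, vy]) if mode == 'concat' else vy - vx)\n    return np.vstack(feats)\n\n# train_pairs / test_pairs: lists of (x, y); train_labels: 1 for hypernymy, else 0\n# clf = LogisticRegression(max_iter=1000).fit(pair_features(train_pairs, emb), train_labels)\n# hypernymy_prob = clf.predict_proba(pair_features(test_pairs, emb))[:, 1]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparison to State-of-the-art Supervised Methods",

"sec_num": "5.3"

},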
|
{ |
|
"text": "The overall performance of the embeddingbased classifiers is almost perfect, and in particular the best performance is achieved using the concatenation method (Baroni et al., 2012) with either GloVe (Pennington et al., 2014) or the dependency-based embeddings (Levy and Goldberg, 2014) . As expected, the unsupervised measures perform worse than the embedding-based classifiers, though generally not bad on their own.", |
|
"cite_spans": [ |
|
{ |
|
"start": 159, |
|
"end": 180, |
|
"text": "(Baroni et al., 2012)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 224, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 260, |
|
"end": 285, |
|
"text": "(Levy and Goldberg, 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "These results may suggest that unsupervised methods should be preferred only when no training data is available, leaving all the other cases to supervised methods. This is, however, not completely true. As others previously noticed, supervised methods do not actually learn the relation between x and y, but rather separate properties of either x or y. Levy et al. (2015b) named this the \"lexical memorization\" effect, i.e. memorizing that certain ys tend to appear in many positive pairs (prototypical hypernyms).", |
|
"cite_spans": [ |
|
{ |
|
"start": 353, |
|
"end": 372, |
|
"text": "Levy et al. (2015b)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "On that account, the Weeds dataset has been designed to avoid such memorization, with every word occurring once in each slot of the relation. While the performance of the supervised methods on this dataset is substantially lower than their performance on other datasets, it is yet well above the random baseline which we might expect from a method that can only memorize words it has seen during training. 9 This is an indication that supervised methods can abstract away from the words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Indeed, when we repeated the experiment with a lexical split of each dataset, i.e., such that the train and test set consist of distinct vocabularies, we found that the supervised methods' performance did not decrease dramatically, in contrast to the findings of Levy et al. (2015b) . The large performance gaps reported by Levy et al. (2015b) might be attributed to the size of their training sets. Their lexical split discarded around half of the pairs in the dataset and split the rest of the pairs equally to train and test, resulting in a relatively small train set. We performed the split such that only around 30% of the pairs in each dataset were discarded, and split the train and test sets with a ratio of roughly 90/10%, obtaining large enough train sets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 282, |
|
"text": "Levy et al. (2015b)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 343, |
|
"text": "Levy et al. (2015b)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Our experiment suggests that rather than memorizing the verbatim prototypical hypernyms, the supervised models might learn that certain regions in the vector space pertain to prototypical hypernyms. For example, device (from the BLESS train set) and appliance (from the BLESS test set) are two similar words, which are both prototypical hypernyms. Another interesting observation was recently made by Roller and Erk (2016) : they showed that when dependency-based embeddings are used, supervised distributional methods trace x and y's separate occurrences in different slots of Hearst patterns (Hearst, 1992) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 401, |
|
"end": 422, |
|
"text": "Roller and Erk (2016)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 608, |
|
"text": "(Hearst, 1992)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Whether supervised methods only memorize or also learn, it is more consensual that they lack the ability to capture the relation between x and y, and that they rather indicate how likely y (x) is to be a hypernym (hyponym) (Levy et al., 2015b; Santus et al., 2016a; Shwartz et al., 2016; Roller and Erk, 2016) . While this information is valuable, it cannot be solely relied upon for classification.", |
|
"cite_spans": [ |
|
{ |
|
"start": 223, |
|
"end": 243, |
|
"text": "(Levy et al., 2015b;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 244, |
|
"end": 265, |
|
"text": "Santus et al., 2016a;", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 266, |
|
"end": 287, |
|
"text": "Shwartz et al., 2016;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 288, |
|
"end": 309, |
|
"text": "Roller and Erk, 2016)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "To better understand the extent of this limitation, we conducted an experiment in a similar manner to the switched hypernym pairs in Santus et al. (2016a) . We used BLESS, which is the only dataset with random pairs. For each hypernym pair (x 1 , y 1 ), we sampled a word y 2 that participates in another hypernym pair (x 2 , y 2 ), such that (x 1 , y 2 ) is not in the dataset, and added (x 1 , y 2 ) as a random pair. We added 139 new pairs to the validation set, such as (rifle, animal) and (salmon, weapon). We then used the best supervised and unsupervised methods for hypernym vs. randomn on BLESS to re-classify the revised validation set. Table 5 displays the experiment results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 154, |
|
"text": "Santus et al. (2016a)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 647, |
|
"end": 654, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
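
{

"text": "The switched hypernym construction can be sketched as follows (our own illustrative code; the exact sampling procedure in the paper may differ, we only mirror the stated constraints):\n\nimport random\n\ndef switched_pairs(hypernym_pairs, n=139, seed=0):\n    # for a hypernym pair (x1, y1), sample a y2 from another hypernym pair\n    # such that (x1, y2) is not in the dataset, and use (x1, y2) as a negative\n    rng = random.Random(seed)\n    existing = set(hypernym_pairs)\n    ys = sorted({y for _, y in hypernym_pairs})\n    switched = []\n    for x1, y1 in hypernym_pairs:\n        candidates = [y2 for y2 in ys if y2 != y1 and (x1, y2) not in existing]\n        if candidates:\n            switched.append((x1, rng.choice(candidates)))\n        if len(switched) == n:\n            break\n    return switched",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparison to State-of-the-art Supervised Methods",

"sec_num": "5.3"

},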
|
{ |
|
"text": "The switched hypernym experiment paints a much less optimistic picture of the embeddings' actual performance, with a drop of 42 points in average precision. 121 out of the 139 switched hypernym pairs were falsely classified as hypernyms. Examining the y words of these pairs reveals general words that appear in many hypernym pairs (e.g. animal, object, vehicle). The unsupervised measure was not similarly affected by the switched pairs, and the performance even slightly increased. This result is not surprising, since most unsupervised measures aim to capture aspects of the relation between x and y, while not relying on information about one of the words in the pair. 10", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Comparison to State-of-the-art Supervised Methods", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "The results in Section 5 suggest that a supervised method using the unsupervised measures as features could possibly be the best of both worlds. We would expect it to be more robust than embeddingbased methods on the one hand, while being more informative than any single unsupervised measure on the other hand.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Such a method was developed by Santus et al. (2016a) , however using mostly features that describe a single word, e.g. frequency and entropy. It was shown to be competitive with the state-of-theart supervised methods. With that said, it was also shown to be sensitive to the distribution of training examples in a specific dataset, like the embeddingbased methods.", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 52, |
|
"text": "Santus et al. (2016a)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We conducted a similar experiment, with a much larger number of unsupervised features, namely the various measure scores, and encountered the same issue. While the performance was good, it dropped dramatically when the model was tested on a different test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We conjecture that the problem stems from the currently available datasets, which are all somewhat artificial and biased. Supervised methods which are strongly based on the relation between the words, e.g. those that rely on path-based information (Shwartz et al., 2016) , manage to overcome the bias. Distributional methods, on the other hand, are based on a weaker notion of the relation between words, hence are more prone to overfit the distribution of training instances in a specific dataset. In the future, we hope that new datasets will be available for the task, which would be drawn from corpora and will reflect more realistic distributions of words and semantic relations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 248, |
|
"end": 270, |
|
"text": "(Shwartz et al., 2016)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We performed an extensive evaluation of unsupervised methods for discriminating hypernyms from other semantic relations. We found that there is no single combination of measure and parameters which is always preferred; however, we suggested a principled linguistic-based analysis of the most suitable measure for each task that yields consistent performance across different datasets.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We investigated several new variants of existing methods, and found that some variants of SLQS turned out to be superior on certain tasks. In addition, we have tested for the first time the joint context type (Chersoni et al., 2016) , which was found to be very discriminative, and might hopefully benefit other semantic tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 232, |
|
"text": "(Chersoni et al., 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "For comparison, we evaluated the state-ofthe-art supervised methods on the datasets, and they have shown to outperform the unsupervised ones, while also being efficient and easier to use. However, a deeper analysis of their performance demonstrated that, as previously suggested, these methods do not capture the relation between x and y, but rather indicate the \"prior probability\" of either word to be a hyponym or a hypernym. As a consequence, supervised methods are sensitive to the distribution of examples in a particular dataset, making them less reliable for real-world applications. Being motivated by linguistic hypotheses, and independent from training data, unsupervised measures were shown to be more robust. In this sense, unsupervised methods can still play a relevant role, especially if combined with supervised methods, in the decision whether the relation holds or not.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In our preliminary experiments, we noticed that the entropies of the targets and those of the contexts are not highly correlated, yielding a Spearman's correlation of up to 0.448 for window based spaces, and up to 0.097 for the dependency-based ones (p < 0.01).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
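
{

"text": "The following is a minimal sketch of this check, assuming SciPy and NumPy; the co-occurrence matrix M is a random placeholder and the definitions are simplified (SLQS itself uses the median entropy of a word's N most associated contexts). It computes, for each target, the entropy of its context distribution and the median entropy of the contexts it co-occurs with, and reports Spearman's correlation between the two.

import numpy as np
from scipy.stats import spearmanr

def entropy(counts):
    p = np.asarray(counts, dtype=float)
    p = p[p > 0] / p.sum()
    return float(-(p * np.log2(p)).sum())

# Placeholder co-occurrence matrix: rows = target words, columns = contexts.
rng = np.random.default_rng(0)
M = rng.poisson(1.0, size=(200, 500))

target_entropies = np.array([entropy(row) for row in M])
context_entropies = np.array([entropy(col) for col in M.T])

# Summarize, for each target, the entropies of the contexts it occurs with.
per_target_context_entropy = np.array([
    np.median(context_entropies[row > 0]) if (row > 0).any() else 0.0
    for row in M
])

rho, p = spearmanr(target_entropies, per_target_context_entropy)
print(f'Spearman rho = {rho:.3f} (p = {p:.3g})')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "",

"sec_num": null

},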
|
{ |
|
"text": "We removed the entailment relation, which had too few instances, and conflated relations to coarse-grained relations (e.g. HasProperty and HasA into attribute).4 Lenci/Benotto includes pairs to which more than one relation is assigned, e.g. when x or y are polysemous, and re-", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We considered at least 10 measures, allowing scores slightly lower than 0.8 when others were unavailable.7 In the hypernymy dataset ofShwartz et al. (2016),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In our preliminary experiments we also trained other classifiers used in the distributional hypernymy detection literature (SVM and SVM+RBF kernel), that performed similarly. We report the results for logistic regression, since we use the prediction probabilities to measure average precision.9 The dataset is balanced between its two classes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Turney and Mohammad (2015) have also shown that unsupervised methods are more robust than supervised ones in a transfer-learning experiment, when the \"training data\" was used to tune their parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The authors would like to thank Ido Dagan, Alessandro Lenci, and Yuji Matsumoto for their help and advice. Vered Shwartz is partially supported by an Intel ICRI-CI grant, the Israel Science Foundation grant 880/12, and the German Research Foundation through the German-Israeli Project Cooperation (DIP, grant DA 1600/1-1). Enrico Santus is partially supported by HK PhD Fellowship Scheme under PF12-13656.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Distributional memory: A general framework for corpus-based semantics", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computational Linguistics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "673--721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni and Alessandro Lenci. 2010. Dis- tributional memory: A general framework for corpus-based semantics. Computational Linguis- tics, 36(4):673-721.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Proceedings of the gems 2011 workshop on geometrical models of natural language semantics. In How we BLESSed distributional semantic evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni and Alessandro Lenci. 2011. Proceed- ings of the gems 2011 workshop on geometrical models of natural language semantics. In How we BLESSed distributional semantic evaluation, pages 1-10. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The wacky wide web: a collection of very large linguistically processed web-crawled corpora. Language resources and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [], |
|
"last": "Bernardini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adriano", |
|
"middle": [], |
|
"last": "Ferraresi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eros", |
|
"middle": [], |
|
"last": "Zanchetta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "209--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni, Silvia Bernardini, Adriano Ferraresi, and Eros Zanchetta. 2009. The wacky wide web: a collection of very large linguistically pro- cessed web-crawled corpora. Language resources and evaluation, 43(3):209-226.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Entailment above the word level in distributional semantics", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raffaella", |
|
"middle": [], |
|
"last": "Bernardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ngoc-Quynh", |
|
"middle": [], |
|
"last": "Do", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chung-Chieh", |
|
"middle": [], |
|
"last": "Shan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "23--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Baroni, Raffaella Bernardi, Ngoc-Quynh Do, and Chung-chieh Shan. 2012. Entailment above the word level in distributional semantics. In Proceed- ings of the 13th Conference of the European Chap- ter of the Association for Computational Linguistics, pages 23-32. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Distributional models for semantic relations: A sudy on hyponymy and antonymy", |
|
"authors": [ |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Benotto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giulia Benotto. 2015. Distributional models for semantic relations: A sudy on hyponymy and antonymy. PhD Thesis, University of Pisa.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Extracting semantic representations from word co-occurrence statistics: A computational study. Behavior research methods", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph P", |
|
"middle": [], |
|
"last": "Bullinaria", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "39", |
|
"issue": "", |
|
"pages": "510--526", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John A Bullinaria and Joseph P Levy. 2007. Extracting semantic representations from word co-occurrence statistics: A computational study. Behavior re- search methods, 39(3):510-526.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Representing verbs with rich contexts: an evaluation on verb similarity", |
|
"authors": [ |
|
{ |
|
"first": "Emmanuele", |
|
"middle": [], |
|
"last": "Chersoni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philippe", |
|
"middle": [], |
|
"last": "Blache", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1967--1972", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emmanuele Chersoni, Enrico Santus, Alessandro Lenci, Philippe Blache, and Chu-Ren Huang. 2016. Representing verbs with rich contexts: an evaluation on verb similarity. In Proceedings of the 2016 Con- ference on Empirical Methods in Natural Language Processing, pages 1967-1972. Association for Com- putational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Word association norms, mutual information, and lexicography", |
|
"authors": [ |
|
{ |
|
"first": "Kenneth", |
|
"middle": [ |
|
"Ward" |
|
], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Hanks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Computational linguistics", |
|
"volume": "16", |
|
"issue": "1", |
|
"pages": "22--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenneth Ward Church and Patrick Hanks. 1990. Word association norms, mutual information, and lexicog- raphy. Computational linguistics, 16(1):22-29.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Context-theoretic semantics for natural language: an overview", |
|
"authors": [ |
|
{ |
|
"first": "Daoud", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Workshop on Geometrical Models of Natural Language Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "112--119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daoud Clarke. 2009. Context-theoretic semantics for natural language: an overview. In Proceedings of the Workshop on Geometrical Models of Natural Language Semantics, pages 112-119. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Recognizing textual entailment", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Ido Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sammons", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ido Dagan, Dan Roth, and Mark Sammons. 2013. Rec- ognizing textual entailment. Morgan & Claypool Publishers.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The statistics of word cooccurrences: word pairs and collocations. Dissertation", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [ |
|
"Evert" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Evert. 2005. The statistics of word cooccur- rences: word pairs and collocations. Dissertation.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Corpora and collocations. Corpus linguistics. An international handbook", |
|
"authors": [ |
|
{ |
|
"first": "Stefan", |
|
"middle": [ |
|
"Evert" |
|
], |
|
"last": "", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "223--233", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stefan Evert. 2008. Corpora and collocations. Corpus linguistics. An international handbook, 2:223-233.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Building a very large corpus of english obtained by web crawling: ukwac. Masters thesis", |
|
"authors": [ |
|
{ |
|
"first": "Adriano", |
|
"middle": [], |
|
"last": "Ferraresi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adriano Ferraresi. 2007. Building a very large corpus of english obtained by web crawling: ukwac. Mas- ters thesis, University of Bologna, Italy.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Distributional structure. Word", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Zellig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1954, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "146--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zellig S. Harris. 1954. Distributional structure. Word, 10(2-3):146-162.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Automatic acquisition of hyponyms from large text corpora", |
|
"authors": [ |
|
{ |
|
"first": "Marti", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Hearst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "The 15th International Conference on Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marti A. Hearst. 1992. Automatic acquisition of hy- ponyms from large text corpora. In COLING 1992 Volume 2: The 15th International Conference on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Exploiting image generality for lexical entailment detection", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Rimell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Vuli\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--124", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Laura Rimell, Ivan Vuli\u0107, and Stephen Clark. 2015. Exploiting image generality for lex- ical entailment detection. In Proceedings of the 53rd Annual Meeting of the Association for Compu- tational Linguistics and the 7th International Joint Conference on Natural Language Processing (Vol- ume 2: Short Papers), pages 119-124. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Directional distributional similarity for lexical inference", |
|
"authors": [ |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Kotlerman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Idan", |
|
"middle": [], |
|
"last": "Szpektor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maayan", |
|
"middle": [], |
|
"last": "Zhitomirsky-Geffet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Natural Language Engineering", |
|
"volume": "16", |
|
"issue": "04", |
|
"pages": "359--389", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lili Kotlerman, Ido Dagan, Idan Szpektor, and Maayan Zhitomirsky-Geffet. 2010. Directional distribu- tional similarity for lexical inference. Natural Lan- guage Engineering, 16(04):359-389.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Identifying hypernyms in distributional semantic spaces", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Benotto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "75--79", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Lenci and Giulia Benotto. 2012. Identi- fying hypernyms in distributional semantic spaces. In *SEM 2012: The First Joint Conference on Lexi- cal and Computational Semantics -Volume 1: Pro- ceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth Interna- tional Workshop on Semantic Evaluation (SemEval 2012), pages 75-79. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Dependencybased word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "302--308", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- based word embeddings. In Proceedings of the 52nd Annual Meeting of the Association for Computa- tional Linguistics (Volume 2: Short Papers), pages 302-308. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Improving distributional similarity with lessons learned from word embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "211--225", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015a. Improving distributional similarity with lessons learned from word embeddings. Transactions of the Association for Computational Linguistics, 3:211- 225.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Do supervised distributional methods really learn lexical inference relations?", |
|
"authors": [ |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Remus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "970--976", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omer Levy, Steffen Remus, Chris Biemann, and Ido Dagan. 2015b. Do supervised distributional meth- ods really learn lexical inference relations? In Pro- ceedings of the 2015 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 970-976. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "An information-theoretic definition of similarity", |
|
"authors": [ |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "ICML", |
|
"volume": "98", |
|
"issue": "", |
|
"pages": "296--304", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dekang Lin. 1998. An information-theoretic defini- tion of similarity. ICML, 98:296-304.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Cor- rado, and Jeff Dean. 2013. Distributed representa- tions of words and phrases and their compositional- ity. In Advances in neural information processing systems, pages 3111-3119.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A graph-based algorithm for inducing lexical taxonomies from scratch", |
|
"authors": [ |
|
{ |
|
"first": "Roberto", |
|
"middle": [], |
|
"last": "Navigli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Velardi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stefano", |
|
"middle": [], |
|
"last": "Faralli", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "1872--1877", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roberto Navigli, Paola Velardi, and Stefano Faralli. 2011. A graph-based algorithm for inducing lex- ical taxonomies from scratch. In Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence, volume 11, pages 1872- 1877.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Dependency-based construction of semantic space models", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Pad\u00f3", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Computational Linguistics", |
|
"volume": "33", |
|
"issue": "2", |
|
"pages": "161--199", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sebastian Pad\u00f3 and Mirella Lapata. 2007. Dependency-based construction of semantic space models. Computational Linguistics, 33(2):161-199.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the 2014 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Distributional lexical entailment by topic coherence", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Rimell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "511--519", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Rimell. 2014. Distributional lexical entailment by topic coherence. In Proceedings of the 14th Con- ference of the European Chapter of the Association for Computational Linguistics, pages 511-519. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Relations such as hypernymy: Identifying and exploiting hearst patterns in distributional vectors for lexical entailment", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2163--2172", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Roller and Katrin Erk. 2016. Relations such as hypernymy: Identifying and exploiting hearst pat- terns in distributional vectors for lexical entailment. In Proceedings of the 2016 Conference on Empiri- cal Methods in Natural Language Processing, pages 2163-2172. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Inclusive yet selective: Supervised distributional hypernymy detection", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Roller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katrin", |
|
"middle": [], |
|
"last": "Erk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gemma", |
|
"middle": [], |
|
"last": "Boleda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1025--1036", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Roller, Katrin Erk, and Gemma Boleda. 2014. Inclusive yet selective: Supervised distributional hy- pernymy detection. In Proceedings of COLING 2014, the 25th International Conference on Compu- tational Linguistics: Technical Papers, pages 1025- 1036. Dublin City University and Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Introduction to modern information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Gerard", |
|
"middle": [], |
|
"last": "Salton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Mcgill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1986, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gerard Salton and Michael J. McGill. 1986. Introduc- tion to modern information retrieval. McGraw-Hill, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Chasing hypernyms in vector spaces with entropy", |
|
"authors": [ |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sabine", |
|
"middle": [], |
|
"last": "Schulte Im Walde", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "38--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrico Santus, Alessandro Lenci, Qin Lu, and Sabine Schulte im Walde. 2014. Chasing hypernyms in vector spaces with entropy. In Proceedings of the 14th Conference of the European Chapter of the As- sociation for Computational Linguistics, volume 2: Short Papers, pages 38-42. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Proceedings of the 4th workshop on linked data in linguistics: Resources and applications. In EVALution 1.0: an Evolving Semantic Dataset for Training and Evaluation of Distributional Semantic Models", |
|
"authors": [ |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frances", |
|
"middle": [], |
|
"last": "Yung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "64--69", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrico Santus, Frances Yung, Alessandro Lenci, and Chu-Ren Huang. 2015. Proceedings of the 4th workshop on linked data in linguistics: Resources and applications. In EVALution 1.0: an Evolving Se- mantic Dataset for Training and Evaluation of Dis- tributional Semantic Models, pages 64-69. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Nine features in a random forest to learn taxonomical semantic relations", |
|
"authors": [ |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tin-Shing", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4557--4564", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrico Santus, Alessandro Lenci, Tin-Shing Chiu, Qin Lu, and Chu-Ren Huang. 2016a. Nine features in a random forest to learn taxonomical semantic re- lations. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pages 4557-4564. European Lan- guage Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Unsupervised measure of word similarity: How to outperform cooccurrence and vector cosine in vsms", |
|
"authors": [ |
|
{ |
|
"first": "Enrico", |
|
"middle": [], |
|
"last": "Santus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tin-Shing", |
|
"middle": [], |
|
"last": "Chiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qin", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chu-Ren", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Thirtieth AAAI Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4260--4261", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Enrico Santus, Alessandro Lenci, Tin-Shing Chiu, Qin Lu, and Chu-Ren Huang. 2016b. Unsupervised measure of word similarity: How to outperform co- occurrence and vector cosine in vsms. In Thirtieth AAAI Conference on Artificial Intelligence, pages 4260-4261.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Improving hypernymy detection with an integrated path-based and distributional method", |
|
"authors": [ |
|
{ |
|
"first": "Vered", |
|
"middle": [], |
|
"last": "Shwartz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2389--2398", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vered Shwartz, Yoav Goldberg, and Ido Dagan. 2016. Improving hypernymy detection with an integrated path-based and distributional method. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 2389-2398. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Learning syntactic patterns for automatic hypernym discovery", |
|
"authors": [ |
|
{ |
|
"first": "Rion", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "17", |
|
"issue": "", |
|
"pages": "1297--1304", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rion Snow, Daniel Jurafsky, and Andrew Y. Ng. 2005. Learning syntactic patterns for automatic hypernym discovery. In Advances in Neural Information Pro- cessing Systems 17, pages 1297-1304. MIT Press.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Semantic taxonomy induction from heterogenous evidence", |
|
"authors": [ |
|
{ |
|
"first": "Rion", |
|
"middle": [], |
|
"last": "Snow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Ng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "801--808", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rion Snow, Daniel Jurafsky, and Andrew Y. Ng. 2006. Semantic taxonomy induction from heterogenous evidence. In Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computa- tional Linguistics, pages 801-808. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Experiments with three approaches to recognizing lexical entailment", |
|
"authors": [ |
|
{

"first": "Peter",

"middle": [

"D"

],

"last": "Turney",

"suffix": ""

},

{

"first": "Saif",

"middle": [

"M"

],

"last": "Mohammad",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Natural Language Engineering", |
|
"volume": "21", |
|
"issue": "03", |
|
"pages": "437--476", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter D. Turney and Saif M. Mohammad. 2015. Ex- periments with three approaches to recognizing lex- ical entailment. Natural Language Engineering, 21(03):437-476.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "A general framework for distributional similarity", |
|
"authors": [ |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Weeds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weir", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julie Weeds and David Weir. 2003. A general frame- work for distributional similarity. In Proceedings of the 2003 Conference on Empirical Methods in Nat- ural Language Processing, pages 81-88.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Learning to distinguish hypernyms and co-hyponyms", |
|
"authors": [ |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Weeds", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daoud", |
|
"middle": [], |
|
"last": "Clarke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Reffin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Weir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bill", |
|
"middle": [], |
|
"last": "Keller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of COL-ING 2014, the 25th International Conference on Computational Linguistics: Technical Papers", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2249--2259", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julie Weeds, Daoud Clarke, Jeremy Reffin, David Weir, and Bill Keller. 2014. Learning to distinguish hy- pernyms and co-hyponyms. In Proceedings of COL- ING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pages 2249-2259. Dublin City University and Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"type_str": "figure", |
|
"num": null, |
|
"uris": null, |
|
"text": "Figure 1 for an illustration." |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>dataset</td><td>relations</td><td>#instances</td><td>size</td></tr><tr><td/><td>hypernym</td><td>1,337</td><td/></tr><tr><td/><td>meronym</td><td>2,943</td><td/></tr><tr><td/><td>coordination</td><td>3,565</td><td/></tr><tr><td>BLESS</td><td>event attribute</td><td>3,824 2,731</td><td>26,554</td></tr><tr><td/><td>random-n</td><td>6,702</td><td/></tr><tr><td/><td>random-v</td><td>3,265</td><td/></tr><tr><td/><td>random-j</td><td>2,187</td><td/></tr><tr><td/><td>hypernym</td><td>3,637</td><td/></tr><tr><td>EVALution</td><td>meronym attribute</td><td>1,819 2,965</td><td>13,465 3</td></tr><tr><td/><td>synonym</td><td>1,888</td><td/></tr><tr><td/><td>antonym</td><td>3,156</td><td/></tr><tr><td/><td>hypernym</td><td>1,933</td><td/></tr><tr><td>Lenci/Benotto</td><td>synonym</td><td>1,311</td><td>5,010</td></tr><tr><td/><td>antonym</td><td>1,766</td><td/></tr><tr><td>Weeds</td><td>hypernym coordination</td><td>1,469 1,459</td><td>2,928</td></tr><tr><td>Table 1</td><td/><td/><td/></tr><tr><td>On the other</td><td/><td/><td/></tr></table>", |
|
"text": "The semantic relations, number of instances in each relation, and size of each dataset.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>relation</td><td>measure</td><td colspan=\"2\">context type feature weighting</td></tr><tr><td/><td>cosWeeds</td><td>dep</td><td>ppmi</td></tr><tr><td>meronym</td><td>Weeds</td><td>dep / joint</td><td>ppmi</td></tr><tr><td/><td>ClarkeDE</td><td>dep / joint</td><td>ppmi / freq</td></tr><tr><td/><td>APSyn</td><td>joint</td><td>freq</td></tr><tr><td>attribute</td><td>cosine Lin</td><td>joint dep</td><td>freq ppmi</td></tr><tr><td/><td>cosine</td><td>dep</td><td>ppmi</td></tr><tr><td>antonym</td><td>SLQS</td><td>-</td><td>-</td></tr><tr><td/><td>SLQS row</td><td>joint</td><td>(freq/ppmi/plmi)</td></tr><tr><td>synonym</td><td>SLQS row/SLQS row sub</td><td>dep</td><td>ppmi</td></tr><tr><td/><td>invCL</td><td>win2/5/5d</td><td>freq</td></tr><tr><td>coordination</td><td/><td>-</td><td/></tr></table>", |
|
"text": "Intersection of datasets' top-performing measures when discriminating between hypernymy and each other relation.", |
|
"type_str": "table", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |