|
{ |
|
"paper_id": "S13-1014", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:42:15.953317Z" |
|
}, |
|
"title": "DeepPurple: Lexical, String and Affective Feature Fusion for Sentence-Level Semantic Similarity Estimation", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Malandrakis", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Signal Analysis and Interpretation Laboratory (SAIL)", |
|
"institution": "USC", |
|
"location": { |
|
"postCode": "90089", |
|
"settlement": "Los Angeles", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Elias", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Crete", |
|
"location": { |
|
"postCode": "73100", |
|
"settlement": "Chania", |
|
"country": "Greece" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Vassiliki", |
|
"middle": [], |
|
"last": "Prokopi", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Crete", |
|
"location": { |
|
"postCode": "73100", |
|
"settlement": "Chania", |
|
"country": "Greece" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Technical University of Crete", |
|
"location": { |
|
"postCode": "73100", |
|
"settlement": "Chania", |
|
"country": "Greece" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shrikanth", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Signal Analysis and Interpretation Laboratory (SAIL)", |
|
"institution": "USC", |
|
"location": { |
|
"postCode": "90089", |
|
"settlement": "Los Angeles", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper describes our submission for the *SEM shared task of Semantic Textual Similarity. We estimate the semantic similarity between two sentences using regression models with features: 1) n-gram hit rates (lexical matches) between sentences, 2) lexical semantic similarity between non-matching words, 3) string similarity metrics, 4) affective content similarity and 5) sentence length. Domain adaptation is applied in the form of independent models and a model selection strategy achieving a mean correlation of 0.47.", |
|
"pdf_parse": { |
|
"paper_id": "S13-1014", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper describes our submission for the *SEM shared task of Semantic Textual Similarity. We estimate the semantic similarity between two sentences using regression models with features: 1) n-gram hit rates (lexical matches) between sentences, 2) lexical semantic similarity between non-matching words, 3) string similarity metrics, 4) affective content similarity and 5) sentence length. Domain adaptation is applied in the form of independent models and a model selection strategy achieving a mean correlation of 0.47.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Text semantic similarity estimation has been an active research area, thanks to a variety of potential applications and the wide availability of data afforded by the world wide web. Semantic textual similarity (STS) estimates can be used for information extraction (Szpektor and Dagan, 2008) , question answering (Harabagiu and Hickl, 2006) and machine translation (Mirkin et al., 2009) . Term-level similarity has been successfully applied to problems like grammar induction (Meng and Siu, 2002) and affective text categorization (Malandrakis et al., 2011) . In this work, we built on previous research and our submission to SemEval'2012 (Malandrakis et al., 2012) to create a sentence-level STS model for the shared task of *SEM 2013 (Agirre et al., 2013) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 291, |
|
"text": "(Szpektor and Dagan, 2008)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 313, |
|
"end": 340, |
|
"text": "(Harabagiu and Hickl, 2006)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 365, |
|
"end": 386, |
|
"text": "(Mirkin et al., 2009)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 476, |
|
"end": 496, |
|
"text": "(Meng and Siu, 2002)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 531, |
|
"end": 557, |
|
"text": "(Malandrakis et al., 2011)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 665, |
|
"text": "(Malandrakis et al., 2012)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 736, |
|
"end": 757, |
|
"text": "(Agirre et al., 2013)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Semantic similarity between words has been well researched, with a variety of knowledge-based (Miller, 1990; Budanitsky and Hirst, 2006) and corpus-based (Baroni and Lenci, 2010; Potamianos, 2010) metrics proposed. Moving to sentences increases the complexity exponentially and as a result has led to measurements of similarity at various levels: lexical (Malakasiotis and Androutsopoulos, 2007) , syntactic (Malakasiotis, 2009; Zanzotto et al., 2009) , and semantic (Rinaldi et al., 2003; Bos and Markert, 2005) . Machine translation evaluation metrics can be used to estimate lexical level similarity (Finch et al., 2005; Perez and Alfonseca, 2005) , including BLEU (Papineni et al., 2002) , a metric using word n-gram hit rates. The pilot task of sentence STS in SemEval 2012 (Agirre et al., 2012) showed a similar trend towards multi-level similarity, with the top performing systems utilizing large amounts of partial similarity metrics and domain adaptation (the use of separate models for each input domain) (B\u00e4r et al., 2012; \u0160ari\u0107 et al., 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 108, |
|
"text": "(Miller, 1990;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 109, |
|
"end": 136, |
|
"text": "Budanitsky and Hirst, 2006)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 178, |
|
"text": "(Baroni and Lenci, 2010;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 395, |
|
"text": "(Malakasiotis and Androutsopoulos, 2007)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 408, |
|
"end": 428, |
|
"text": "(Malakasiotis, 2009;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 429, |
|
"end": 451, |
|
"text": "Zanzotto et al., 2009)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 489, |
|
"text": "(Rinaldi et al., 2003;", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 490, |
|
"end": 512, |
|
"text": "Bos and Markert, 2005)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 623, |
|
"text": "(Finch et al., 2005;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 650, |
|
"text": "Perez and Alfonseca, 2005)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 691, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 779, |
|
"end": 800, |
|
"text": "(Agirre et al., 2012)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1015, |
|
"end": 1033, |
|
"text": "(B\u00e4r et al., 2012;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1034, |
|
"end": 1053, |
|
"text": "\u0160ari\u0107 et al., 2012)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our approach is originally motivated by BLEU and primarily utilizes \"hard\" and \"soft\" n-gram hit rates to estimate similarity. Compared to last year, we utilize different alignment strategies (to decide which n-grams should be compared with which). We also include string similarities (at the token and character level) and similarity of affective content, expressed through the difference in sentence arousal and valence ratings. Finally we added domain adaptation: the creation of separate models per domain and a strategy to select the most appropriate model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our model is based upon that submitted for the same task in 2012 (Malandrakis et al., 2012) . To estimate semantic similarity metrics we use a supervised model with features extracted using corpus-based word-level similarity metrics. To combine these metrics into a sentence-level similarity score we use a modification of BLEU (Papineni et al., 2002) that utilizes word-level semantic similarities, string level comparisons and comparisons of affective content, detailed below.", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 91, |
|
"text": "(Malandrakis et al., 2012)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 328, |
|
"end": 351, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Co-occurrence-based. The semantic similarity between two words, w i and w j , is estimated as their pointwise mutual information (Church and Hanks, 1990) : I(i, j) = logp (i,j) p(i)p(j) , wherep(i) andp(j) are the occurrence probabilities of w i and w j , respectively, while the probability of their co-occurrence is denoted byp(i, j). In our previous participation in SemEval12-STS task (Malandrakis et al., 2012) we employed a modification of the pointwise mutual information based on the maximum sense similarity assumption (Resnik, 1995) and the minimization of the respective error in similarity estimation. In particular, exponential weights \u03b1 were introduced in order to reduce the overestimation of denominator probabilities. The modified metric I a (i, j), is defined as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 153, |
|
"text": "(Church and Hanks, 1990)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
|
{ |
|
"start": 389, |
|
"end": 415, |
|
"text": "(Malandrakis et al., 2012)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 528, |
|
"end": 542, |
|
"text": "(Resnik, 1995)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word level semantic similarity", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "I a (i, j) = 1 2 logp (i, j) p \u03b1 (i)p(j) + logp (i, j) p(i)p \u03b1 (j)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word level semantic similarity", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": ". 1The weight \u03b1 was estimated on the corpus of (Iosif and Potamianos, 2012) in order to maximize word sense coverage in the semantic neighborhood of each word. The I a (i, j) metric using the estimated value of \u03b1 = 0.8 was shown to significantly outperform I(i, j) and to achieve state-of-the-art results on standard semantic similarity datasets (Rubenstein and Goodenough, 1965; Miller and Charles, 1998; Finkelstein et al., 2002) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 346, |
|
"end": 379, |
|
"text": "(Rubenstein and Goodenough, 1965;", |
|
"ref_id": "BIBREF31" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 405, |
|
"text": "Miller and Charles, 1998;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 431, |
|
"text": "Finkelstein et al., 2002)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Word level semantic similarity", |
|
"sec_num": "2.1" |
|
}, |
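
{

"text": "A minimal Python sketch (ours, not part of the original system) of the baseline PMI and the modified metric of Eq. (1), assuming the occurrence and co-occurrence probabilities have already been estimated from corpus counts:\n\nimport math\n\ndef pmi(p_i, p_j, p_ij):\n    # Baseline pointwise mutual information I(i, j).\n    return math.log(p_ij / (p_i * p_j))\n\ndef modified_pmi(p_i, p_j, p_ij, alpha=0.8):\n    # Eq. (1): the exponential weight alpha discounts each\n    # denominator probability in turn; alpha = 0.8 is the value\n    # estimated on the corpus of (Iosif and Potamianos, 2012).\n    term1 = math.log(p_ij / (p_i ** alpha * p_j))\n    term2 = math.log(p_ij / (p_i * p_j ** alpha))\n    return 0.5 * (term1 + term2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Word level semantic similarity",

"sec_num": "2.1"

},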
|
{ |
|
"text": "The fundamental assumption behind context-based metrics is that similarity of context implies similarity of meaning (Harris, 1954) . A contextual window of size 2H + 1 words is centered on the word of interest w i and lexical features are extracted. For every instance of w i in the corpus the H words left and right of w i formulate a feature vector v i . For a given value of H the context-based semantic similarity between two words, w i and w j , is computed as the cosine of their feature vectors:", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 130, |
|
"text": "(Harris, 1954)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context-based:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Q H (i, j) = v i .v j ||v i || ||v j || .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context-based:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The elements of feature vectors can be weighted according various schemes [ (Iosif and Potamianos, 2010) ], while, here we use a binary scheme.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 104, |
|
"text": "(Iosif and Potamianos, 2010)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context-based:", |
|
"sec_num": null |
|
}, |
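
{

"text": "A short sketch of the context-based metric $Q_H$ under the binary weighting scheme used here, assuming the corpus is an iterable of tokenized sentences (function names are illustrative); with binary vectors the cosine reduces to the size of the feature intersection over the geometric mean of the feature set sizes:\n\nimport math\n\ndef context_features(corpus, word, H):\n    # Binary features: the set of words observed within a\n    # window of H tokens left and right of each occurrence.\n    feats = set()\n    for tokens in corpus:\n        for k, t in enumerate(tokens):\n            if t == word:\n                feats.update(tokens[max(0, k - H):k])\n                feats.update(tokens[k + 1:k + 1 + H])\n    return feats\n\ndef q_h(corpus, w_i, w_j, H=2):\n    v_i = context_features(corpus, w_i, H)\n    v_j = context_features(corpus, w_j, H)\n    if not v_i or not v_j:\n        return 0.0\n    return len(v_i & v_j) / math.sqrt(len(v_i) * len(v_j))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context-based:",

"sec_num": null

},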
|
{ |
|
"text": "Network-based: The aforementioned similarity metrics were used for the definition of a semantic network . A number of similarity metrics were proposed under either the attributional similarity (Turney, 2006) or the maximum sense similarity (Resnik, 1995) assumptions of lexical semantics 1 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 193, |
|
"end": 207, |
|
"text": "(Turney, 2006)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 254, |
|
"text": "(Resnik, 1995)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context-based:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To utilize word-level semantic similarities in the sentence-level task we use a modified version of BLEU (Papineni et al., 2002) . The model works in two passes: the first pass identifies exact matches (similar to baseline BLEU), the second pass compares non-matched terms using semantic similarity. Non-matched terms from the hypothesis sentence are compared with all terms of the reference sentence (regardless of whether they were matched during the first pass). In the case of bigram and higher order terms, the process is applied recursively: the bigrams are decomposed into two words and the similarity between them is estimated by applying the same method to the words. All word similarity metrics used are peak-to-peak normalized in the [0,1] range, so they serve as a \"degree-of-match\". The semantic similarity scores from term pairs are summed (just like n-gram hits) to obtain a BLEU-like hit-rate. Alignment is performed via maximum similarity: we iterate on the hypothesis n-grams, left-to-right, and compare each with the most similar n-gram in the reference. The features produced by this process are \"soft\" hit-rates (for 1-, 2-, 3-, 4-grams) 2 . We also use the \"hard\" hit rates produced by baseline BLEU as features of the final model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 128, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence level similarities", |
|
"sec_num": "2.2" |
|
}, |
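
{

"text": "A simplified sketch of the unigram 'soft' hit rate, assuming a word-level similarity function sim(w, r) already peak-to-peak normalized to [0, 1] (e.g., the modified PMI above); exact matches are counted first, as in baseline BLEU, and per footnote 2 the rate is computed in both directions and averaged:\n\ndef soft_hit_rate(hyp, ref, sim):\n    # First pass: exact matches, consumed as in baseline BLEU.\n    hits, unmatched, pool = 0.0, [], list(ref)\n    for w in hyp:\n        if w in pool:\n            hits += 1.0\n            pool.remove(w)\n        else:\n            unmatched.append(w)\n    # Second pass: each non-matched hypothesis word scores the\n    # similarity of its most similar reference word.\n    for w in unmatched:\n        if ref:\n            hits += max(sim(w, r) for r in ref)\n    return hits / len(hyp) if hyp else 0.0\n\ndef symmetric_soft_hit_rate(s1, s2, sim):\n    # Footnote 2: compute in both directions and average.\n    return 0.5 * (soft_hit_rate(s1, s2, sim) + soft_hit_rate(s2, s1, sim))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentence level similarities",

"sec_num": "2.2"

},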
|
{ |
|
"text": "We use the following string-based similarity features: 1) Longest Common Subsequence Similarity (LCSS) (Lin and Och, 2004) based on the Longest Common Subsequence (LCS) character-based dy-namic programming algorithm. LCSS represents the length of the longest string (or strings) that is a substring (or are substrings) of two or more strings. 2) Skip bigram co-occurrence measures the overlap of skip-bigrams between two sentences or phrases. A skip-bigram is defined as any pair of words in the sentence order, allowing for arbitrary gaps between words (Lin and Och, 2004) . 3) Containment is defined as the percentage of a sentence that is contained in another sentence. It is a number between 0 and 1, where 1 means the hypothesis sentence is fully contained in the reference sentence (Broder, 1997) . We express containment as the amount of ngrams of a sentence contained in another. The containment metric is not symmetric and is calculated as: c(X, Y ) = |S(X) \u2229 S(Y )|/S(X), where S(X) and S(Y ) are all the n-grams of sentences X and Y respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 103, |
|
"end": 122, |
|
"text": "(Lin and Och, 2004)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 554, |
|
"end": 573, |
|
"text": "(Lin and Och, 2004)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 802, |
|
"text": "(Broder, 1997)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "String similarities", |
|
"sec_num": "2.3" |
|
}, |
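
{

"text": "Minimal sketches of the containment and skip-bigram features, assuming pre-tokenized sentences; n-gram multisets are reduced to sets here for brevity:\n\nfrom itertools import combinations\n\ndef ngrams(tokens, n):\n    return {tuple(tokens[k:k + n]) for k in range(len(tokens) - n + 1)}\n\ndef containment(x_tokens, y_tokens, n=1):\n    # c(X, Y) = |S(X) & S(Y)| / |S(X)|; asymmetric by design.\n    s_x, s_y = ngrams(x_tokens, n), ngrams(y_tokens, n)\n    return len(s_x & s_y) / len(s_x) if s_x else 0.0\n\ndef skip_bigram_overlap(x_tokens, y_tokens):\n    # Skip-bigrams: every ordered word pair, arbitrary gaps allowed.\n    s_x = set(combinations(x_tokens, 2))\n    s_y = set(combinations(y_tokens, 2))\n    union = s_x | s_y\n    return len(s_x & s_y) / len(union) if union else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "String similarities",

"sec_num": "2.3"

},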
|
{ |
|
"text": "We used the method proposed in (Malandrakis et al., 2011) to estimate affective features. Continuous (valence and arousal) ratings in [\u22121, 1] of any term are represented as a linear combination of a function of its semantic similarities to a set of seed words and the affective ratings of these words, as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 31, |
|
"end": 57, |
|
"text": "(Malandrakis et al., 2011)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affective similarity", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "v(w j ) = a 0 + N i=1 a i v(w i ) d ij ,", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Affective similarity", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where w j is the term we mean to characterize, w 1 ...w N are the seed words, v(w i ) is the valence rating for seed word w i , a i is the weight corresponding to seed word w i (that is estimated as described next), d ij is a measure of semantic similarity between w i and w j (for the purposes of this work, cosine similarity between context vectors is used). The weights a i are estimated over the Affective norms for English Words (ANEW) (Bradley and Lang, 1999) corpus. Using this model we generate affective ratings for every content word (noun, verb, adjective or adverb) of every sentence. We assume that these can adequately describe the affective content of the sentences. To create an \"affective similarity metric\" we use the difference of means of the word affective ratings between two sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 441, |
|
"end": 465, |
|
"text": "(Bradley and Lang, 1999)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affective similarity", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "d affect = 2 \u2212 |\u00b5(v(s 1 )) \u2212 \u00b5(v(s 2 ))|", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Affective similarity", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where \u00b5(v(s i )) the mean of content word ratings included in sentence i.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Affective similarity", |
|
"sec_num": "2.4" |
|
}, |
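
{

"text": "A sketch of Eqs. (2) and (3), assuming the seed weights a_0, a_i have already been trained on ANEW and that d(w_i, w_j) is the cosine similarity of context vectors; all names are illustrative:\n\ndef term_valence(word, a0, seeds, d):\n    # Eq. (2): v(w_j) = a_0 + sum_i a_i * v(w_i) * d_ij, where\n    # `seeds` maps each seed word w_i to its pair (a_i, v_i).\n    return a0 + sum(a_i * v_i * d(w_i, word)\n                    for w_i, (a_i, v_i) in seeds.items())\n\ndef affective_similarity(content_words_1, content_words_2, rate):\n    # Eq. (3): d_affect = 2 - |mu(v(s1)) - mu(v(s2))|, with mu the\n    # mean of the content-word ratings (in [-1, 1]) per sentence.\n    mu1 = sum(map(rate, content_words_1)) / len(content_words_1)\n    mu2 = sum(map(rate, content_words_2)) / len(content_words_2)\n    return 2.0 - abs(mu1 - mu2)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Affective similarity",

"sec_num": "2.4"

},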
|
{ |
|
"text": "The aforementioned features are combined using one of two possible models. The first model is a Multiple Linear Regression (MLR) model", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fusion", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "D L = a 0 + k n=1 a n f k ,", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Fusion", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "whereD L is the estimated similarity, f k are the unsupervised semantic similarity metrics and a n are the trainable parameters of the model. The second model is motivated by an assumption of cognitive scaling of similarity scores: we expect that the perception of hit rates is non-linearly affected by the length of the sentences. We call this the hierarchical fusion scheme. It is a combination of (overlapping) MLR models, each matching a range of sentence lengths. The first model D L1 is trained with sentences with length up to l 1 , i.e., l \u2264 l 1 , the second model D L2 up to length l 2 etc. During testing, sentences with length l \u2208 [1, l 1 ] are decoded with D L1 , sentences with length l \u2208 (l 1 , l 2 ] with model D L2 etc. Each of these partial models is a linear fusion model as shown in (4). In this work, we use four models with l 1 = 10, l 2 = 20, l 3 = 30, l 4 = \u221e.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fusion", |
|
"sec_num": "2.5" |
|
}, |
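
{

"text": "A sketch of the two fusion schemes, assuming the per-band weight vectors have already been fit; the hierarchical scheme simply routes a sentence pair to the linear model whose length band covers it:\n\nLENGTH_BOUNDS = [10, 20, 30, float('inf')]  # l1..l4 of Section 2.5\n\ndef linear_score(features, weights):\n    # Eq. (4): D_L = a_0 + sum_n a_n * f_n.\n    a0, coeffs = weights\n    return a0 + sum(a * f for a, f in zip(coeffs, features))\n\ndef hierarchical_score(features, sent_len, band_models):\n    # band_models holds one trained (a0, coeffs) pair per length\n    # band; pick the first band whose upper bound covers the pair.\n    for bound, weights in zip(LENGTH_BOUNDS, band_models):\n        if sent_len <= bound:\n            return linear_score(features, weights)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fusion",

"sec_num": "2.5"

},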
|
{ |
|
"text": "Domain adaptation is employed, by creating separate models per domain (training data source). Beyond that, we also create a unified model, trained on all data to be used as a fallback if an appropriate model can not be decided upon during evaluation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Fusion", |
|
"sec_num": "2.5" |
|
}, |
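
{

"text": "A sketch of per-domain model selection with the unified fallback; the dictionary mirrors the test-set-to-training-set mapping later used by the adapt run (Section 3), and the function names are ours:\n\n# Test set -> training set mapping of the adapt run (Section 3).\nADAPT_MAP = {'OnWN': 'OnWN', 'headlines': 'SMTnews',\n             'SMT': 'Europarl', 'FNWN': 'OnWN'}\n\ndef select_model(test_set, domain_models, fallback_model):\n    # Use the matching per-domain model when one can be assigned;\n    # otherwise fall back to the model trained on all data.\n    train_set = ADAPT_MAP.get(test_set)\n    return domain_models.get(train_set, fallback_model)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Fusion",

"sec_num": "2.5"

},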
|
{ |
|
"text": "Initially all sentences are pre-processed by the CoreNLP (Finkel et al., 2005; Toutanova et al., 2003) suite of tools, a process that includes named entity recognition, normalization, part of speech tagging, lemmatization and stemming. We evaluated multiple types of preprocessing per unsupervised metric and chose different ones depending on the metric. Word-level semantic similarities, used for soft comparisons and affective feature extraction, were computed over a corpus of 116 million web snippets collected by posing one query for every word in the Aspell spellchecker (asp, ) vocabulary to the Yahoo! search engine. Word-level emotional ratings in continuous valence and arousal scales were produced by a model trained on the ANEW dataset and using contextual similarities. Finally, string similarities were calculated over the original unmodified sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 57, |
|
"end": 78, |
|
"text": "(Finkel et al., 2005;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 79, |
|
"end": 102, |
|
"text": "Toutanova et al., 2003)", |
|
"ref_id": "BIBREF33" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Next, results are reported in terms of correlation between the generated scores and the ground truth, for each corpus in the shared task, as well as their weighted mean. Feature selection is applied to the large candidate feature set using a wrapperbased backward selection approach on the training data.The final feature set contains 15 features: soft hit rates calculated over content word 1-to 4grams (4 features), soft hit rates calculated over unigrams per part-of-speech, for adjectives, nouns, adverbs, verbs (4 features), BLEU unigram hit rates for all words and content words (2 features), skip and containment similarities, containment normalized by sum of sentence lengths or product of sentence lengths (3 features) and affective similarities for arousal and valence (2 features).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Domain adaptation methods are the only difference between the three submitted runs. For all three runs we train one linear model per training set and a fallback model. For the first run, dubbed linear, the fallback model is linear and model selection during evaluation is performed by file name, therefore results for the OnWN set are produced by a model trained with OnWN data, while the rest are produced by the fallback model. The second run, dubbed length, uses a hierarchical fallback model and model selection is performed by file name. The third run, dubbed adapt, uses the same models as the first run and each test set is assigned to a model (i.e., the fallback model is never used). The test setmodel (training) mapping for this run is: OnWN \u2192 OnWN, headlines \u2192 SMTnews, SMT \u2192 Europarl and FNWN \u2192 OnWN. Table 2 . Our best run was the simplest one, using a purely linear model and effectively no adaptation. Adding a more aggressive adaptation strategy improved results in the FNWN and SMT sets, so there is definitely some potential, however the improvement observed is nowhere near that observed in the training data or the same task of SemEval 2012. We have to question whether this improvement is an artifact of the rating distributions of these two sets (SMT contains virtually only high ratings, FNWN contains virtually only low ratings): such wild mismatches in priors among training and test sets can be mitigated using more elaborate machine learning algorithms (rather than employing better semantic similarity features or algorithms). Overall the system performs well in the two sets containing large similarity rating ranges.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 813, |
|
"end": 820, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experimental Procedure and Results", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We have improved over our previous model of sentence semantic similarity. The inclusion of stringbased similarities and more so of affective content measures proved significant, but domain adaptation provided mixed results. While expanding the model to include more layers of similarity estimates is clearly a step in the right direction, further work is required to include even more layers. Using syntactic information and more levels of abstraction (e.g. concepts) are obvious next steps.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The first four authors have been partially funded by the PortDial project (Language Resources for Portable Multilingual Spoken Dialog Systems) supported by the EU Seventh Framework Programme (FP7), grant number 296170.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The network-based metrics were applied only during the training phase of the shared task, due to time limitations. They exhibited almost identical performance as the metric defined by (1), which was used in the test runs.2 Note that the features are computed twice on each sentence pair and then averaged.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Semeval-2012 task 6: A pilot on semantic textual similarity", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "385--393", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Agirre, D. Cer, M. Diab, and A. Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pilot on semantic tex- tual similarity. In Proc. SemEval, pages 385-393.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "*sem 2013 shared task: Semantic textual similarity, including a pilot on typed-similarity", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weiwei", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proc. *SEM", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, Aitor Gonzalez- Agirre, and Weiwei Guo. 2013. *sem 2013 shared task: Semantic textual similarity, including a pilot on typed-similarity. In Proc. *SEM.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Ukp: Computing semantic textual similarity by combining multiple content similarity measures", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "B\u00e4r", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Biemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Zesch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "435--440", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. B\u00e4r, C. Biemann, I. Gurevych, and T. Zesch. 2012. Ukp: Computing semantic textual similarity by com- bining multiple content similarity measures. In Proc. SemEval, pages 435-440.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Distributional memory: A general framework for corpus-based semantics", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Baroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Lenci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Computational Linguistics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "673--721", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Baroni and A. Lenci. 2010. Distributional mem- ory: A general framework for corpus-based semantics. Computational Linguistics, 36(4):673-721.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Recognising textual entailment with logical inference", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Bos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Markert", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. Bos and K. Markert. 2005. Recognising textual en- tailment with logical inference. In Proceedings of the Human Language Technology Conference and Confer- ence on Empirical Methods in Natural Language Pro- cessing, page 628635.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Affective norms for English words (ANEW): Stimuli, instruction manual and affective ratings. Technical report C-1", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Bradley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Lang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1999, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M. Bradley and P. Lang. 1999. Affective norms for En- glish words (ANEW): Stimuli, instruction manual and affective ratings. Technical report C-1. The Center for Research in Psychophysiology, University of Florida.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On the resemblance and containment of documents", |
|
"authors": [ |
|
{ |
|
"first": "Andrei", |
|
"middle": [ |
|
"Z" |
|
], |
|
"last": "Broder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Compression and Complexity of Sequences", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "21--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andrei Z. Broder. 1997. On the resemblance and con- tainment of documents. In In Compression and Com- plexity of Sequences (SEQUENCES97, pages 21-29. IEEE Computer Society.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Evaluating WordNetbased measures of semantic distance", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Budanitsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Hirst", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "13--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Budanitsky and G. Hirst. 2006. Evaluating WordNet- based measures of semantic distance. Computational Linguistics, 32:13-47.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Word association norms, mutual information, and lexicography", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Church", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Hanks", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "Computational Linguistics", |
|
"volume": "16", |
|
"issue": "1", |
|
"pages": "22--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. W. Church and P. Hanks. 1990. Word association norms, mutual information, and lexicography. Com- putational Linguistics, 16(1):22-29.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Using machine translation evaluation techniques to determine sentence-level semantic equivalence", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Finch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [ |
|
"Y" |
|
], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Sumita", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 3rd International Workshop on Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A. Finch, S. Y. Hwang, and E. Sumita. 2005. Using ma- chine translation evaluation techniques to determine sentence-level semantic equivalence. In Proceedings of the 3rd International Workshop on Paraphrasing, page 1724.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Incorporating non-local information into information extraction systems by gibbs sampling", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Finkel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Grenager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 43rd Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "363--370", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. R. Finkel, T. Grenager, and C. D. Manning. 2005. In- corporating non-local information into information ex- traction systems by gibbs sampling. In Proceedings of the 43rd Annual Meeting on Association for Computa- tional Linguistics, pages 363-370.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Placing search in context: The concept revisited", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Finkelstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Gabrilovich", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Matias", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Rivlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Solan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Wolfman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Ruppin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACM Transactions on Information Systems", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "116--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "L. Finkelstein, E. Gabrilovich, Y. Matias, E. Rivlin, Z. Solan, G. Wolfman, and E. Ruppin. 2002. Plac- ing search in context: The concept revisited. ACM Transactions on Information Systems, 20(1):116-131.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Methods for Using Textual Entailment in Open-Domain Question Answering", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Harabagiu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Hickl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "905--912", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Harabagiu and A. Hickl. 2006. Methods for Us- ing Textual Entailment in Open-Domain Question An- swering. In Proceedings of the 21st International Con- ference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguis- tics, pages 905-912.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Distributional structure. Word", |
|
"authors": [ |
|
{ |
|
"first": "Z", |
|
"middle": [], |
|
"last": "Harris", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1954, |
|
"venue": "", |
|
"volume": "10", |
|
"issue": "", |
|
"pages": "146--162", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Z. Harris. 1954. Distributional structure. Word, 10(23):146-162.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Unsupervised semantic similarity computation between terms using web documents", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "22", |
|
"issue": "11", |
|
"pages": "1637--1647", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Iosif and A. Potamianos. 2010. Unsupervised seman- tic similarity computation between terms using web documents. IEEE Transactions on Knowledge and Data Engineering, 22(11):1637-1647.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Semsim: Resources for normalized semantic similarity computation using lexical networks", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. Eighth International Conference on Language Resources and Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3499--3504", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Iosif and A. Potamianos. 2012. Semsim: Resources for normalized semantic similarity computation using lexical networks. In Proc. Eighth International Con- ference on Language Resources and Evaluation, pages 3499-3504.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Similarity Computation Using Semantic Networks Created From Web-Harvested Data", |
|
"authors": [ |
|
{ |
|
"first": "Elias", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandros", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Natural Language Engineering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elias Iosif and Alexandros Potamianos. 2013. Similarity Computation Using Semantic Networks Created From Web-Harvested Data. Natural Language Engineering, (submitted).", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Semantic similarity computation for abstract and concrete nouns using network-based distributional semantic models", |
|
"authors": [ |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Giannoudaki", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Zervanou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "10th International Conference on Computational Semantics (IWCS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "328--334", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "E. Iosif, A. Potamianos, M. Giannoudaki, and K. Zer- vanou. 2013. Semantic similarity computation for ab- stract and concrete nouns using network-based distri- butional semantic models. In 10th International Con- ference on Computational Semantics (IWCS), pages 328-334.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Automatic evaluation of machine translation quality using longest common subsequence and skip-bigram statistics", |
|
"authors": [ |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Franz Josef", |
|
"middle": [], |
|
"last": "Och", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Proceedings of the 42nd Annual Meeting on Association for Computational Linguistics, ACL '04", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chin-Yew Lin and Franz Josef Och. 2004. Automatic evaluation of machine translation quality using longest common subsequence and skip-bigram statistics. In Proceedings of the 42nd Annual Meeting on Associa- tion for Computational Linguistics, ACL '04, Strouds- burg, PA, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Learning textual entailment using svms and string similarity measures", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Androutsopoulos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Malakasiotis and I. Androutsopoulos. 2007. Learn- ing textual entailment using svms and string similar- ity measures. In Proceedings of of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pages 42-47.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Paraphrase recognition using machine learning to combine similarity measures", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Malakasiotis", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 47th Annual Meeting of ACL and the 4th Int. Joint Conference on Natural Language Processing of AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "42--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Malakasiotis. 2009. Paraphrase recognition using ma- chine learning to combine similarity measures. In Pro- ceedings of the 47th Annual Meeting of ACL and the 4th Int. Joint Conference on Natural Language Pro- cessing of AFNLP, pages 42-47.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Kernel models for affective lexicon creation", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Malandrakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proc. Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2977--2980", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Malandrakis, A. Potamianos, E. Iosif, and S. Narayanan. 2011. Kernel models for affec- tive lexicon creation. In Proc. Interspeech, pages 2977-2980.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "DeepPurple: Estimating sentence semantic similarity using n-gram regression models and web snippets", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Malandrakis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Iosif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Potamianos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. Sixth International Workshop on Semantic Evaluation (SemEval) -The First Joint Conference on Lexical and Computational Semantics (*SEM)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "565--570", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. Malandrakis, E. Iosif, and A. Potamianos. 2012. DeepPurple: Estimating sentence semantic similarity using n-gram regression models and web snippets. In Proc. Sixth International Workshop on Semantic Eval- uation (SemEval) -The First Joint Conference on Lexical and Computational Semantics (*SEM), pages 565-570.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Semi-automatic acquisition of semantic structures for understanding domainspecific natural language queries", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Meng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K.-C", |
|
"middle": [], |
|
"last": "Siu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "IEEE Transactions on Knowledge and Data Engineering", |
|
"volume": "14", |
|
"issue": "1", |
|
"pages": "172--181", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Meng and K.-C. Siu. 2002. Semi-automatic acquisi- tion of semantic structures for understanding domain- specific natural language queries. IEEE Transactions on Knowledge and Data Engineering, 14(1):172-181.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Contextual correlates of semantic similarity. Language and Cognitive Processes", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W", |
|
"middle": [], |
|
"last": "Charles", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1998, |
|
"venue": "", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "1--28", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Miller and W. Charles. 1998. Contextual correlates of semantic similarity. Language and Cognitive Pro- cesses, 6(1):1-28.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Wordnet: An on-line lexical database", |
|
"authors": [ |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1990, |
|
"venue": "International Journal of Lexicography", |
|
"volume": "3", |
|
"issue": "4", |
|
"pages": "235--312", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "G. Miller. 1990. Wordnet: An on-line lexical database. International Journal of Lexicography, 3(4):235-312.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Source-language entailment modeling for translating unknown terms", |
|
"authors": [ |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Mirkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Cancedda", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Dymetman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Idan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 47th Annual Meeting of ACL and the 4th Int. Joint Conference on Natural Language Processing of AFNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "791--799", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S. Mirkin, L. Specia, N. Cancedda, I. Dagan, M. Dymet- man, and S. Idan. 2009. Source-language entailment modeling for translating unknown terms. In Proceed- ings of the 47th Annual Meeting of ACL and the 4th Int. Joint Conference on Natural Language Processing of AFNLP, pages 791-799.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "T", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "W.-J", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Papineni, S. Roukos, T. Ward, and W.-J. Zhu. 2002. Bleu: a method for automatic evaluation of ma- chine translation. In Proceedings of the 40th Annual Meeting on Association for Computational Linguis- tics, pages 311-318.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Application of the bleu algorithm for recognizing textual entailments", |
|
"authors": [ |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Perez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "E", |
|
"middle": [], |
|
"last": "Alfonseca", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the PASCAL Challenges Worshop on Recognising Textual Entailment", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "D. Perez and E. Alfonseca. 2005. Application of the bleu algorithm for recognizing textual entailments. In Proceedings of the PASCAL Challenges Worshop on Recognising Textual Entailment.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Using information content to evaluate semantic similarity in a taxanomy", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1995, |
|
"venue": "Proc. of International Joint Conference for Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "448--453", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Resnik. 1995. Using information content to evalu- ate semantic similarity in a taxanomy. In Proc. of In- ternational Joint Conference for Artificial Intelligence, pages 448-453.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Exploiting paraphrases in a question answering system", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Rinaldi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Dowdall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Kaljurand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Hess", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Molla", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the 2nd International Workshop on Paraphrasing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "25--32", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Rinaldi, J. Dowdall, K. Kaljurand, M. Hess, and D. Molla. 2003. Exploiting paraphrases in a question answering system. In Proceedings of the 2nd Interna- tional Workshop on Paraphrasing, pages 25-32.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Contextual correlates of synonymy", |
|
"authors": [ |
|
{ |
|
"first": "H", |
|
"middle": [], |
|
"last": "Rubenstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Goodenough", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1965, |
|
"venue": "Communications of the ACM", |
|
"volume": "8", |
|
"issue": "10", |
|
"pages": "627--633", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H. Rubenstein and J. B. Goodenough. 1965. Contextual correlates of synonymy. Communications of the ACM, 8(10):627-633.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Learning entailment rules for unary templates", |
|
"authors": [ |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Szpektor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "I", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of the 22nd International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "849--856", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "I. Szpektor and I. Dagan. 2008. Learning entailment rules for unary templates. In Proceedings of the 22nd International Conference on Computational Linguis- tics, pages 849-856.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Feature-rich part-of-speech tagging with a cyclic dependency network", |
|
"authors": [ |
|
{ |
|
"first": "K", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Singer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "173--180", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "K. Toutanova, D. Klein, C. D. Manning, and Y. Singer. 2003. Feature-rich part-of-speech tagging with a cyclic dependency network. In Proceedings of Con- ference of the North American Chapter of the Associ- ation for Computational Linguistics on Human Lan- guage Technology, pages 173-180.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Similarity of semantic relations", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Turney", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Computational Linguistics", |
|
"volume": "32", |
|
"issue": "3", |
|
"pages": "379--416", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P. Turney. 2006. Similarity of semantic relations. Com- putational Linguistics, 32(3):379-416.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Takelab: Systems for measuring semantic text similarity", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "\u0160ari\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G", |
|
"middle": [], |
|
"last": "Glava\u0161", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Karan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "\u0160najder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"Dalbelo" |
|
], |
|
"last": "Ba\u0161i\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proc. SemEval", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "441--448", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F.\u0160ari\u0107, G. Glava\u0161, M. Karan, J.\u0160najder, and B. Dal- belo Ba\u0161i\u0107. 2012. Takelab: Systems for measuring semantic text similarity. In Proc. SemEval, pages 441- 448.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "A machine-learning approach to textual entailment recognition", |
|
"authors": [ |
|
{ |
|
"first": "F", |
|
"middle": [], |
|
"last": "Zanzotto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Pennacchiotti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Natural Language Engineering", |
|
"volume": "15", |
|
"issue": "4", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "F. Zanzotto, M. Pennacchiotti, and A. Moschitti. 2009. A machine-learning approach to textual en- tailment recognition. Natural Language Engineering, 15(4):551582.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF0": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Correlation performance for the linear model us-Results for the linear run using subsets of the final feature set are shown inTable 1. Lexical features (hit rates) are obviously the most valuable features. String similarities provided us with an improvement in the train-", |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"4\">ing lexical (L), string (S) and affect (A) features</td><td/></tr><tr><td colspan=\"5\">Feature headl. OnWN FNWN SMT mean</td></tr><tr><td>L</td><td>0.68</td><td>0.51</td><td>0.23 0.25</td><td>0.46</td></tr><tr><td>L+S</td><td>0.69</td><td>0.49</td><td>0.23 0.26</td><td>0.46</td></tr><tr><td>L+S+A</td><td>0.69</td><td>0.51</td><td>0.27 0.28</td><td>0.47</td></tr><tr><td colspan=\"4\">Results are shown in Tables 1 and 2.</td><td/></tr></table>" |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"num": null, |
|
"text": "Correlation performance on the evaluation set.", |
|
"html": null, |
|
"content": "<table><tr><td>Run</td><td colspan=\"4\">headl. OnWN FNWN SMT mean</td></tr><tr><td>linear</td><td>0.69</td><td>0.51</td><td>0.27 0.28</td><td>0.47</td></tr><tr><td>length</td><td>0.65</td><td>0.51</td><td>0.25 0.28</td><td>0.46</td></tr><tr><td>adapt</td><td>0.62</td><td>0.51</td><td>0.33 0.30</td><td>0.46</td></tr><tr><td colspan=\"5\">ing set which is not reflected in the test set. Af-</td></tr><tr><td colspan=\"5\">fect proved valuable, particularly in the most diffi-</td></tr><tr><td colspan=\"3\">cult sets of FNWN and SMT.</td><td/><td/></tr><tr><td colspan=\"5\">Results for the three submission runs are shown</td></tr><tr><td>in</td><td/><td/><td/><td/></tr></table>" |
|
} |
|
} |
|
} |
|
} |