|
{ |
|
"paper_id": "S17-2012", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:30:20.475622Z" |
|
}, |
|
"title": "CompiLIG at SemEval-2017 Task 1: Cross-Language Plagiarism Detection Methods for Semantic Textual Similarity", |
|
"authors": [ |
|
{ |
|
"first": "J\u00e9r\u00e9my", |
|
"middle": [], |
|
"last": "Ferrero", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "LIG-GETALP Univ. Grenoble Alpes", |
|
"location": { |
|
"addrLine": "276 rue du Mont Blanc", |
|
"postCode": "74540", |
|
"settlement": "Saint-F\u00e9lix", |
|
"country": "France, France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "LIG-GETALP Univ. Grenoble Alpes", |
|
"institution": "", |
|
"location": { |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Didier", |
|
"middle": [], |
|
"last": "Schwab", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "LIG-GETALP Univ. Grenoble Alpes", |
|
"institution": "", |
|
"location": { |
|
"country": "France" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Agn\u00e8s", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "We present our submitted systems for Semantic Textual Similarity (STS) Track 4 at SemEval-2017. Given a pair of Spanish-English sentences, each system must estimate their semantic similarity by a score between 0 and 5. In our submission, we use syntax-based, dictionary-based, context-based, and MT-based methods. We also combine these methods in unsupervised and supervised way. Our best run ranked 1 st on track 4a with a correlation of 83.02% with human annotations.", |
|
"pdf_parse": { |
|
"paper_id": "S17-2012", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "We present our submitted systems for Semantic Textual Similarity (STS) Track 4 at SemEval-2017. Given a pair of Spanish-English sentences, each system must estimate their semantic similarity by a score between 0 and 5. In our submission, we use syntax-based, dictionary-based, context-based, and MT-based methods. We also combine these methods in unsupervised and supervised way. Our best run ranked 1 st on track 4a with a correlation of 83.02% with human annotations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "CompiLIG is a collaboration between Compilatio 1 -a company particularly interested in crosslanguage plagiarism detection -and LIG research group on natural language processing (GETALP). Cross-language semantic textual similarity detection is an important step for cross-language plagiarism detection, and evaluation campaigns in this new domain are rare. For the first time, SemEval STS task (Agirre et al., 2016) was extended with a Spanish-English cross-lingual sub-task in 2016. This year, sub-task was renewed under track 4 (divided in two sub-corpora: track 4a and track 4b).", |
|
"cite_spans": [ |
|
{ |
|
"start": 393, |
|
"end": 414, |
|
"text": "(Agirre et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Given a sentence in Spanish and a sentence in English, the objective is to compute their semantic textual similarity according to a score from 0 1 www.compilatio.net to 5, where 0 means no similarity and 5 means full semantic similarity. The evaluation metric is a Pearson correlation coefficient between the submitted scores and the gold standard scores from human annotators. Last year, among 26 submissions from 10 teams, the method that achieved the best performance (Brychcin and Svoboda, 2016 ) was a supervised system (SVM regression with RBF kernel) based on word alignment algorithm presented in Sultan et al. (2015).", |
|
"cite_spans": [ |
|
{ |
|
"start": 471, |
|
"end": 498, |
|
"text": "(Brychcin and Svoboda, 2016", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
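
{

"text": "The official evaluation metric mentioned above is the Pearson correlation between system scores and gold standard scores. The short Python sketch below shows how such a score can be computed; SciPy is our choice for illustration and is not necessarily the tooling used by the organizers.\n\nfrom scipy.stats import pearsonr\n\ndef evaluate(system_scores, gold_scores):\n    # Pearson correlation between submitted scores and human annotations\n    correlation, _ = pearsonr(system_scores, gold_scores)\n    return correlation",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},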
|
{ |
|
"text": "Our submission in 2017 is based on crosslanguage plagiarism detection methods combined with the best performing STS detection method published in 2016. CompiLIG team participated to SemEval STS for the first time in 2017. The methods proposed are syntax-based, dictionary-based, context-based, and MT-based. They show additive value when combined. The submitted runs consist in (1) our best single unsupervised approach (2) an unsupervised combination of best approaches (3) a fine-tuned combination of best approaches. The best of our three runs ranked 1 st with a correlation of 83.02% with human annotations on track 4a among all submitted systems (51 submissions from 20 teams for this track). Correlation results of all participants (including ours) on track 4b were much lower and we try to explain why (and question the validity of track 4b) in the last part of this paper. CL-CnG aims to measure the syntactical similarity between two texts. It is based on Mcnamee and Mayfield (2004) work used in information retrieval. It compares two texts under their n-grams vectors representation. The main advantage of this kind of method is that it does not require any translation between source and target text. After some tests on previous year's dataset to find the best n, we decide to use the Potthast et al. (2011)'s CL-C3G implementation. Let S x and S y two sentences in two different languages. First, the alphabet of these sentences is normalized to the ensemble = {a \u2212 z, 0 \u2212 9, }, so only spaces and alphanumeric characters are kept. Any other diacritic or symbol is deleted and the whole text is lower-cased. The texts are then segmented into 3-grams (sequences of 3 contiguous characters) and transformed into tf.idf vectors of character 3-grams. We directly build our idf model on the evaluation data. We use a double normalization K (with K = 0.5) as tf (Manning et al., 2008) and a inverse document frequency smooth as idf. Finally, a cosine similarity is computed between the vectors of source and target sentences.", |
|
"cite_spans": [ |
|
{ |
|
"start": 965, |
|
"end": 992, |
|
"text": "Mcnamee and Mayfield (2004)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1870, |
|
"end": 1892, |
|
"text": "(Manning et al., 2008)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
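
{

"text": "The following Python sketch is a minimal re-implementation, for illustration only, of the CL-C3G pipeline described above: alphabet normalization, character 3-gram extraction, tf.idf weighting with double normalization K = 0.5, a smoothed idf built directly on the evaluation data, and a final cosine similarity. The exact idf smoothing variant is an assumption; this is not the authors' code.\n\nimport math\nimport re\nfrom collections import Counter\n\ndef normalize(text):\n    # keep only lower-cased alphanumeric characters and spaces\n    return re.sub('[^a-z0-9 ]', '', text.lower())\n\ndef char_3grams(text):\n    t = normalize(text)\n    return Counter(t[i:i + 3] for i in range(len(t) - 2))\n\ndef tfidf_vector(counts, idf, k=0.5):\n    # double normalization K: tf = K + (1 - K) * f / max_f\n    if not counts:\n        return {}\n    max_f = max(counts.values())\n    return {g: (k + (1 - k) * f / max_f) * idf.get(g, 1.0) for g, f in counts.items()}\n\ndef cosine(u, v):\n    dot = sum(u[g] * v.get(g, 0.0) for g in u)\n    nu = math.sqrt(sum(x * x for x in u.values()))\n    nv = math.sqrt(sum(x * x for x in v.values()))\n    return dot / (nu * nv) if nu and nv else 0.0\n\ndef cl_c3g(source, target, evaluation_sentences):\n    # idf model built directly on the evaluation data, as described above\n    docs = [set(char_3grams(s)) for s in evaluation_sentences]\n    df = Counter(g for d in docs for g in d)\n    idf = {g: math.log(len(docs) / (1.0 + df[g])) + 1.0 for g in df}  # smoothed idf (one common variant)\n    return cosine(tfidf_vector(char_3grams(source), idf), tfidf_vector(char_3grams(target), idf))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Introduction",

"sec_num": "1"

},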
|
{ |
|
"text": "Thesaurus-based Similarity (CL-CTS)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "CL-CTS (Gupta et al., 2012; Pataki, 2012) aims to measure the semantic similarity between two vectors of concepts. The model consists in representing texts as bag-of-words (or concepts) to compare them. The method also does not require explicit translation since the matching is performed using internal connections in the used \"ontology\". Let S a sentence of length n, the n words of the sentence are represented by w i as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 27, |
|
"text": "(Gupta et al., 2012;", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 28, |
|
"end": 41, |
|
"text": "Pataki, 2012)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S = {w 1 , w 2 , w 3 , ..., w n }", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "S x and S y are two sentences in two different languages. A bag-of-words S from each sentence S is built, by filtering stop words and by using a function that returns for a given word all its possible translations. These translations are jointly given by a linked lexical resource, DBNary (S\u00e9rasset, 2015) , and by cross-lingual word embeddings. More precisely, we use the top 10 closest words in the embeddings model and all the available translations from DBNary to build the bag-of-words of a word. We use the MultiVec (Berard et al., 2016) toolkit for computing and managing word embeddings. The corpora used to build the embeddings are Europarl and Wikipedia sub-corpus, part of the dataset of Ferrero et al. 20162 . For training our embeddings, we use CBOW model with a vector size of 100, a window size of 5, a negative sampling parameter of 5, and an alpha of 0.02.", |
|
"cite_spans": [ |
|
{ |
|
"start": 289, |
|
"end": 305, |
|
"text": "(S\u00e9rasset, 2015)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 543, |
|
"text": "(Berard et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
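
{

"text": "As an illustration of the bag-of-words construction just described, the Python sketch below builds, for one sentence, the union of all DBNary translations and the top 10 closest words in a cross-lingual embedding model. The helpers dbnary_translations and embedding_model (and its closest_words method) are hypothetical stand-ins for the actual resources; this is a sketch, not the authors' implementation.\n\ndef conceptual_bag(sentence, stop_words, dbnary_translations, embedding_model):\n    # sentence: list of (already tokenized) words of S_x or S_y\n    bag = set()\n    for word in sentence:\n        if word.lower() in stop_words:\n            continue\n        # all available translations from the linked lexical resource\n        bag.update(dbnary_translations(word))\n        # top 10 closest words in the cross-lingual embedding space\n        bag.update(embedding_model.closest_words(word, top_n=10))\n    return bag",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cross-Language Conceptual",

"sec_num": "2.2"

},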
|
{ |
|
"text": "So, the sets of words S x and S y are the conceptual representations in the same language of S x and S y respectively. To calculate the similarity between S x and S y , we use a syntactically and frequentially weighted augmentation of the Jaccard distance, defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "J(S x , S y ) = \u2126(S x ) + \u2126(S y ) \u2126(S x ) + \u2126(S y )", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where S x and S y are the input sentences (also represented as sets of words), and \u2126 is the sum of the weights of the words of a set, defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u2126(S) = n i=1 , w i \u2208S \u03d5(w i )", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where w i is the i th word of the bag S, and \u03d5 is the weight of word in the Jaccard distance:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "\u03d5(w) = pos weight(w) 1\u2212\u03b1 . idf (w) \u03b1 (4)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "where pos weight is the function which gives the weight for each universal part-of-speech tag of a word, idf is the function which gives the inverse document frequency of a word, and . is the scalar product. Equation (4) is a way to syntactically (pos weight) and frequentially (idf ) weight the contribution of a word to the Jaccard distance (both contributions being controlled with the \u03b1 parameter). We assume that for one word, we have its part-of-speech within its original sentence, and its inverse document frequency. We use TreeTagger (Schmid, 1994) for POS tagging, and we normalize the tags with Universal Tagset of Petrov et al. (2012) . Then, we assign a weight for each of the 12 universal POS tags. The 12 POS weights and the value \u03b1 are optimized with Condor (Berghen and Bersini, 2005) in the same way as in Ferrero et al. (2017) . Condor applies a Newton's method with a trust region algorithm to determinate the weights that optimize a desired output score. No re-tuning of these hyperparameters for SemEval task was performed.", |
|
"cite_spans": [ |
|
{ |
|
"start": 543, |
|
"end": 557, |
|
"text": "(Schmid, 1994)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 626, |
|
"end": 646, |
|
"text": "Petrov et al. (2012)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 774, |
|
"end": 801, |
|
"text": "(Berghen and Bersini, 2005)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 824, |
|
"end": 845, |
|
"text": "Ferrero et al. (2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Conceptual", |
|
"sec_num": "2.2" |
|
}, |
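
{

"text": "The Python sketch below ties equations (2) to (4) together. It is an illustrative reading, not the authors' code: in particular, the matched sets summed in the numerator are assumed to be the words of each sentence that find a counterpart in the conceptual bag of the other sentence, and the default alpha of 0.5 is only for the example (the real POS weights and alpha are the Condor-optimized values).\n\ndef phi(word, pos_weight, idf, alpha):\n    # equation (4): syntactic and frequential weighting of a word\n    return (pos_weight(word) ** (1 - alpha)) * (idf(word) ** alpha)\n\ndef omega(words, pos_weight, idf, alpha):\n    # equation (3): sum of the weights of the words of a set\n    return sum(phi(w, pos_weight, idf, alpha) for w in words)\n\ndef weighted_jaccard(sent_x, sent_y, bag_x, bag_y, pos_weight, idf, alpha=0.5):\n    matched_x = [w for w in sent_x if w in bag_y]  # words of S_x covered by the bag of S_y (assumption)\n    matched_y = [w for w in sent_y if w in bag_x]  # words of S_y covered by the bag of S_x (assumption)\n    num = omega(matched_x, pos_weight, idf, alpha) + omega(matched_y, pos_weight, idf, alpha)\n    den = omega(sent_x, pos_weight, idf, alpha) + omega(sent_y, pos_weight, idf, alpha)\n    return num / den if den else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cross-Language Conceptual",

"sec_num": "2.2"

},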
|
{ |
|
"text": "CL-WES (Ferrero et al., 2017) consists in a cosine similarity on distributed representations of sentences, which are obtained by the weighted sum of each word vector in a sentence. As in previous section, each word vector is syntactically and frequentially weighted.", |
|
"cite_spans": [ |
|
{ |
|
"start": 7, |
|
"end": 29, |
|
"text": "(Ferrero et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Word Embedding-based Similarity", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "If S x and S y are two sentences in two different languages, then CL-WES builds their (bilingual) common representation vectors V x and V y and applies a cosine similarity between them. A distributed representation V of a sentence S is calculated as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Word Embedding-based Similarity", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "V = n i=1 , w i \u2208S (vector(w i ) . \u03d5(w i ))", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Cross-Language Word Embedding-based Similarity", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where w i is the i th word of the sentence S, vector is the function which gives the word embedding vector of a word, \u03d5 is the same that in formula (4), and . is the scalar product. We make this method publicly available through MultiVec 3 (Berard et al., 2016) toolkit.", |
|
"cite_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 261, |
|
"text": "(Berard et al., 2016)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Cross-Language Word Embedding-based Similarity", |
|
"sec_num": "2.3" |
|
}, |
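
{

"text": "A minimal Python sketch of CL-WES as described by equation (5): each sentence vector is the sum of its word vectors scaled by the weight of equation (4), and the two sentence vectors are compared with a cosine similarity. Here embedding_model is assumed to behave like a dictionary from words to numpy vectors (for instance one exported from MultiVec); this is not the toolkit's actual API.\n\nimport numpy as np\n\ndef sentence_vector(sentence, embedding_model, phi):\n    # weighted sum of the word vectors of the sentence (equation (5))\n    vectors = [embedding_model[w] * phi(w) for w in sentence if w in embedding_model]\n    return np.sum(vectors, axis=0) if vectors else None\n\ndef cl_wes(sent_x, sent_y, embedding_model, phi):\n    vx = sentence_vector(sent_x, embedding_model, phi)\n    vy = sentence_vector(sent_y, embedding_model, phi)\n    if vx is None or vy is None:\n        return 0.0\n    return float(np.dot(vx, vy) / (np.linalg.norm(vx) * np.linalg.norm(vy)))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Cross-Language Word Embedding-based Similarity",

"sec_num": "2.3"

},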
|
{ |
|
"text": "Alignment (T+WA)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The last method used is a two-step process. First, we translate the Spanish sentence into English with Google Translate (i.e. we are bringing the two sentences in the same language). Then, we align both utterances. We reuse the monolingual aligner 4 of Sultan et al. (2015) with the improvement of Brychcin and Svoboda (2016) , who won the cross-lingual sub-task in 2016 (Agirre et al., 2016) . Because this improvement has not been released by the initial authors, we propose to share our re-implementation on GitHub 5 . If S x and S y are two sentences in the same language, then we try to measure their similarity with the following formula:", |
|
"cite_spans": [ |
|
{ |
|
"start": 298, |
|
"end": 325, |
|
"text": "Brychcin and Svoboda (2016)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 371, |
|
"end": 392, |
|
"text": "(Agirre et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "J(S x , S y ) = \u03c9(A x ) + \u03c9(A y ) \u03c9(S x ) + \u03c9(S y )", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where S x and S y are the input sentences (represented as sets of words), A x and A y are the sets of aligned words for S x and S y respectively, and \u03c9 is a frequency weight of a set of words, defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "\u03c9(A) = n i=1 , w i \u2208A idf (w i ) (7)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where idf is the function which gives the inverse document frequency of a word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation + Monolingual Word", |
|
"sec_num": "2.4" |
|
}, |
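
{

"text": "The Python sketch below illustrates equations (6) and (7): once the Spanish sentence has been machine-translated into English and the two English sentences have been word-aligned, the score is the idf mass of the aligned words divided by the idf mass of both sentences. Representing the alignment as a list of word pairs is an assumption made for the example; translation and alignment themselves are performed by the external tools cited above.\n\ndef idf_mass(words, idf):\n    # equation (7): sum of the inverse document frequencies of a set of words\n    return sum(idf(w) for w in words)\n\ndef translation_alignment_score(sent_x, sent_y, alignments, idf):\n    # alignments: list of (word_in_x, word_in_y) pairs returned by the aligner\n    aligned_x = [wx for wx, _ in alignments]\n    aligned_y = [wy for _, wy in alignments]\n    num = idf_mass(aligned_x, idf) + idf_mass(aligned_y, idf)\n    den = idf_mass(sent_x, idf) + idf_mass(sent_y, idf)\n    return num / den if den else 0.0",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Translation + Monolingual Word",

"sec_num": "2.4"

},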
|
{ |
|
"text": "These methods are syntax-, dictionary-, contextand MT-based, and are thus potentially complementary. That is why we also combine them in unsupervised and supervised fashion. Our unsupervised fusion is an average of the outputs of each method. For supervised fusion, we recast fusion as a regression problem and we experiment all available methods in Weka 3.8.0 (Hall et al., 2009) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 361, |
|
"end": 380, |
|
"text": "(Hall et al., 2009)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "System Combination", |
|
"sec_num": "2.5" |
|
}, |
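
{

"text": "The sketch below illustrates the two fusion schemes in Python. The unsupervised fusion is a plain average of the method outputs; for the supervised fusion the paper relies on Weka regressors (the best run uses an M5P model tree), so the scikit-learn DecisionTreeRegressor used here is only a readily available stand-in, not the model actually submitted.\n\nfrom sklearn.tree import DecisionTreeRegressor\n\ndef average_fusion(scores):\n    # unsupervised fusion: average of the scores of the selected methods\n    return sum(scores) / len(scores)\n\ndef train_supervised_fusion(train_features, train_gold):\n    # supervised fusion: one feature per method, gold similarity as the target\n    model = DecisionTreeRegressor()\n    model.fit(train_features, train_gold)\n    return model\n\ndef fused_score(model, features, low=0.0, high=5.0):\n    # keep the prediction within the [0; 5] range expected by the task\n    pred = float(model.predict([features])[0])\n    return min(max(pred, low), high)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "System Combination",

"sec_num": "2.5"

},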
|
{ |
|
"text": "3 Results on SemEval-2016 Dataset Table 1 reports the results of the proposed systems on SemEval-2016 STS cross-lingual evaluation dataset. The dataset, the annotation and the evaluation systems were presented in the SemEval-2016 STS task description paper (Agirre et al., 2016 ), so we do not re-detail them here. The lines in bold represent the methods that obtain the best mean score in each category of system (best method alone, unsupervised and supervised fusion). The scores for the supervised systems are obtained with 10-folds cross-validation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 257, |
|
"end": 277, |
|
"text": "(Agirre et al., 2016", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 34, |
|
"end": 41, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "System Combination", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "First, it is important to mention that our outputs are linearly re-scaled to a real-valued space [0 ; 5].", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Runs Submitted to SemEval-2017", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Run 1: Best Method Alone. Our first run is only based on the best method alone during our tests (see Table 1 ), i.e. Cross-Language Conceptual Thesaurus-based Similarity (CL-CTS) model, as described in section 2.2.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 101, |
|
"end": 108, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Runs Submitted to SemEval-2017", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Run 2: Fusion by Average. Our second run is a fusion by average on three methods: CL-C3G, CL-CTS and T+WA, all described in section 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Runs Submitted to SemEval-2017", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Run 3: M5 Model Tree. Unlike the two precedent runs, the third run is a supervised system. We have selected the system that obtained the best score during our tests on SemEval-2016 evaluation dataset (see Table 1 ), which is the M5 model tree (Wang and Witten, 1997) Weka 3.8.0 (Hall et al., 2009) ). Model trees have a conventional decision tree structure but use linear regression functions at the leaves instead of discrete class labels. The first implementation of model trees, M5, was proposed by Quinlan (1992) and the approach was refined and improved in a system called M5 by Wang and Witten (1997) . To learn the model, we use all the methods described in section 2 as features.", |
|
"cite_spans": [ |
|
{ |
|
"start": 243, |
|
"end": 266, |
|
"text": "(Wang and Witten, 1997)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 278, |
|
"end": 297, |
|
"text": "(Hall et al., 2009)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 502, |
|
"end": 516, |
|
"text": "Quinlan (1992)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 584, |
|
"end": 606, |
|
"text": "Wang and Witten (1997)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 212, |
|
"text": "Table 1", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Runs Submitted to SemEval-2017", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Dataset, annotation and evaluation systems are presented in SemEval-2017 STS task description paper (Cer et al., 2017) . We can see in Table 2 that our systems work well on SNLI 6 (Bowman et al., 2015) (track 4a), on which we ranked 1 st with more than 83% of correlation with human annotations. Conversely, correlations on the WMT corpus (track 4b) are strangely low. This difference is notable on the scores of all participating teams (Cer et al., 2017) 7 . This might be explained by the fact that WMT was annotated by only one annotator, while the SNLI corpus was annotated by many. To investigate deeper on this issue, we manually annotated 60 random pairs of each sub-corpus (120 annotated pairs among 500). These annotations provide a second annotator reference. We can see in Table 3 that, on SNLI corpus (4a), our methods behave the same way for both annotations (a difference of about 1.3%). However, the difference in correlation is huge between our annotations and SemEval gold standard on the WMT corpus (4b): 30% on average. The Pearson correlation between our annotated pairs and the related gold standard is 85.76% for the SNLI corpus and 29.16% for the WMT corpus. These results question the validity of the WMT corpus (4b) for semantic textual similarity detection.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 118, |
|
"text": "(Cer et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 455, |
|
"text": "(Cer et al., 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 142, |
|
"text": "Table 2", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 784, |
|
"end": 791, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results of the 2017 evaluation and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We described our submission to SemEval-2017 Semantic Textual Similarity task on track 4 (Sp-En cross-lingual sub-task). Our best results were achieved by a M5 model tree combination of various textual similarity detection techniques. This approach worked well on the SNLI corpus (4afinishes 1 st with more than 83% of correlation with human annotations), which corresponds to a real cross-language plagiarism detection scenario. We also questioned WMT corpus (4b) validity providing our own manual annotations and showing low correlations with those of SemEval.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://github.com/FerreroJeremy/ Cross-Language-Dataset", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://github.com/eske/multivec 4 https://github.com/ma-sultan/ monolingual-word-aligner 5 https://github.com/FerreroJeremy/ monolingual-word-aligner", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://nlp.stanford.edu/projects/ snli/ 7 The best score for this track is 34%, while for the other tracks it is around 85%.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "SemEval-2016 Task 1: Semantic Textual Similarity, Monolingual and Cross-Lingual Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carmen", |
|
"middle": [], |
|
"last": "Banea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aitor", |
|
"middle": [], |
|
"last": "Gonzalez-Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rada", |
|
"middle": [], |
|
"last": "Mihalcea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Janyce", |
|
"middle": [], |
|
"last": "Wiebe", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (Se-mEval 2016). Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "497--511", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eneko Agirre, Carmen Banea, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Rada Mihalcea, and Janyce Wiebe. 2016. SemEval-2016 Task 1: Se- mantic Textual Similarity, Monolingual and Cross- Lingual Evaluation. In Proceedings of the 10th In- ternational Workshop on Semantic Evaluation (Se- mEval 2016). Association for Computational Lin- guistics, San Diego, CA, USA, pages 497-511. http://www.aclweb.org/anthology/S16-1081.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "MultiVec: a Multilingual and Multilevel Representation Learning Toolkit for NLP", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Berard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christophe", |
|
"middle": [], |
|
"last": "Servan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Pietquin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16). European Language Resources Association (ELRA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4188--4192", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Berard, Christophe Servan, Olivier Pietquin, and Laurent Besacier. 2016. MultiVec: a Multilin- gual and Multilevel Representation Learning Toolkit for NLP. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16). European Language Resources Asso- ciation (ELRA), Portoroz, Slovenia, pages 4188- 4192.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "CONDOR, a new parallel, constrained extension of Powell's UOBYQA algorithm: Experimental results and comparison with the DFO algorithm", |
|
"authors": [ |
|
{ |
|
"first": "Hugues", |
|
"middle": [], |
|
"last": "Frank Vanden Berghen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bersini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Journal of Computational and Applied Mathematics", |
|
"volume": "181", |
|
"issue": "", |
|
"pages": "157--175", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.cam.2004.11.029" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Frank Vanden Berghen and Hugues Bersini. 2005. CONDOR, a new parallel, constrained extension of Powell's UOBYQA algorithm: Experimental results and comparison with the DFO algorithm. Journal of Computational and Applied Mathematics 181:157- 175. https://doi.org/10.1016/j.cam.2004.11.029.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A Large Annotated Corpus for Learning Natural Language Inference", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabor", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Angeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Potts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "632--642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A Large Annotated Corpus for Learning Natural Language Inference. In Proceedings of the 2015 Confer- ence on Empirical Methods in Natural Language Processing (EMNLP). Association for Computa- tional Linguistics, Lisbon, Portugal, pages 632-642. http://aclweb.org/anthology/D/D15/D15-1075.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "UWB at SemEval-2016 Task 1: Semantic textual similarity using lexical, syntactic, and semantic information", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Brychcin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lukas", |
|
"middle": [], |
|
"last": "Svoboda", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "588--594", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Brychcin and Lukas Svoboda. 2016. UWB at SemEval-2016 Task 1: Semantic textual sim- ilarity using lexical, syntactic, and semantic in- formation. In Proceedings of the 10th Inter- national Workshop on Semantic Evaluation (Se- mEval 2016). San Diego, CA, USA, pages 588- 594. https://www.aclweb.org/anthology/S/S16/S16- 1089.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mona", |
|
"middle": [], |
|
"last": "Diab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eneko", |
|
"middle": [], |
|
"last": "Agirre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Inigo", |
|
"middle": [], |
|
"last": "Lopez-Gazpio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Specia", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017). Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--14", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez- Gazpio, and Lucia Specia. 2017. Semeval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Eval- uation (SemEval-2017). Association for Computa- tional Linguistics, Vancouver, Canada, pages 1-14. http://www.aclweb.org/anthology/S17-2001.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "A Multilingual, Multi-style and Multi-granularity Dataset for Cross-language Textual Similarity Detection", |
|
"authors": [ |
|
{ |
|
"first": "J\u00e9r\u00e9my", |
|
"middle": [], |
|
"last": "Ferrero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Agn\u00e8s", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Didier", |
|
"middle": [], |
|
"last": "Schwab", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4162--4169", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00e9r\u00e9my Ferrero, Fr\u00e9d\u00e9ric Agn\u00e8s, Laurent Besacier, and Didier Schwab. 2016. A Multilingual, Multi-style and Multi-granularity Dataset for Cross-language Textual Similarity Detection. In Proceedings of the Tenth International Conference on Language Re- sources and Evaluation (LREC'16). European Lan- guage Resources Association (ELRA), Portoroz, Slovenia, pages 4162-4169. ISLRN: 723-785-513- 738-2. http://islrn.org/resources/723-785-513-738- 2.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Using Word Embedding for Cross-Language Plagiarism Detection", |
|
"authors": [ |
|
{ |
|
"first": "J\u00e9r\u00e9my", |
|
"middle": [], |
|
"last": "Ferrero", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurent", |
|
"middle": [], |
|
"last": "Besacier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Didier", |
|
"middle": [], |
|
"last": "Schwab", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9ric", |
|
"middle": [], |
|
"last": "Agn\u00e8s", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "415--421", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J\u00e9r\u00e9my Ferrero, Laurent Besacier, Didier Schwab, and Fr\u00e9d\u00e9ric Agn\u00e8s. 2017. Using Word Embedding for Cross-Language Plagiarism Detection. In Proceed- ings of the 15th Conference of the European Chap- ter of the Association for Computational Linguistics, (EACL 2017). Association for Computational Lin- guistics, Valencia, Spain, volume 2, pages 415-421. http://aclweb.org/anthology/E/E17/E17-2066.pdf.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Cross-language High Similarity Search using a Conceptual Thesaurus", |
|
"authors": [ |
|
{ |
|
"first": "Parth", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Barr\u00f3n-Cede\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Information Access Evaluation. Multilinguality, Multimodality, and Visual Analytics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/978-3-642-33247-08" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Parth Gupta, Alberto Barr\u00f3n-Cede\u00f1o, and Paolo Rosso. 2012. Cross-language High Similarity Search us- ing a Conceptual Thesaurus. In Information Access Evaluation. Multilinguality, Multimodality, and Vi- sual Analytics. Springer Berlin Heidelberg, Rome, Italy, pages 67-75. https://doi.org/10.1007/978-3- 642-33247-0 8.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "The WEKA Data Mining Software: An Update", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eibe", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Holmes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Pfahringer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Reutemann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "SIGKDD Explorations", |
|
"volume": "11", |
|
"issue": "", |
|
"pages": "10--18", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/1656274.1656278" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Hall, Eibe Frank, Geoffrey Holmes, Bernhard Pfahringer, Peter Reutemann, and Ian H. Witten. 2009. The WEKA Data Mining Software: An Up- date. In SIGKDD Explorations. volume 11, pages 10-18. https://doi.org/10.1145/1656274.1656278.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Scoring, term weighting, and the vector space model", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prabhakar", |
|
"middle": [], |
|
"last": "Raghavan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher D. Manning, Prabhakar Raghavan, and Hinrich Sch\u00fctze. 2008. Introduction to Information Retrieval, Cambridge Univer- sity Press, New York, chapter 6 -\"Scoring, term weighting, and the vector space model\", pages 109-133. ISBN: 9780511809071.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Character N-Gram Tokenization for European Language Text Retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Mcnamee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Mayfield", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "Information Retrieval Proceedings", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "73--97", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paul Mcnamee and James Mayfield. 2004. Charac- ter N-Gram Tokenization for European Language Text Retrieval. In Information Retrieval Proceed- ings. Kluwer Academic Publishers, volume 7, pages 73-97.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "A New Approach for Searching Translated Plagiarism", |
|
"authors": [ |
|
{ |
|
"first": "M\u00e0t\u00e9", |
|
"middle": [], |
|
"last": "Pataki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the 5th International Plagiarism Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--64", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "M\u00e0t\u00e9 Pataki. 2012. A New Approach for Searching Translated Plagiarism. In Proceedings of the 5th In- ternational Plagiarism Conference. Newcastle, UK, pages 49-64.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "European Language Resources Association (ELRA), Istanbul", |
|
"authors": [ |
|
{ |
|
"first": "Slav", |
|
"middle": [], |
|
"last": "Petrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2089--2096", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Slav Petrov, Dipanjan Das, and Ryan McDonald. 2012. A universal part-of-speech tagset. In Proceed- ings of the Eight International Conference on Lan- guage Resources and Evaluation (LREC'12). Euro- pean Language Resources Association (ELRA), Is- tanbul, Turkey, pages 2089-2096.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Cross-Language Plagiarism Detection. In Language Ressources and Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Potthast", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Barr\u00f3n-Cede\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benno", |
|
"middle": [], |
|
"last": "Stein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paolo", |
|
"middle": [], |
|
"last": "Rosso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "45", |
|
"issue": "", |
|
"pages": "45--62", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1007/s10579-009-9114-z" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Potthast, Alberto Barr\u00f3n-Cede\u00f1o, Benno Stein, and Paolo Rosso. 2011. Cross-Language Plagiarism Detection. In Language Ressources and Evaluation. volume 45, pages 45-62. https://doi.org/10.1007/s10579-009-9114-z.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Learning with continuous classes", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Quinlan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1992, |
|
"venue": "Proceedings of the Fifth Australian Joint Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "343--348", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "J. R. Quinlan. 1992. Learning with continuous classes. In Eds. Adams & Sterling, editor, Proceedings of the Fifth Australian Joint Conference on Artificial In- telligence. World Scientific, Singapore, pages 343- 348.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Probabilistic Part-of-Speech Tagging Using Decision Trees", |
|
"authors": [ |
|
{ |
|
"first": "Helmut", |
|
"middle": [], |
|
"last": "Schmid", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1994, |
|
"venue": "Proceedings of the International Conference on New Methods in Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "44--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Helmut Schmid. 1994. Probabilistic Part-of-Speech Tagging Using Decision Trees. In Proceedings of the International Conference on New Methods in Language Processing. Manchester, UK, pages 44- 49.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "DBnary: Wiktionary as a Lemon-Based Multilingual Lexical Resource in RDF", |
|
"authors": [ |
|
{ |
|
"first": "Gilles", |
|
"middle": [], |
|
"last": "S\u00e9rasset", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "In Semantic Web Journal (special issue on Multilingual Linked Open Data)", |
|
"volume": "6", |
|
"issue": "", |
|
"pages": "355--361", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3233/SW-140147" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gilles S\u00e9rasset. 2015. DBnary: Wiktionary as a Lemon-Based Multilingual Lexical Resource in RDF. In Semantic Web Journal (special issue on Multilingual Linked Open Data). volume 6, pages 355-361. https://doi.org/10.3233/SW-140147.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Sentence similarity from word alignment and semantic vector composition", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dls@cu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--153", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "DLS@CU: Sentence similar- ity from word alignment and semantic vector composition. In Proceedings of the 9th Inter- national Workshop on Semantic Evaluation (Se- mEval 2015). Denver, CO, USA, pages 148-153. http://www.aclweb.org/anthology/S15-2027.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Induction of model trees for predicting continuous classes", |
|
"authors": [ |
|
{ |
|
"first": "Yong", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [ |
|
"H" |
|
], |
|
"last": "Witten", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Proceedings of the poster papers of the European Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "128--137", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yong Wang and Ian H. Witten. 1997. Induction of model trees for predicting continuous classes. In Proceedings of the poster papers of the European Conference on Machine Learning. Prague, Czech Republic, pages 128-137.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"content": "<table><tr><td>Methods</td><td>News</td><td>Multi</td><td>Mean</td></tr><tr><td colspan=\"3\">Unsupervised systems</td><td/></tr><tr><td>CL-C3G (1)</td><td colspan=\"3\">0.7522 0.6550 0.7042</td></tr><tr><td>CL-CTS (2)</td><td colspan=\"3\">0.9072 0.8283 0.8682</td></tr><tr><td>CL-WES (3)</td><td colspan=\"3\">0.7028 0.6312 0.6674</td></tr><tr><td>T+WA (4)</td><td colspan=\"3\">0.9060 0.8144 0.8607</td></tr><tr><td>Average (1-2-3-4)</td><td colspan=\"3\">0.8589 0.7824 0.8211</td></tr><tr><td>Average (1-2-4)</td><td colspan=\"3\">0.9051 0.8347 0.8703</td></tr><tr><td>Average (2-3-4)</td><td colspan=\"3\">0.8923 0.8239 0.8585</td></tr><tr><td>Average (2-4)</td><td colspan=\"3\">0.9082 0.8299 0.8695</td></tr><tr><td colspan=\"3\">Supervised systems (fine-tuned fusion)</td><td/></tr><tr><td>GaussianProcesses</td><td colspan=\"3\">0.8712 0.7884 0.8303</td></tr><tr><td>LinearRegression</td><td colspan=\"3\">0.9099 0.8414 0.8761</td></tr><tr><td>MultilayerPerceptron</td><td colspan=\"3\">0.8966 0.7999 0.8488</td></tr><tr><td colspan=\"4\">SimpleLinearRegression 0.9048 0.8144 0.8601</td></tr><tr><td>SMOreg</td><td colspan=\"3\">0.9071 0.8375 0.8727</td></tr><tr><td>Ibk</td><td colspan=\"3\">0.8396 0.7330 0.7869</td></tr><tr><td>Kstar</td><td colspan=\"3\">0.8545 0.8173 0.8361</td></tr><tr><td>LWL</td><td colspan=\"3\">0.8572 0.7589 0.8086</td></tr><tr><td>DecisionTable</td><td colspan=\"3\">0.9139 0.8047 0.8599</td></tr><tr><td>M5Rules</td><td colspan=\"3\">0.9146 0.8406 0.8780</td></tr><tr><td>DecisionStump</td><td colspan=\"3\">0.8329 0.7380 0.7860</td></tr><tr><td>M5P</td><td colspan=\"3\">0.9154 0.8442 0.8802</td></tr><tr><td>RandomForest</td><td colspan=\"3\">0.9109 0.8418 0.8768</td></tr><tr><td>RandomTree</td><td colspan=\"3\">0.8364 0.7262 0.7819</td></tr><tr><td>REPTree</td><td colspan=\"3\">0.8972 0.7992 0.8488</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "(called M5P in" |
|
}, |
|
"TABREF2": { |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Results of the methods on SemEval-2016 STS cross-lingual evaluation dataset." |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td colspan=\"4\">Methods SNLI (4a) WMT (4b) Mean</td></tr><tr><td/><td colspan=\"2\">Our Annotations</td><td/></tr><tr><td>CL-CTS</td><td>0.7981</td><td>0.5248</td><td>0.6614</td></tr><tr><td>Average</td><td>0.8105</td><td>0.4031</td><td>0.6068</td></tr><tr><td>M5P</td><td>0.8622</td><td>0.5374</td><td>0.6998</td></tr><tr><td/><td colspan=\"2\">SemEval Gold Standard</td><td/></tr><tr><td>CL-CTS</td><td>0.8123</td><td>0.1739</td><td>0.4931</td></tr><tr><td>Average</td><td>0.8277</td><td>0.2209</td><td>0.5243</td></tr><tr><td>M5P</td><td>0.8536</td><td>0.1706</td><td>0.5121</td></tr></table>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Official results of our submitted systems on SemEval-2017 STS track 4 evaluation dataset." |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Results of our submitted systems scored on our 120 annotated pairs and on the same 120 SemEval annotated pairs." |
|
} |
|
} |
|
} |
|
} |