|
{ |
|
"paper_id": "P09-1007", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T08:53:34.020190Z" |
|
}, |
|
"title": "Cross Language Dependency Parsing using a Bilingual Lexicon *", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Hong", |
|
"location": { |
|
"addrLine": "Kong 83 Tat Chee Avenue", |
|
"settlement": "Kowloon, Hong Kong", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Hong", |
|
"location": { |
|
"addrLine": "Kong 83 Tat Chee Avenue", |
|
"settlement": "Kowloon, Hong Kong", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Chunyu", |
|
"middle": [], |
|
"last": "Kit", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Hong", |
|
"location": { |
|
"addrLine": "Kong 83 Tat Chee Avenue", |
|
"settlement": "Kowloon, Hong Kong", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Soochow University", |
|
"location": { |
|
"postCode": "215006", |
|
"settlement": "Suzhou", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper proposes an approach to enhance dependency parsing in a language by using a translated treebank from another language. A simple statistical machine translation method, word-byword decoding, where not a parallel corpus but a bilingual lexicon is necessary, is adopted for the treebank translation. Using an ensemble method, the key information extracted from word pairs with dependency relations in the translated text is effectively integrated into the parser for the target language. The proposed method is evaluated in English and Chinese treebanks. It is shown that a translated English treebank helps a Chinese parser obtain a state-ofthe-art result. * The study is partially supported by City University of Hong Kong through the Strategic Research Grant 7002037 and 7002388. The first author is sponsored by a research fellowship from CTL, City University of Hong Kong. 1 It is a tradition to call an annotated syntactic corpus as treebank in parsing community.", |
|
"pdf_parse": { |
|
"paper_id": "P09-1007", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper proposes an approach to enhance dependency parsing in a language by using a translated treebank from another language. A simple statistical machine translation method, word-byword decoding, where not a parallel corpus but a bilingual lexicon is necessary, is adopted for the treebank translation. Using an ensemble method, the key information extracted from word pairs with dependency relations in the translated text is effectively integrated into the parser for the target language. The proposed method is evaluated in English and Chinese treebanks. It is shown that a translated English treebank helps a Chinese parser obtain a state-ofthe-art result. * The study is partially supported by City University of Hong Kong through the Strategic Research Grant 7002037 and 7002388. The first author is sponsored by a research fellowship from CTL, City University of Hong Kong. 1 It is a tradition to call an annotated syntactic corpus as treebank in parsing community.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Although supervised learning methods bring stateof-the-art outcome for dependency parser inferring (McDonald et al., 2005; , a large enough data set is often required for specific parsing accuracy according to this type of methods. However, to annotate syntactic structure, either phrase-or dependency-based, is a costly job. Until now, the largest treebanks 1 in various languages for syntax learning are with around one million words (or some other similar units). Limited data stand in the way of further performance enhancement. This is the case for each individual language at least. But, this is not the case as we observe all treebanks in different languages as a whole. For example, of ten treebanks for CoNLL-2007 shared task, none includes more than 500K tokens, while the sum of tokens from all treebanks is about two million .", |
|
"cite_spans": [ |
|
{ |
|
"start": 99, |
|
"end": 122, |
|
"text": "(McDonald et al., 2005;", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As different human languages or treebanks should share something common, this makes it possible to let dependency parsing in multiple languages be beneficial with each other. In this paper, we study how to improve dependency parsing by using (automatically) translated texts attached with transformed dependency information. As a case study, we consider how to enhance a Chinese dependency parser by using a translated English treebank. What our method relies on is not the close relation of the chosen language pair but the similarity of two treebanks, this is the most different from the previous work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Two main obstacles are supposed to confront in a cross-language dependency parsing task. The first is the cost of translation. Machine translation has been shown one of the most expensive language processing tasks, as a great deal of time and space is required to perform this task. In addition, a standard statistical machine translation method based on a parallel corpus will not work effectively if it is not able to find a parallel corpus that right covers source and target treebanks. However, dependency parsing focuses on the relations of word pairs, this allows us to use a dictionarybased translation without assuming a parallel corpus available, and the training stage of translation may be ignored and the decoding will be quite fast in this case. The second difficulty is that the outputs of translation are hardly qualified for the parsing purpose. The most challenge in this aspect is morphological preprocessing. We regard that the morphological issue should be handled aiming at the specific language, our solution here is to use character-level features for a target language like Chinese.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows. The next section presents some related existing work. Section 3 describes the procedure on tree-bank translation and dependency transformation. Section 4 describes a dependency parser for Chinese as a baseline. Section 5 describes how a parser can be strengthened from the translated treebank. The experimental results are reported in Section 6. Section 7 looks into a few issues concerning the conditions that the proposed approach is suitable for. Section 8 concludes the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "As this work is about exploiting extra resources to enhance an existing parser, it is related to domain adaption for parsing that has been draw some interests in recent years. Typical domain adaptation tasks often assume annotated data in new domain absent or insufficient and a large scale unlabeled data available. As unlabeled data are concerned, semi-supervised or unsupervised methods will be naturally adopted. In previous works, two basic types of methods can be identified to enhance an existing parser from additional resources. The first is usually focus on exploiting automatic generated labeled data from the unlabeled data (Steedman et al., 2003; McClosky et al., 2006; Reichart and Rappoport, 2007; Sagae and Tsujii, 2007; Chen et al., 2008) , the second is on combining supervised and unsupervised methods, and only unlabeled data are considered (Smith and Eisner, 2006; Wang and Schuurmans, 2008; Koo et al., 2008) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 636, |
|
"end": 659, |
|
"text": "(Steedman et al., 2003;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 660, |
|
"end": 682, |
|
"text": "McClosky et al., 2006;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 683, |
|
"end": 712, |
|
"text": "Reichart and Rappoport, 2007;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 736, |
|
"text": "Sagae and Tsujii, 2007;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 755, |
|
"text": "Chen et al., 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 861, |
|
"end": 885, |
|
"text": "(Smith and Eisner, 2006;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 886, |
|
"end": 912, |
|
"text": "Wang and Schuurmans, 2008;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 913, |
|
"end": 930, |
|
"text": "Koo et al., 2008)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our purpose in this study is to obtain a further performance enhancement by exploiting treebanks in other languages. This is similar to the above first type of methods, some assistant data should be automatically generated for the subsequent processing. The differences are what type of data are concerned with and how they are produced. In our method, a machine translation method is applied to tackle golden-standard treebank, while all the previous works focus on the unlabeled data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Although cross-language technique has been used in other natural language processing tasks, it is basically new for syntactic parsing as few works were concerned with this issue. The reason is straightforward, syntactic structure is too complicated to be properly translated and the cost of translation cannot be afforded in many cases. However, we empirically find this difficulty may be dramatically alleviated as dependencies rather than phrases are used for syntactic structure representation. Even the translation outputs are not so good as the expected, a dependency parser for the target language can effectively make use of them by only considering the most related information extracted from the translated text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The basic idea to support this work is to make use of the semantic connection between different languages. In this sense, it is related to the work of (Merlo et al., 2002) and (Burkett and Klein, 2008) . The former showed that complementary information about English verbs can be extracted from their translations in a second language (Chinese) and the use of multilingual features improves classification performance of the English verbs. The latter iteratively trained a model to maximize the marginal likelihood of tree pairs, with alignments treated as latent variables, and then jointly parsing bilingual sentences in a translation pair. The proposed parser using features from monolingual and mutual constraints helped its log-linear model to achieve better performance for both monolingual parsers and machine translation system. In this work, cross-language features will be also adopted as the latter work. However, although it is not essentially different, we only focus on dependency parsing itself, while the parsing scheme in (Burkett and Klein, 2008) based on a constituent representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 171, |
|
"text": "(Merlo et al., 2002)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 201, |
|
"text": "(Burkett and Klein, 2008)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Among of existing works that we are aware of, we regard that the most similar one to ours is (Zeman and Resnik, 2008), who adapted a parser to a new language that is much poorer in linguistic resources than the source language. However, there are two main differences between their work and ours. The first is that they considered a pair of sufficiently related languages, Danish and Swedish, and made full use of the similar characteristics of two languages. Here we consider two quite different languages, English and Chinese. As fewer language properties are concerned, our approach holds the more possibility to be extended to other language pairs than theirs. The second is that a parallel corpus is required for their work and a strict statistical machine translation procedure was performed, while our approach holds a merit of simplicity as only a bilingual lexicon is required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "As a case study, this work will be conducted between the source language, English, and the target language, Chinese, namely, we will investigate how a translated English treebank enhances a Chinese dependency parser. For English data, the Penn Treebank (PTB) 3 is used. The constituency structures is converted to dependency trees by using the same rules as (Yamada and Matsumoto, 2003) and the standard training/development/test split is used. However, only training corpus (sections 2-21) is used for this study. For Chinese data, the Chinese Treebank (CTB) version 4.0 is used in our experiments. The same rules for conversion and the same data split is adopted as (Wang et al., 2007) : files 1-270 and 400-931 as training, 271-300 as testing and files 301-325 as development. We use the gold standard segmentation and part-of-speech (POS) tags in both treebanks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 386, |
|
"text": "(Yamada and Matsumoto, 2003)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 687, |
|
"text": "(Wang et al., 2007)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "As a bilingual lexicon is required for our task and none of existing lexicons are suitable for translating PTB, two lexicons, LDC Chinese-English Translation Lexicon Version 2.0 (LDC2002L27), and an English to Chinese lexicon in StarDict 2 , are conflated, with some necessary manual extensions, to cover 99% words appearing in the PTB (the most part of the untranslated words are named entities.). This lexicon includes 123K entries.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "A word-by-word statistical machine translation strategy is adopted to translate words attached with the respective dependency information from the source language to the target one. In detail, a word-based decoding is used, which adopts a loglinear framework as in (Och and Ney, 2002) with only two features, translation model and language model,", |
|
"cite_spans": [ |
|
{ |
|
"start": 265, |
|
"end": 284, |
|
"text": "(Och and Ney, 2002)", |
|
"ref_id": "BIBREF13" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "P (c|e) = exp[ 2 i=1 \u03bb i h i (c, e)] c exp[ 2 i=1 \u03bb i h i (c, e)]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h 1 (c, e) = log(p \u03b3 (c|e))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the translation model, which is converted from the bilingual lexicon, and", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "h 2 (c, e) = log(p \u03b8 (c))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "is the language model, a word trigram model trained from the CTB. In our experiment, we set two weights \u03bb 1 = \u03bb 2 = 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
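A minimal sketch (not the authors' code) of the two-feature log-linear score above, with lambda_1 = lambda_2 = 1. The toy LEXICON and the trigram scorer are hypothetical stand-ins; with no parallel corpus, p_gamma(c|e) is taken as uniform over a word's lexicon entries.

```python
import math

# Hypothetical bilingual lexicon: English word -> candidate Chinese translations.
LEXICON = {"river": ["河", "江"], "bank": ["银行", "岸"]}

def h1_translation(c_sent, e_sent):
    """h1 = log p_gamma(c|e), uniform over each word's lexicon options."""
    logp = 0.0
    for c, e in zip(c_sent, e_sent):
        options = LEXICON.get(e, [e])  # untranslated words pass through
        logp += math.log(1.0 / len(options)) if (c in options or c == e) else float("-inf")
    return logp

def h2_language_model(c_sent, trigram_logprob):
    """h2 = log p_theta(c), a word trigram model over the Chinese candidate."""
    return sum(trigram_logprob(tuple(c_sent[max(0, i - 2):i]), w)
               for i, w in enumerate(c_sent))

def loglinear_score(c_sent, e_sent, trigram_logprob, lam1=1.0, lam2=1.0):
    """lam1*h1 + lam2*h2; the softmax normalizer cancels when ranking
    candidate translations c for a fixed source sentence e."""
    return (lam1 * h1_translation(c_sent, e_sent)
            + lam2 * h2_language_model(c_sent, trigram_logprob))
```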
|
{ |
|
"text": "The conversion process of the source treebank is completed by three steps as the following: 1. Bind POS tag and dependency relation of a word with itself; 2. Translate the PTB text into Chinese word by word. Since we use a lexicon rather than a parallel corpus to estimate the translation probabilities, we simply assign uniform probabilities to all translation options. Thus the decoding process is actually only determined by the language model. Similar to the \"bag translation\" experiment in (Brown et al., 1990) , the candidate target sentences made up by a sequence of the optional target words are ranked by the trigram language model. The output sentence will be generated only if it is with maximum probability as follows,", |
|
"cite_spans": [ |
|
{ |
|
"start": 495, |
|
"end": 515, |
|
"text": "(Brown et al., 1990)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "c = argmax{p \u03b8 (c)p \u03b3 (c|e)} = argmax p \u03b8 (c) = argmax p \u03b8 (w c )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "A beam search algorithm is used for this process to find the best path from all the translation options; As the training stage, especially, the most time-consuming alignment sub-stage, is skipped, the translation only includes a decoding procedure that takes about 4.5 hours for about one million words of the PTB in a 2.8GHz PC. 3. After the target sentence is generated, the attached POS tags and dependency information of each English word will also be transferred to each corresponding Chinese word. As word order is often changed after translation, the pointer of each dependency relationship, represented by a serial number, should be re-calculated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
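A minimal sketch of the decoding step described above: with uniform translation probabilities, ranking reduces to the language model, and a beam keeps the best partial hypotheses. For simplicity this sketch keeps the source word order, while the paper's bag-translation-style decoding may also reorder words; the toy LM is a stand-in.

```python
import math

def decode_word_by_word(e_words, lexicon, lm_logprob, beam_width=5):
    """Beam search over per-word translation options, scored only by the
    language model (uniform translation probabilities cancel in the argmax)."""
    beam = [([], 0.0)]  # (partial Chinese sequence, accumulated log prob)
    for e in e_words:
        options = lexicon.get(e, [e])  # untranslated words pass through
        expanded = [(seq + [c], logp + lm_logprob(tuple(seq[-2:]), c))
                    for seq, logp in beam for c in options]
        beam = sorted(expanded, key=lambda h: h[1], reverse=True)[:beam_width]
    return beam[0][0]  # the sequence with maximum p_theta(c)

# toy usage with a unigram stand-in for the trigram model
freq = {"河": 3, "江": 1, "银行": 5, "岸": 1}
lm = lambda ctx, w: math.log(freq.get(w, 1) / 10.0)
print(decode_word_by_word(["river", "bank"],
                          {"river": ["河", "江"], "bank": ["银行", "岸"]}, lm))
```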
|
{ |
|
"text": "Although we try to perform an exact word-byword translation, this aim cannot be fully reached in fact, as the following case is frequently encountered, multiple English words have to be translated into one Chinese word. To solve this problem, we use a policy that lets the output Chinese word only inherits the attached information of the highest syntactic head in the original multiple English words.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
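A sketch of the inheritance policy just described, under our reading: among the English tokens that collapse into one Chinese word, the token whose head lies outside the group is the highest syntactic head, and its annotation is kept. The helper name and data layout are hypothetical.

```python
def highest_head(token_group, heads):
    """Return the member of token_group whose head is outside the group;
    that token is the highest syntactic head, and its POS/dependency
    annotation is inherited by the single output Chinese word.
    heads[i] is the head index of token i (-1 for ROOT)."""
    group = set(token_group)
    for tok in token_group:
        if heads[tok] not in group:
            return tok
    return token_group[0]  # fallback for degenerate annotations

# e.g. "pick"(3) heads "up"(4); the fused Chinese word keeps token 3's tags
print(highest_head([3, 4], {1: -1, 3: 1, 4: 3}))  # -> 3
```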
|
{ |
|
"text": "4 Dependency Parsing: Baseline", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Translation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "According to (McDonald and Nivre, 2007) , all data-driven models for dependency parsing that have been proposed in recent years can be described as either graph-based or transition-based. Although the former will be also used as comparison, the latter is chosen as the main parsing framework by this study for the sake of efficiency. In detail, a shift-reduce method is adopted as in (Nivre, 2003) , where a classifier is used to make a parsing decision step by step. In each step, the classifier checks a word pair, namely, s, the top of a stack that consists of the processed words, and, i, the first word in the (input) unprocessed sequence, to determine if a dependent relation should be established between them. Besides two dependency arc building actions, a shift action and a reduce action are also defined to maintain the stack and the unprocessed sequence. In this work, we adopt a left-to-right arc-eager parsing model, that means that the parser scans the input sequence from left to right and right dependents are attached to their heads as soon as possible . While memory-based and margin-based learning approaches such as support vector machines are popularly applied to shift-reduce parsing, we apply maximum entropy model as the learning model for efficient training and adopting overlapped features as our work in (Zhao and Kit, 2008) , especially, those character-level ones for Chinese parsing. Our implementation of maximum entropy adopts L-BFGS algorithm for parameter optimization as usual.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 39, |
|
"text": "(McDonald and Nivre, 2007)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 384, |
|
"end": 397, |
|
"text": "(Nivre, 2003)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 1332, |
|
"end": 1352, |
|
"text": "(Zhao and Kit, 2008)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Model and Features", |
|
"sec_num": "4.1" |
|
}, |
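A minimal sketch of the left-to-right arc-eager transition system described above. The next_action callback stands in for the maximum entropy classifier; illegal predictions fall back to SHIFT. This illustrates the transition system only, not the authors' parser.

```python
def arc_eager_parse(n_words, next_action):
    """Arc-eager shift-reduce parsing over token indices 0..n_words-1.
    next_action(stack, buffer, arcs) returns one of
    'LEFT-ARC', 'RIGHT-ARC', 'REDUCE', 'SHIFT'; arcs maps dependent -> head."""
    stack, buffer, arcs = [], list(range(n_words)), {}
    while buffer:
        act = next_action(stack, buffer, arcs)
        if act == "LEFT-ARC" and stack and stack[-1] not in arcs:
            arcs[stack.pop()] = buffer[0]   # s <- i (s must not yet have a head)
        elif act == "RIGHT-ARC" and stack:
            arcs[buffer[0]] = stack[-1]     # s -> i, attached as soon as possible
            stack.append(buffer.pop(0))
        elif act == "REDUCE" and stack and stack[-1] in arcs:
            stack.pop()                     # s already has a head
        else:
            stack.append(buffer.pop(0))     # SHIFT (also the legal fallback)
    return arcs
```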
|
{ |
|
"text": "With notations defined in Table 1, a feature set as shown in Table 2 is adopted. Here, we explain some terms in Tables 1 and 2 . We used a large scale feature selection approach as in to obtain the feature set in Table 2 . Some feature notations in this paper are also borrowed from that work.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 69, |
|
"text": "Table 1, a feature set as shown in Table 2", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 127, |
|
"text": "Tables 1 and 2", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 214, |
|
"end": 221, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning Model and Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The feature curroot returns the root of a partial parsing tree that includes a specified node. The feature charseq returns a character sequence whose members are collected from all identified children for a specified word.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Model and Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Table 2 , as for concatenating multiple substrings into a feature string, there are two ways, seq and bag. The former is to concatenate all substrings without do something special. The latter will remove all duplicated substrings, sort the rest and concatenate all at last.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Learning Model and Features", |
|
"sec_num": "4.1" |
|
}, |
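The two concatenation modes, as we read them; the separator character is an assumption for illustration.

```python
def seq(substrings, sep="|"):
    """seq: concatenate all substrings as they are."""
    return sep.join(substrings)

def bag(substrings, sep="|"):
    """bag: drop duplicates, sort the rest, then concatenate."""
    return sep.join(sorted(set(substrings)))

assert seq(["b", "a", "b"]) == "b|a|b"
assert bag(["b", "a", "b"]) == "a|b"
```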
|
{ |
|
"text": "Note that we systemically use a group of character-level features. Surprisingly, as to our best knowledge, this is the first report on using this type of features in Chinese dependency parsing. Although (McDonald et al., 2005) used the prefix of each word form instead of word form itself as features, character-level features here for Chinese is essentially different from that. As Chinese is basically a character-based written language. Character plays an important role in many means, most characters can be formed as single-character words, and Chinese itself is character-order free rather than word-order free to some extent. In addition, there is often a close connection between the meaning of a Chinese word and its first or last character.", |
|
"cite_spans": [ |
|
{ |
|
"start": 203, |
|
"end": 226, |
|
"text": "(McDonald et al., 2005)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Learning Model and Features", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "In Table 2 , the feature preact n returns the previous parsing action type, and the subscript n stands for the action order before the current action. These are a group of Markovian features. Without this type of features, a shift-reduce parser may directly scan through an input sequence in linear time. Otherwise, following the work of (Duan et al., 2007) and (Zhao, 2009) , the parsing algorithm is to search a parsing action sequence with the maximal probability.", |
|
"cite_spans": [ |
|
{ |
|
"start": 338, |
|
"end": 357, |
|
"text": "(Duan et al., 2007)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 362, |
|
"end": 374, |
|
"text": "(Zhao, 2009)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Parsing using a Beam Search Algorithm", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "S d i = argmax i p(d i |d i\u22121 d i\u22122 ...),", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing using a Beam Search Algorithm", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "where S d i is the object parsing action sequence, p(d i |d i\u22121 ...) is the conditional probability, and d i is i-th parsing action. We use a beam search algorithm to find the object parsing action sequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Parsing using a Beam Search Algorithm", |
|
"sec_num": "4.2" |
|
}, |
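A sketch of the beam search over parsing action sequences maximizing the product of conditional probabilities above. The conditional model is a stand-in for the maximum entropy classifier with the Markovian preact_n features, and for brevity the sketch ignores per-state action legality.

```python
import math

def best_action_sequence(n_steps, actions, cond_logprob, beam_width=5):
    """Beam search for the sequence maximizing prod_i p(d_i | d_{i-1}, d_{i-2}, ...)."""
    beam = [([], 0.0)]  # (action history, accumulated log prob)
    for _ in range(n_steps):
        expanded = [(hist + [a], lp + cond_logprob(tuple(hist), a))
                    for hist, lp in beam for a in actions]
        beam = sorted(expanded, key=lambda h: h[1], reverse=True)[:beam_width]
    return beam[0][0]

# toy conditional model that prefers alternating actions
toy = lambda hist, a: math.log(0.7 if not hist or hist[-1] != a else 0.3)
print(best_action_sequence(4, ["SHIFT", "RIGHT-ARC"], toy))
```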
|
{ |
|
"text": "As we cannot expect too much for a word-by-word translation, only word pairs with dependency relation in translated text are extracted as useful and reliable information. Then some features based on a query in these word pairs according to the current parsing state (namely, words in the current stack and input) will be derived to enhance the Chinese parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "A translation sample can be seen in Figure 1 . Although most words are satisfactorily translated, to generate effective features, what we still have to consider at first is the inconsistence between the translated text and the target text.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 36, |
|
"end": 44, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In Chinese, word lemma is always its word form itself, this is a convenient characteristic in computational linguistics and makes lemma features unnecessary for Chinese parsing at all. However, Chinese has a special primary processing task, i.e., word segmentation. Unfortunately, word definitions for Chinese are not consistent in various linguistical views, for example, seven segmentation conventions for computational purpose are formally proposed since the first Bakeoff 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Note that CTB or any other Chinese treebank has its own word segmentation guideline. Chinese word should be strictly segmented according to the guideline before POS tags and dependency relations are annotated. However, as we say the English treebank is translated into Chinese word by word, Chinese words in the translated text are exactly some entries from the bilingual lexicon, they are actually irregular phrases, short sentences or something else rather than words that follows any existing word segmentation convention. If the bilingual lexicon is not carefully selected or refined according to the treebank where the Chinese parser is trained from, then there will be a serious inconsistence on word segmentation conventions between the translated and the target treebanks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "As all concerned feature values here are calculated from the searching result in the translated word pair list according to the current parsing state, and a complete and exact match cannot be always expected, our solution to the above segmentation issue is using a partial matching strategy based on characters that the words include.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Above all, a translated word pair list, L, is extracted from the translated treebank. Each item in the list consists of three elements, dependant word (dp), head word (hd) and the frequency of this pair in the translated treebank, f .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
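A sketch of extracting the list L of (dp, hd, f) items from the translated, dependency-annotated sentences; the input format is an assumption.

```python
from collections import Counter

def build_word_pair_list(sentences):
    """Build L: (dependant word dp, head word hd) -> frequency f.
    Each sentence is a list of (word, head_index) pairs, head_index -1 for ROOT."""
    L = Counter()
    for sent in sentences:
        words = [w for w, _ in sent]
        for w, head in sent:
            if head >= 0:
                L[(w, words[head])] += 1  # (dp, hd)
    return L

L = build_word_pair_list([[("银行", 1), ("倒闭", -1)]])
print(L)  # Counter({('银行', '倒闭'): 1})
```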
|
{ |
|
"text": "There are two basic strategies to organize the features derived from the translated word pair list. The first is to find the most matching word pair in the list and extract some properties from it, such as the matched length, part-of-speech tags and so on, to generate features. Note that a matching priority serial should be defined aforehand in this case. The second is to check every matching models between the current parsing state and the partially matched word pair. In an early version of our approach, the former was implemented. However, It is proven to be quite inefficient in computation. Thus we adopt the second strategy at last. Two matching model feature functions, \u03c6(\u2022) and \u03c8(\u2022), are correspondingly defined as follows. The return value of \u03c6(\u2022) or \u03c8(\u2022) is the logarithmic frequency of the matched item. There are four input parameters required by the function \u03c6(\u2022). Two parameters of them are about which part of the stack(input) words is chosen, and other two are about which part of each item in the translated word pair is chosen. These parameters could be set to f ull or char n as shown in Table 1 , where n = ..., \u22122, \u22121, 1, 2, .... For example, a possible feature could be \u03c6(s.f ull, i.char 1 , dp.f ull, hd.char 1 ), it tries to find a match in L by comparing stack word and dp word, and the first character of input word Table 3 : Features based on the translated treebank \u03c6 (i.char3, s .f ull, dp.char3, hd.f ull) +i.char3 +s .f orm \u03c6 (i.char3, s.char2, dp.char3, hd.char2)+s.char2 \u03c6(i.char3, s.f ull, dp.char3, hd.char2)+s.f orm \u03c8(s .char\u22122, hd.char\u22122, head)+i.pos+s .pos \u03c6(i.char3, s.f ull, dp.char3, hd.char2)+s.f ull \u03c6(s .f ull, i.char4, dp.f ull, hd.char4)+s .pos+i.pos \u03c8(i.f ull, hd.char2, root)+i.pos+s.pos \u03c8(i.f ull, hd.char2, root)+i.pos+s .pos \u03c8(s.f ull, dp.f ull, dependant) One parameter is about which part of the stack(input) words is chosen, and the other is about which part of each item in the translated word pair is chosen. The third is about the matching type that may be set to dependant, head, or root. For example, the function \u03c8(i.char 1 , hd.f ull, root) tries to find a match in L by comparing the first character of input word and the whole dp word. If such a match item in L is found, then \u03c8(\u2022) returns log(f ) as hd occurs as ROOT f times.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1401, |
|
"end": 1440, |
|
"text": "(i.char3, s .f ull, dp.char3, hd.f ull)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1462, |
|
"end": 1812, |
|
"text": "(i.char3, s.char2, dp.char3, hd.char2)+s.char2 \u03c6(i.char3, s.f ull, dp.char3, hd.char2)+s.f orm \u03c8(s .char\u22122, hd.char\u22122, head)+i.pos+s .pos \u03c6(i.char3, s.f ull, dp.char3, hd.char2)+s.f ull \u03c6(s .f ull, i.char4, dp.f ull, hd.char4)+s .pos+i.pos \u03c8(i.f ull, hd.char2, root)+i.pos+s.pos \u03c8(i.f ull, hd.char2, root)+i.pos+s .pos \u03c8(s.f ull, dp.f ull, dependant)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1112, |
|
"end": 1119, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
}, |
|
{ |
|
"start": 1347, |
|
"end": 1354, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
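A sketch of the phi matching-model function under our reading of the text: select a part (full or char_n) of the stack and input words, compare against the corresponding parts of (dp, hd) items in L, and return log(f) on a match. Details such as the value returned on no match are assumptions.

```python
import math

def part(word, spec):
    """'full' keeps the whole word; ('char', n) picks the n-th character,
    counting from the end when n is negative (the char_n notation of Table 1)."""
    if spec == "full":
        return word
    _, n = spec
    idx = n - 1 if n > 0 else len(word) + n
    return word[idx] if 0 <= idx < len(word) else None

def phi(s_word, i_word, s_spec, i_spec, dp_spec, hd_spec, L):
    """Matching-model feature: log frequency of the first (dp, hd) item in L
    whose chosen parts match the chosen parts of the stack and input words."""
    a, b = part(s_word, s_spec), part(i_word, i_spec)
    for (dp, hd), f in L.items():
        if (a is not None and b is not None
                and a == part(dp, dp_spec) and b == part(hd, hd_spec)):
            return math.log(f)
    return 0.0  # assumed value when nothing in L matches

# e.g. phi(s.full, i.char_1, dp.full, hd.char_1)
L = {("银行", "倒闭"): 2}
print(phi("银行", "倒闭", "full", ("char", 1), "full", ("char", 1), L))
```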
|
{ |
|
"text": "As having observed that CTB and PTB share a similar POS guideline. A POS pair list from PTB is also extract. Two types of features, rootscore and pairscore are used to make use of such information. Both of them returns the logarithmic value of the frequency for a given dependent event. The difference is, rootscore counts for the given POS tag occurring as ROOT, and pairscore counts for two POS tag combination occurring for a dependent relationship.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
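A sketch of the rootscore and pairscore counts, assuming POS-annotated dependency trees from the source treebank as input; the data layout is an assumption.

```python
import math
from collections import Counter

def build_pos_scores(sentences):
    """Each sentence is a list of (pos_tag, head_index), head_index -1 for ROOT.
    rootscore(pos): log frequency of pos occurring as ROOT;
    pairscore(dep_pos, head_pos): log frequency of the POS combination
    occurring in a dependency relationship."""
    roots, pairs = Counter(), Counter()
    for sent in sentences:
        tags = [t for t, _ in sent]
        for t, head in sent:
            if head < 0:
                roots[t] += 1
            else:
                pairs[(t, tags[head])] += 1
    rootscore = lambda pos: math.log(roots[pos]) if roots[pos] else 0.0
    pairscore = lambda d, h: math.log(pairs[(d, h)]) if pairs[(d, h)] else 0.0
    return rootscore, pairscore

rootscore, pairscore = build_pos_scores([[("NN", 1), ("VV", -1)]])
print(rootscore("VV"), pairscore("NN", "VV"))  # 0.0 0.0 (each seen once: log 1)
```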
|
{ |
|
"text": "A full adapted feature list that is derived from the translated word pairs is in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 88, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Exploiting the Translated Treebank", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The quality of the parser is measured by the parsing accuracy or the unlabeled attachment score (UAS), i.e., the percentage of tokens with correct head. Two types of scores are reported for comparison: \"UAS without p\" is the UAS score without all punctuation tokens and \"UAS with p\" is the one with all punctuation tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "6" |
|
}, |
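A sketch of the UAS computation described above; the punctuation test is a crude stand-in for the evaluation script's actual token filter.

```python
def uas(gold_heads, pred_heads, tokens, with_punct=True):
    """Unlabeled attachment score: fraction of tokens whose predicted head
    index equals the gold head index. 'UAS without p' skips punctuation."""
    is_punct = lambda w: all(not ch.isalnum() for ch in w)  # stand-in test
    kept = [(g, p) for g, p, w in zip(gold_heads, pred_heads, tokens)
            if with_punct or not is_punct(w)]
    return sum(g == p for g, p in kept) / len(kept) if kept else 0.0

print(uas([1, -1, 1], [1, -1, 0], ["他", "走", "。"], with_punct=False))  # 1.0
```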
|
{ |
|
"text": "The results with different feature sets are in Table 4. As the features preact n are involved, a beam search algorithm with width 5 is used for parsing, otherwise, a simple shift-reduce decoding is used. It is observed that the features derived from the translated text bring a significant performance improvement as high as 1.3%. b +T: using features derived from the translated text as in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 391, |
|
"end": 398, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "To compare our parser to the state-of-the-art counterparts, we use the same testing data as (Wang et al., 2005) did, selecting the sentences length up to 40. Table 5 shows the results achieved by other researchers and ours (UAS with p), which indicates that our parser outperforms any other ones 4 . However, our results is only slightly better than that of (Chen et al., 2008) as only sentences whose lengths are less than 40 are considered. As our full result is much better than the latter, this comparison indicates that our approach improves the performance for those longer sentences. (Wang et al., 2007) -0.866 (Chen et al., 2008) 0.852 0.884 Ours 0.861 0.889 a This results was reported in (Wang et al., 2007) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 92, |
|
"end": 111, |
|
"text": "(Wang et al., 2005)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 358, |
|
"end": 377, |
|
"text": "(Chen et al., 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 610, |
|
"text": "(Wang et al., 2007)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 618, |
|
"end": 637, |
|
"text": "(Chen et al., 2008)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 717, |
|
"text": "(Wang et al., 2007)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 158, |
|
"end": 165, |
|
"text": "Table 5", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "The experimental results in (McDonald and Nivre, 2007) show a negative impact on the parsing accuracy from too long dependency relation. For the proposed method, the improvement relative to dependency length is shown in Figure 2 . From the figure, it is seen that our method gives observable better performance when dependency lengths are larger than 4. Although word order is changed, the results here show that the useful information from the translated treebank still help those long distance dependencies. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 54, |
|
"text": "(McDonald and Nivre, 2007)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 220, |
|
"end": 228, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Evaluation Results", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "If a treebank in the source language can help improve parsing in the target language, then there must be something common between these two languages, or more precisely, these two corresponding treebanks. (Zeman and Resnik, 2008) assumed that the morphology and syntax in the language pair should be very similar, and that is so for the language pair that they considered, Danish and Swedish, two very close north European languages. Thus it is somewhat surprising that we show a translated English treebank may help Chinese parsing, as English and Chinese even belong to two different language systems. However, it will not be so strange if we recognize that PTB and CTB share very similar guidelines on POS and syntactics annotation. Since it will be too abstract in discussing the details of the annotation guidelines, we look into the similarities of two treebanks from the matching degree of two word pair lists. The reason is that the effectiveness of the proposed method actually relies on how many word pairs at every parsing states can find their full or partial matched partners in the translated word pair list. Table 6 shows such a statistics on the matching degree distribution from all training samples for Chinese parsing. The statistics in the table suggest that most to-be-check word pairs during parsing have a full or partial hitting in the translated word pair list. The latter then obtains an opportunity to provide a great deal of useful guideline information to help determine how the former should be tackled. Therefore we have cause for attributing the effectiveness of the proposed method to the similarity of these two treebanks. From Table 6 , we also find that the partial matching strategy defined in Section 5 plays a very important role in improving the whole matching degree. Note that our approach is not too related to the characteristics of two languages. Our discussion here brings an interesting issue, which difference is more important in cross language processing, between two languages themselves or the corresponding annotated corpora? This may be extensively discussed in the future work. Note that only a bilingual lexicon is adopted in our approach. We regard it one of the most merits for our approach. A lexicon is much easier to be obtained than an annotated corpus. One of the remained question about this work is if the bilingual lexicon should be very specific for this kind of tasks. According to our experiences, actually, it is not so sensitive to choose a highly refined lexicon or not. We once found many words, mostly named entities, were outside the lexicon. Thus we managed to collect a named entity translation dictionary to enhance the original one. However, this extra effort did not receive an observable performance improvement in return. Finally we realize that a lexicon that can guarantee two word pair lists highly matched is sufficient for this work, and this requirement may be conveniently satisfied only if the lexicon consists of adequate highfrequent words from the source treebank.", |
|
"cite_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 229, |
|
"text": "(Zeman and Resnik, 2008)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1123, |
|
"end": 1130, |
|
"text": "Table 6", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 1662, |
|
"end": 1669, |
|
"text": "Table 6", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We propose a method to enhance dependency parsing in one language by using a translated treebank from another language. A simple statistical machine translation technique, word-by-word decoding, where only a bilingual lexicon is necessary, is used to translate the source treebank. As dependency parsing is concerned with the relations of word pairs, only those word pairs with dependency relations in the translated treebank are chosen to generate some additional features to enhance the parser for the target language. The experimental results in English and Chinese treebanks show the proposed method is effective and helps the Chinese parser in this work achieve a state-of-the-art result.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "Note that our method is evaluated in two treebanks with a similar annotation style and it avoids using too many linguistic properties. Thus the method is in the hope of being used in other similarly annotated treebanks 5 . For an immediate example, we may adopt a translated Chinese treebank to improve English parsing. Although there are still something to do, the remained key work has been as simple as considering how to determine the matching strategy for searching the translated word pair list in English according to the framework of our method. .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "8" |
|
}, |
|
{ |
|
"text": "StarDict is an open source dictionary software, available at http://stardict.sourceforge.net/.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Bakeoff is a Chinese processing share task held by SIGHAN.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "There is a slight exception: using the same data splitting,(Yu et al., 2008) reported UAS without p as 0.873 versus ours, 0.870.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For example, Catalan and Spanish treebanks from the AnCora(-Es/Ca) Multilevel Annotated Corpus that are annotated by the Universitat de Barcelona (CLiC-UB) and the Universitat Polit cnica de Catalunya (UPC).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We'd like to give our thanks to three anonymous reviewers for their insightful comments, Dr. Chen Wenliang for for helpful discussions and Mr. Liu Jun for helping us fix a bug in our scoring program.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "A statistical approach to machine translation", |
|
"authors": [ |
|
{ "first": "Peter", "middle": ["F"], "last": "Brown", "suffix": "" },

{ "first": "John", "middle": [], "last": "Cocke", "suffix": "" },

{ "first": "Stephen", "middle": ["A Della"], "last": "Pietra", "suffix": "" },

{ "first": "Vincent", "middle": ["J Della"], "last": "Pietra", "suffix": "" },

{ "first": "Fredrick", "middle": [], "last": "Jelinek", "suffix": "" },

{ "first": "John", "middle": ["D"], "last": "Lafferty", "suffix": "" },

{ "first": "Robert", "middle": ["L"], "last": "Mercer", "suffix": "" },

{ "first": "Paul", "middle": ["S"], "last": "Roossin", "suffix": "" }
|
], |
|
"year": 1990, |
|
"venue": "Computational Linguistics", |
|
"volume": "16", |
|
"issue": "2", |
|
"pages": "79--85", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peter F. Brown, John Cocke, Stephen A. Della Pietra, Vincent J. Della Pietra, Fredrick Jelinek, John D. Lafferty, Robert L. Mercer, and Paul S. Roossin. 1990. A statistical approach to machine translation. Computational Linguistics, 16(2):79-85.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Two languages are better than one (for syntactic parsing)", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Burkett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Klein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "EMNLP-2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "877--886", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Burkett and Dan Klein. 2008. Two lan- guages are better than one (for syntactic parsing). In EMNLP-2008, pages 877-886, Honolulu, Hawaii, USA.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Dependency parsing with short dependency relations in unlabeled data", |
|
"authors": [ |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kiyotaka", |
|
"middle": [], |
|
"last": "Uchimoto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yujie", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hitoshi", |
|
"middle": [], |
|
"last": "Isahara", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of IJCNLP-2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wenliang Chen, Daisuke Kawahara, Kiyotaka Uchi- moto, Yujie Zhang, and Hitoshi Isahara. 2008. De- pendency parsing with short dependency relations in unlabeled data. In Proceedings of IJCNLP-2008, Hyderabad, India, January 8-10.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Probabilistic parsing action models for multi-lingual dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Xiangyu", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "940--946", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiangyu Duan, Jun Zhao, and Bo Xu. 2007. Proba- bilistic parsing action models for multi-lingual de- pendency parsing. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, pages 940-946, Prague, Czech, June 28-30.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Single malt or blended? a study in multilingual parser optimization", |
|
"authors": [ |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "G\u00fclsen", |
|
"middle": [], |
|
"last": "Eryi\u01e7it", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Be\u00e1ta", |
|
"middle": [], |
|
"last": "Megyesi", |
|
"suffix": "" |
|
},

{ "first": "Mattias", "middle": [], "last": "Nilsson", "suffix": "" },

{ "first": "Markus", "middle": [], "last": "Saers", "suffix": "" }
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "933--939", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Johan Hall, Jens Nilsson, Joakim Nivre, G\u00fclsen Eryi\u01e7it, Be\u00e1ta Megyesi, Mattias Nils- son, and Markus Saers. 2007. Single malt or blended? a study in multilingual parser optimiza- tion. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, pages 933-939, Prague, Czech, June.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Simple semi-supervised dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Terry", |
|
"middle": [], |
|
"last": "Koo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Collins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "595--603", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Terry Koo, Xavier Carreras, and Michael Collins. 2008. Simple semi-supervised dependency parsing. In Proceedings of ACL-08: HLT, pages 595-603, Columbus, Ohio, USA, June.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Reranking and self-training for parser adaptation", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Mcclosky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eugene", |
|
"middle": [], |
|
"last": "Charniak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Johnson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of ACL-COLING 2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "337--344", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David McClosky, Eugene Charniak, and Mark John- son. 2006. Reranking and self-training for parser adaptation. In Proceedings of ACL-COLING 2006, pages 337-344, Sydney, Australia, July.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Characterizing the errors of data-driven dependency parsing models", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "122--131", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald and Joakim Nivre. 2007. Charac- terizing the errors of data-driven dependency pars- ing models. In Proceedings of the 2007 Joint Con- ference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL 2007), pages 122-131, Prague, Czech, June 28-30.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Online learning of approximate dependency parsing algorithms", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of EACL-2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "81--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald and Fernando Pereira. 2006. Online learning of approximate dependency parsing algo- rithms. In Proceedings of EACL-2006, pages 81-88, Trento, Italy, April.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Online large-margin training of dependency parsers", |
|
"authors": [ |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mcdonald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Koby", |
|
"middle": [], |
|
"last": "Crammer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of ACL-2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "91--98", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ryan McDonald, Koby Crammer, and Fernando Pereira. 2005. Online large-margin training of de- pendency parsers. In Proceedings of ACL-2005, pages 91-98, Ann Arbor, Michigan, USA, June 25- 30.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A multilingual paradigm for automatic verb classification", |
|
"authors": [ |
|
{ |
|
"first": "Paola", |
|
"middle": [], |
|
"last": "Merlo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suzanne", |
|
"middle": [], |
|
"last": "Stevenson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vivian", |
|
"middle": [], |
|
"last": "Tsang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "Allaria", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "ACL-2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "207--214", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paola Merlo, Suzanne Stevenson, Vivian Tsang, and Gianluca Allaria. 2002. A multilingual paradigm for automatic verb classification. In ACL-2002, pages 207-214, Philadelphia, Pennsylvania, USA.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The conll 2007 shared task on dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Johan", |
|
"middle": [], |
|
"last": "Hall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandra", |
|
"middle": [], |
|
"last": "K\u00fcbler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Mc-Donald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Nilsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deniz", |
|
"middle": [], |
|
"last": "Yuret", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "915--932", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre, Johan Hall, Sandra K\u00fcbler, Ryan Mc- Donald, Jens Nilsson, Sebastian Riedel, and Deniz Yuret. 2007. The conll 2007 shared task on de- pendency parsing. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, page 915 932, Prague, Czech, June.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "An efficient algorithm for projective dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of IWPT-2003)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--160", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joakim Nivre. 2003. An efficient algorithm for projec- tive dependency parsing. In Proceedings of IWPT- 2003), pages 149-160, Nancy, France, April 23-25.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Discriminative training and maximum entropy models for statistical machine translation", |
|
"authors": [ |
|
{ "first": "Franz", "middle": ["Josef"], "last": "Och", "suffix": "" },

{ "first": "Hermann", "middle": [], "last": "Ney", "suffix": "" }
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of ACL-2002", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "295--302", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Franz Josef Och and Hermann Ney. 2002. Discrimina- tive training and maximum entropy models for sta- tistical machine translation. In Proceedings of ACL- 2002, pages 295-302, Philadelphia, USA, July.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Self-training for enhancement and domain adaptation of statistical parsers trained on small datasets", |
|
"authors": [ |
|
{ |
|
"first": "Roi", |
|
"middle": [], |
|
"last": "Reichart", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ari", |
|
"middle": [], |
|
"last": "Rappoport", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of ACL-2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "616--623", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Roi Reichart and Ari Rappoport. 2007. Self-training for enhancement and domain adaptation of statistical parsers trained on small datasets. In Proceedings of ACL-2007, pages 616-623, Prague, Czech Republic, June.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Dependency parsing and domain adaptation with lr models and parser ensembles", |
|
"authors": [ |
|
{ |
|
"first": "Kenji", |
|
"middle": [], |
|
"last": "Sagae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tsujii", |
|
"middle": [], |
|
"last": "Jun Ichi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1044--1050", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kenji Sagae and Jun ichi Tsujii. 2007. Dependency parsing and domain adaptation with lr models and parser ensembles. In Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007, page 1044 1050, Prague, Czech, June 28-30.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Annealing structural bias in multilingual weighted grammar induction", |
|
"authors": [ |
|
{ |
|
"first": "Noah", |
|
"middle": ["A."], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Eisner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of ACL-COLING 2006", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "569--576", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Noah A. Smith and Jason Eisner. 2006. Annealing structural bias in multilingual weighted grammar in- duction. In Proceedings of ACL-COLING 2006, page 569 576, Sydney, Australia, July.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Bootstrapping statistical parsers from small datasets", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Steedman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Osborne", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anoop", |
|
"middle": [], |
|
"last": "Sarkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rebecca", |
|
"middle": [], |
|
"last": "Hwa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [], |
|
"last": "Hockenmaier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Ruhlen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steven", |
|
"middle": [], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremiah", |
|
"middle": [], |
|
"last": "Crim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of EACL-2003", |
|
"volume": "331", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Steedman, Miles Osborne, Anoop Sarkar, Stephen Clark, Rebecca Hwa, Julia Hockenmaier, Paul Ruhlen, Steven Baker, and Jeremiah Crim. 2003. Bootstrapping statistical parsers from small datasets. In Proceedings of EACL-2003, page 331 338, Budapest, Hungary, April.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Semisupervised convex training for dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Qin Iris", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dale", |
|
"middle": [], |
|
"last": "Schuurmans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of ACL-08: HLT", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "532--540", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qin Iris Wang and Dale Schuurmans. 2008. Semi- supervised convex training for dependency parsing. In Proceedings of ACL-08: HLT, pages 532-540, Columbus, Ohio, USA, June.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Strictly lexical dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Qin Iris", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dale", |
|
"middle": [], |
|
"last": "Schuurmans", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of IWPT-2005", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "152--159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qin Iris Wang, Dale Schuurmans, and Dekang Lin. 2005. Strictly lexical dependency parsing. In Pro- ceedings of IWPT-2005, pages 152-159, Vancouver, BC, Canada, October.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Simple training of dependency parsers via structured boosting", |
|
"authors": [ |
|
{ |
|
"first": "Qin Iris", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dekang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dale", |
|
"middle": [], |
|
"last": "Schuurmans", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of IJCAI 2007", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1756--1762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qin Iris Wang, Dekang Lin, and Dale Schuurmans. 2007. Simple training of dependency parsers via structured boosting. In Proceedings of IJCAI 2007, pages 1756-1762, Hyderabad, India, January.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Statistical dependency analysis with support vector machines", |
|
"authors": [ |
|
{ |
|
"first": "Hiroyasu", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuji", |
|
"middle": [], |
|
"last": "Matsumoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of IWPT-2003)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "195--206", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hiroyasu Yamada and Yuji Matsumoto. 2003. Sta- tistical dependency analysis with support vector machines. In Proceedings of IWPT-2003), page 195 206, Nancy, France, April.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Chinese dependency parsing with large scale automatically constructed case structures", |
|
"authors": [ |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daisuke", |
|
"middle": [], |
|
"last": "Kawahara", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sadao", |
|
"middle": [], |
|
"last": "Kurohashi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of COLING-2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1049--1056", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kun Yu, Daisuke Kawahara, and Sadao Kurohashi. 2008. Chinese dependency parsing with large scale automatically constructed case structures. In Proceedings of COLING-2008, pages 1049-1056, Manchester, UK, August.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Crosslanguage parser adaptation between related languages", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Zeman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip", |
|
"middle": [], |
|
"last": "Resnik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceedings of IJCNLP 2008 Workshop on NLP for Less Privileged Languages", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "35--42", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Zeman and Philip Resnik. 2008. Cross- language parser adaptation between related lan- guages. In Proceedings of IJCNLP 2008 Workshop on NLP for Less Privileged Languages, pages 35- 42, Hyderabad, India, January.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Parsing syntactic and semantic dependencies with two single-stage maximum entropy models", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyu", |
|
"middle": [], |
|
"last": "Kit", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Proceeding of CoNLL-2008", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "203--207", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhao and Chunyu Kit. 2008. Parsing syntactic and semantic dependencies with two single-stage max- imum entropy models. In Proceeding of CoNLL- 2008, pages 203-207, Manchester, UK.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Multilingual dependency learning: A huge feature engineering method to semantic dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenliang", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyu", |
|
"middle": [], |
|
"last": "Kit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of CoNLL-2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhao, Wenliang Chen, Chunyu Kit, and Guodong Zhou. 2009. Multilingual dependency learning: A huge feature engineering method to semantic de- pendency parsing. In Proceedings of CoNLL-2009, Boulder, Colorado, USA.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Character-level dependencies in chinese: Usefulness and learning", |
|
"authors": [ |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "EACL-2009", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "879--887", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hai Zhao. 2009. Character-level dependencies in chinese: Usefulness and learning. In EACL-2009, pages 879-887, Athens, Greece.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "A comparison before and after translation" |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "+i.pos pairscore(s .pos, i.pos)+s .f orm+i.f orm rootscore(s .pos)+s .f orm+i.f orm rootscore(s .pos)+i.pos and the first character of hd word. If such a match item in L is found, then \u03c6(\u2022) returns log(f ). There are three input parameters required by the function \u03c8(\u2022)." |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Performance vs. dependency length" |
|
}, |
|
"TABREF0": { |
|
"content": "<table><tr><td/><td>The first (second) word in the</td></tr><tr><td/><td>unprocessed sequence, etc.</td></tr><tr><td>dir</td><td>Dependent direction</td></tr><tr><td>h</td><td>Head</td></tr><tr><td>lm</td><td>Leftmost child</td></tr><tr><td>rm</td><td>Rightmost child</td></tr><tr><td>rn</td><td>Right nearest child</td></tr><tr><td>form</td><td>word form</td></tr><tr><td>pos</td><td>POS tag of word</td></tr><tr><td>cpos1</td><td>coarse POS: the first letter of POS tag of word</td></tr><tr><td>cpos2</td><td>coarse POS: the first two POS tags of word</td></tr><tr><td>lnverb</td><td>the left nearest verb</td></tr><tr><td>char1</td><td>The first character of a word</td></tr><tr><td>char2</td><td>The first two characters of a word</td></tr><tr><td>char\u22121</td><td>The last character of a word</td></tr><tr><td>char\u22122</td><td>The last two characters of a word</td></tr><tr><td>.</td><td>'s, i.e., 's.dprel' means dependent label</td></tr><tr><td/><td>of character in the top of stack</td></tr><tr><td>+</td><td>Feature combination, i.e., 's.char+i.char'</td></tr><tr><td/><td>means both s.char and i.char work as a</td></tr><tr><td/><td>feature function.</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Feature Notations Notation Meaning sThe word in the top of stack sThe first word below the top of stack. s\u22121,s1... The first word before(after) the word in the top of stack. i, i+1,...", |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"content": "<table><tr><td>in.f orm, n = 0, 1</td></tr><tr><td>i.f orm + i1.f orm</td></tr><tr><td>in.char2 + in+1.char2, n = \u22121, 0</td></tr><tr><td>i.char\u22121 + i1.char\u22121</td></tr><tr><td>in.char\u22122 n = 0, 3</td></tr><tr><td>i1.char\u22122 + i2.char\u22122 +i3.char\u22122</td></tr><tr><td>i.lnverb.char\u22122</td></tr><tr><td>i3.pos</td></tr><tr><td>in.pos + in+1.pos, n = 0, 1</td></tr><tr><td>i\u22122.cpos1 + i\u22121.cpos1</td></tr><tr><td>i1.cpos1 + i2.cpos1 + i3.cpos1</td></tr><tr><td>s 2 .char1 s .char\u22122 + s 1 .char\u22122 s \u22122 .cpos2 s \u22121 .cpos2 + s 1 .cpos2 s .cpos2 + s 1 .cpos2 s'.children.cpos2.seq</td></tr><tr><td>s'.children.dprel.seq</td></tr><tr><td>s'.subtree.depth</td></tr><tr><td>s .h.f orm + s .rm.cpos1</td></tr><tr><td>s .lm.char2 + s .char2</td></tr><tr><td>s.h.children.dprel.seq</td></tr><tr><td>s.lm.dprel</td></tr><tr><td>s.char\u22122 + i1.char\u22122</td></tr><tr><td>s.charn + i.charn, n = \u22121, 1</td></tr><tr><td>s\u22121.pos + i1.pos</td></tr><tr><td>s.pos + in.pos, n = \u22121, 0, 1</td></tr><tr><td>s : i|lineP ath.f orm.bag</td></tr><tr><td>s .f orm + i.f orm</td></tr><tr><td>s .char2 + in.char2, n = \u22121, 0, 1</td></tr><tr><td>s.curroot.pos + i.pos</td></tr><tr><td>s.curroot.char2 + i.char2</td></tr><tr><td>s.children.cpos2.seq + i.children.cpos2.seq</td></tr><tr><td>s.children.cpos2.seq + i.children.cpos2.seq</td></tr><tr><td>+ s.cpos2 + i.cpos2</td></tr><tr><td>s .children.dprel.seq + i.children.dprel.seq</td></tr><tr><td>preact\u22121</td></tr><tr><td>preact\u22122</td></tr><tr><td>preact\u22122+preact\u22121</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Features for Parsing", |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td colspan=\"3\">features with p without p</td></tr><tr><td>baseline</td><td>-d +d a</td><td>0.846 0.848</td><td>0.858 0.860</td></tr><tr><td>+T b</td><td>-d</td><td>0.859</td><td>0.869</td></tr><tr><td/><td>+d</td><td>0.861</td><td>0.870</td></tr><tr><td colspan=\"4\">a +d: using three Markovian features preact and</td></tr><tr><td colspan=\"2\">beam search decoding.</td><td/><td/></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "The results with different feature sets", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table><tr><td>(McDonald and Pereira, 2006) a</td><td>full -</td><td>up to 40 0.825</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Comparison against the state-of-the-art", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td colspan=\"3\">: Matching degree distribution</td></tr><tr><td colspan=\"3\">dependant-match head-match Percent (%)</td></tr><tr><td>None</td><td>None</td><td>9.6</td></tr><tr><td>None</td><td>Partial</td><td>16.2</td></tr><tr><td>None</td><td>Full</td><td>9.9</td></tr><tr><td>Partial</td><td>None</td><td>12.4</td></tr><tr><td>Partial</td><td>Partial</td><td>42.6</td></tr><tr><td>Partial</td><td>Full</td><td>7.3</td></tr><tr><td>Full</td><td>None</td><td>3.7</td></tr><tr><td>Full</td><td>Partial</td><td>7.0</td></tr><tr><td>Full</td><td>Full</td><td>0.2</td></tr></table>", |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |