corpus_id | paper_id | title | abstract | source | bibtex | citation_key |
---|---|---|---|---|---|---|
arxiv-668801 | cmp-lg/9605021 | Functional Centering | <|reference_start|>Functional Centering: Based on empirical evidence from a free word order language (German) we propose a fundamental revision of the principles guiding the ordering of discourse entities in the forward-looking centers within the centering model. We claim that grammatical role criteria should be replaced by indicators of the functional information structure of the utterances, i.e., the distinction between context-bound and unbound discourse elements. This claim is backed up by an empirical evaluation of functional centering.<|reference_end|> | arxiv | @article{strube1996functional,
title={Functional Centering},
author={Michael Strube and Udo Hahn (Computational Linguistics Lab, Freiburg
University, Germany)},
journal={Proceedings of ACL '96 (Santa Cruz)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605021},
primaryClass={cmp-lg cs.CL}
} | strube1996functional |
arxiv-668802 | cmp-lg/9605022 | Processing Complex Sentences in the Centering Framework | <|reference_start|>Processing Complex Sentences in the Centering Framework: We extend the centering model for the resolution of intra-sentential anaphora and specify how to handle complex sentences. An empirical evaluation indicates that the functional information structure guides the search for an antecedent within the sentence.<|reference_end|> | arxiv | @article{strube1996processing,
title={Processing Complex Sentences in the Centering Framework},
author={Michael Strube (Computational Linguistics Lab, Freiburg University,
Germany)},
journal={Proceedings of ACL '96 (Santa Cruz), Student Session},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605022},
primaryClass={cmp-lg cs.CL}
} | strube1996processing |
arxiv-668803 | cmp-lg/9605023 | A Simple Transformation for Offline-Parsable Grammars and its Termination Properties | <|reference_start|>A Simple Transformation for Offline-Parsable Grammars and its Termination Properties: We present, in easily reproducible terms, a simple transformation for offline-parsable grammars which results in a provably terminating parsing program directly top-down interpretable in Prolog. The transformation consists of two steps: (1) removal of empty productions, followed by (2) left-recursion elimination. It is related both to left-corner parsing (where the grammar is compiled, rather than interpreted through a parsing program, and with the advantage of guaranteed termination in the presence of empty productions) and to the Generalized Greibach Normal Form for DCGs (with the advantage of implementation simplicity).<|reference_end|> | arxiv | @article{dymetman1996a,
title={A Simple Transformation for Offline-Parsable Grammars and its
Termination Properties},
author={Marc Dymetman (Rank Xerox Research Centre, Grenoble)},
journal={arXiv preprint arXiv:cmp-lg/9605023},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605023},
primaryClass={cmp-lg cs.CL}
} | dymetman1996a |
arxiv-668804 | cmp-lg/9605024 | Using Terminological Knowledge Representation Languages to Manage Linguistic Resources | <|reference_start|>Using Terminological Knowledge Representation Languages to Manage Linguistic Resources: I examine how terminological languages can be used to manage linguistic data during NL research and development. In particular, I consider the lexical semantics task of characterizing semantic verb classes and show how the language can be extended to flag inconsistencies in verb class definitions, identify the need for new verb classes, and identify appropriate linguistic hypotheses for a new verb's behavior.<|reference_end|> | arxiv | @article{jordan1996using,
title={Using Terminological Knowledge Representation Languages to Manage
Linguistic Resources},
author={Pamela W. Jordan (University of Pittsburgh)},
journal={Proceedings of ACL 96, Santa Cruz, USA, June 23-28},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605024},
primaryClass={cmp-lg cs.CL}
} | jordan1996using |
arxiv-668805 | cmp-lg/9605025 | A Conceptual Reasoning Approach to Textual Ellipsis | <|reference_start|>A Conceptual Reasoning Approach to Textual Ellipsis: We present a hybrid text understanding methodology for the resolution of textual ellipsis. It integrates conceptual criteria (based on the well-formedness and conceptual strength of role chains in a terminological knowledge base) and functional constraints reflecting the utterances' information structure (based on the distinction between context-bound and unbound discourse elements). The methodological framework for text ellipsis resolution is the centering model that has been adapted to these constraints.<|reference_end|> | arxiv | @article{hahn1996a,
title={A Conceptual Reasoning Approach to Textual Ellipsis},
author={Udo Hahn and Katja Markert and Michael Strube (Computational
Linguistics Lab, Freiburg University, Germany)},
journal={ECAI '96: Proc. of 12th European Conference on Artificial
Intelligence. Budapest, Aug 12-16 1996, pp.572-576},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605025},
primaryClass={cmp-lg cs.CL}
} | hahn1996a |
arxiv-668806 | cmp-lg/9605026 | Trading off Completeness for Efficiency --- The \textsc{ParseTalk} Performance Grammar Approach to Real-World Text Parsing | <|reference_start|>Trading off Completeness for Efficiency --- The \textsc{ParseTalk} Performance Grammar Approach to Real-World Text Parsing: We argue for a performance-based design of natural language grammars and their associated parsers in order to meet the constraints posed by real-world natural language understanding. This approach incorporates declarative and procedural knowledge about language and language use within an object-oriented specification framework. We discuss several message passing protocols for real-world text parsing and provide reasons for sacrificing completeness of the parse in favor of efficiency.<|reference_end|> | arxiv | @article{neuhaus1996trading,
title={Trading off Completeness for Efficiency --- The \textsc{ParseTalk}
Performance Grammar Approach to Real-World Text Parsing},
author={Peter Neuhaus and Udo Hahn (Computational Linguistics Lab, Freiburg
University, Germany)},
journal={Proceedings of FLAIRS '96 (Key West)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605026},
primaryClass={cmp-lg cs.CL}
} | neuhaus1996trading |
arxiv-668807 | cmp-lg/9605027 | Restricted Parallelism in Object-Oriented Lexical Parsing | <|reference_start|>Restricted Parallelism in Object-Oriented Lexical Parsing: We present an approach to parallel natural language parsing which is based on a concurrent, object-oriented model of computation. A depth-first, yet incomplete parsing algorithm for a dependency grammar is specified and several restrictions on the degree of its parallelization are discussed.<|reference_end|> | arxiv | @article{neuhaus1996restricted,
title={Restricted Parallelism in Object-Oriented Lexical Parsing},
author={Peter Neuhaus and Udo Hahn (Computational Linguistics Lab, Freiburg
University, Germany)},
journal={Proceedings of COLING '96 (Copenhagen)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605027},
primaryClass={cmp-lg cs.CL}
} | neuhaus1996restricted |
arxiv-668808 | cmp-lg/9605028 | Towards Understanding Spontaneous Speech: Word Accuracy vs Concept Accuracy | <|reference_start|>Towards Understanding Spontaneous Speech: Word Accuracy vs Concept Accuracy: In this paper we describe an approach to automatic evaluation of both the speech recognition and understanding capabilities of a spoken dialogue system for train time table information. We use word accuracy for recognition and concept accuracy for understanding performance judgement. Both measures are calculated by comparing these modules' output with a correct reference answer. We report evaluation results for a spontaneous speech corpus with about 10000 utterances. We observed a nearly linear relationship between word accuracy and concept accuracy.<|reference_end|> | arxiv | @article{boros1996towards,
title={Towards Understanding Spontaneous Speech: Word Accuracy vs. Concept
Accuracy},
author={M. Boros and W. Eckert and F. Gallwitz and G. Goerz and G. Hanrieder
and H. Niemann (Bavarian Research Center for Knowledge-Based Systems and
Department of Pattern Recognition, University of Erlangen-Nuremberg, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9605028},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605028},
primaryClass={cmp-lg cs.CL}
} | boros1996towards |
arxiv-668809 | cmp-lg/9605029 | Learning Word Association Norms Using Tree Cut Pair Models | <|reference_start|>Learning Word Association Norms Using Tree Cut Pair Models: We consider the problem of learning co-occurrence information between two word categories, or, more generally, between two discrete random variables taking values in a hierarchically classified domain. In particular, we consider the problem of learning the `association norm' defined by A(x,y)=p(x, y)/(p(x)*p(y)), where p(x, y) is the joint distribution for x and y and p(x) and p(y) are marginal distributions induced by p(x, y). We formulate this problem as a sub-task of learning the conditional distribution p(x|y), by exploiting the identity p(x|y) = A(x,y)*p(x). We propose a two-step estimation method based on the MDL principle, which works as follows: It first estimates p(x) as p1 using MDL, and then estimates p(x|y) for a fixed y by applying MDL on the hypothesis class of {A * p1 | A \in B} for some given class B of representations for the association norm. The estimation of A is therefore obtained as a side-effect of a near optimal estimation of p(x|y). We then apply this general framework to the problem of acquiring case-frame patterns. We assume that both p(x) and A(x, y) for given y are representable by a model based on a classification that exists within an existing thesaurus tree as a `cut,' and hence p(x|y) is represented as the product of a pair of `tree cut models.' We then devise an efficient algorithm that implements our general strategy. We tested our method by using it to actually acquire case-frame patterns and conducted disambiguation experiments using the acquired knowledge. The experimental results show that our method improves upon existing methods.<|reference_end|> | arxiv | @article{abe1996learning,
title={Learning Word Association Norms Using Tree Cut Pair Models},
author={Naoki Abe and Hang Li (Theory NEC Lab., RWCP)},
journal={arXiv preprint arXiv:cmp-lg/9605029},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605029},
primaryClass={cmp-lg cs.CL}
} | abe1996learning |
arxiv-668810 | cmp-lg/9605030 | Incremental Centering and Center Ambiguity | <|reference_start|>Incremental Centering and Center Ambiguity: In this paper, we present a model of anaphor resolution within the framework of the centering model. The consideration of an incremental processing mode introduces the need to manage structural ambiguity at the center level. Hence, the centering framework is further refined to account for local and global parsing ambiguities which propagate up to the level of center representations, yielding moderately adapted data structures for the centering algorithm.<|reference_end|> | arxiv | @article{hahn1996incremental,
title={Incremental Centering and Center Ambiguity},
author={Udo Hahn and Michael Strube (Computational Linguistics Lab, Freiburg
University, Germany)},
journal={CogSci '96: Proc. of 18th Annual Conference of the Cognitive
Science Society. La Jolla, Ca., Jul 12-15 1996.},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605030},
primaryClass={cmp-lg cs.CL}
} | hahn1996incremental |
arxiv-668811 | cmp-lg/9605031 | Efficient Algorithms for Parsing the DOP Model? A Reply to Joshua Goodman | <|reference_start|>Efficient Algorithms for Parsing the DOP Model? A Reply to Joshua Goodman: This note is a reply to Joshua Goodman's paper "Efficient Algorithms for Parsing the DOP Model" (Goodman, 1996; cmp-lg/9604008). In his paper, Goodman makes a number of claims about (my work on) the Data-Oriented Parsing model (Bod, 1992-1996). This note shows that some of these claims must be mistaken.<|reference_end|> | arxiv | @article{bod1996efficient,
title={Efficient Algorithms for Parsing the DOP Model? A Reply to Joshua
Goodman},
author={Rens Bod (University of Amsterdam)},
journal={arXiv preprint arXiv:cmp-lg/9605031},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605031},
primaryClass={cmp-lg cs.CL}
} | bod1996efficient |
arxiv-668812 | cmp-lg/9605032 | Synchronous Models of Language | <|reference_start|>Synchronous Models of Language: In synchronous rewriting, the productions of two rewriting systems are paired and applied synchronously in the derivation of a pair of strings. We present a new synchronous rewriting system and argue that it can handle certain phenomena that are not covered by existing synchronous systems. We also prove some interesting formal/computational properties of our system.<|reference_end|> | arxiv | @article{rambow1996synchronous,
title={Synchronous Models of Language},
author={Owen Rambow (CoGenTex, Inc.) and Giorgio Satta (Padova)},
journal={arXiv preprint arXiv:cmp-lg/9605032},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605032},
primaryClass={cmp-lg cs.CL}
} | rambow1996synchronous |
arxiv-668813 | cmp-lg/9605033 | Notes on LR Parser Design | <|reference_start|>Notes on LR Parser Design: The design of an LR parser based on interleaving the atomic symbol processing of a context-free backbone grammar with the full constraints of the underlying unification grammar is described. The parser employs a set of reduced constraints derived from the unification grammar in the LR parsing step. Gap threading is simulated to reduce the applicability of empty productions.<|reference_end|> | arxiv | @article{samuelsson1996notes,
title={Notes on LR Parser Design},
author={Christer Samuelsson (Swedish Institute of Computer Science)},
journal={Coling 94},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605033},
primaryClass={cmp-lg cs.CL}
} | samuelsson1996notes |
arxiv-668814 | cmp-lg/9605034 | Handling Sparse Data by Successive Abstraction | <|reference_start|>Handling Sparse Data by Successive Abstraction: A general, practical method for handling sparse data that avoids held-out data and iterative reestimation is derived from first principles. It has been tested on a part-of-speech tagging task and outperformed (deleted) interpolation with context-independent weights, even when the latter used a globally optimal parameter setting determined a posteriori.<|reference_end|> | arxiv | @article{samuelsson1996handling,
title={Handling Sparse Data by Successive Abstraction},
author={Christer Samuelsson (University of the Saarland)},
journal={Coling 96},
year={1996},
number={CLAUS 69},
archivePrefix={arXiv},
eprint={cmp-lg/9605034},
primaryClass={cmp-lg cs.CL}
} | samuelsson1996handling |
arxiv-668815 | cmp-lg/9605035 | Example-Based Optimization of Surface-Generation Tables | <|reference_start|>Example-Based Optimization of Surface-Generation Tables: A method is given that "inverts" a logic grammar and displays it from the point of view of the logical form, rather than from that of the word string. LR-compiling techniques are used to allow a recursive-descent generation algorithm to perform "functor merging" much in the same way as an LR parser performs prefix merging. This is an improvement on the semantic-head-driven generator that results in a much smaller search space. The amount of semantic lookahead can be varied, and appropriate tradeoff points between table size and resulting nondeterminism can be found automatically. This can be done by removing all spurious nondeterminism for input sufficiently close to the examples of a training corpus, and large portions of it for other input, while preserving completeness.<|reference_end|> | arxiv | @article{samuelsson1996example-based,
title={Example-Based Optimization of Surface-Generation Tables},
author={Christer Samuelsson (University of the Saarland)},
journal={R. Mitkov and N. Nicolov (eds.) "Recent Advances in Natural
Language Processing", vol. 136 of "Current Issues in Linguistic Theory", John
Benjamins, Amsterdam, 1996.},
year={1996},
number={CLAUS 56},
archivePrefix={arXiv},
eprint={cmp-lg/9605035},
primaryClass={cmp-lg cs.CL}
} | samuelsson1996example-based |
arxiv-668816 | cmp-lg/9605036 | Parsing Algorithms and Metrics | <|reference_start|>Parsing Algorithms and Metrics: Many different metrics exist for evaluating parsing results, including Viterbi, Crossing Brackets Rate, Zero Crossing Brackets Rate, and several others. However, most parsing algorithms, including the Viterbi algorithm, attempt to optimize the same metric, namely the probability of getting the correct labelled tree. By choosing a parsing algorithm appropriate for the evaluation metric, better performance can be achieved. We present two new algorithms: the ``Labelled Recall Algorithm,'' which maximizes the expected Labelled Recall Rate, and the ``Bracketed Recall Algorithm,'' which maximizes the Bracketed Recall Rate. Experimental results are given, showing that the two new algorithms have improved performance over the Viterbi algorithm on many criteria, especially the ones that they optimize.<|reference_end|> | arxiv | @article{goodman1996parsing,
title={Parsing Algorithms and Metrics},
author={Joshua Goodman (Harvard University)},
journal={Proceedings of the 34th Meeting of the Association for
Computational Linguistics (ACL'96)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605036},
primaryClass={cmp-lg cs.CL}
} | goodman1996parsing |
arxiv-668817 | cmp-lg/9605037 | Combining Trigram-based and Feature-based Methods for Context-Sensitive Spelling Correction | <|reference_start|>Combining Trigram-based and Feature-based Methods for Context-Sensitive Spelling Correction: This paper addresses the problem of correcting spelling errors that result in valid, though unintended words (such as ``peace'' and ``piece'', or ``quiet'' and ``quite'') and also the problem of correcting particular word usage errors (such as ``amount'' and ``number'', or ``among'' and ``between''). Such corrections require contextual information and are not handled by conventional spelling programs such as Unix `spell'. First, we introduce a method called Trigrams that uses part-of-speech trigrams to encode the context. This method uses a small number of parameters compared to previous methods based on word trigrams. However, it is effectively unable to distinguish among words that have the same part of speech. For this case, an alternative feature-based method called Bayes performs better; but Bayes is less effective than Trigrams when the distinction among words depends on syntactic constraints. A hybrid method called Tribayes is then introduced that combines the best of the previous two methods. The improvement in performance of Tribayes over its components is verified experimentally. Tribayes is also compared with the grammar checker in Microsoft Word, and is found to have substantially higher performance.<|reference_end|> | arxiv | @article{golding1996combining,
title={Combining Trigram-based and Feature-based Methods for Context-Sensitive
Spelling Correction},
author={Andrew R. Golding and Yves Schabes (Mitsubishi Electric Research
Laboratories)},
journal={arXiv preprint arXiv:cmp-lg/9605037},
year={1996},
number={TR96-03a},
archivePrefix={arXiv},
eprint={cmp-lg/9605037},
primaryClass={cmp-lg cs.CL}
} | golding1996combining |
arxiv-668818 | cmp-lg/9605038 | Efficient Normal-Form Parsing for Combinatory Categorial Grammar | <|reference_start|>Efficient Normal-Form Parsing for Combinatory Categorial Grammar: Under categorial grammars that have powerful rules like composition, a simple n-word sentence can have exponentially many parses. Generating all parses is inefficient and obscures whatever true semantic ambiguities are in the input. This paper addresses the problem for a fairly general form of Combinatory Categorial Grammar, by means of an efficient, correct, and easy to implement normal-form parsing technique. The parser is proved to find exactly one parse in each semantic equivalence class of allowable parses; that is, spurious ambiguity (as carefully defined) is shown to be both safely and completely eliminated.<|reference_end|> | arxiv | @article{eisner1996efficient,
title={Efficient Normal-Form Parsing for Combinatory Categorial Grammar},
author={Jason Eisner (Univ. of Pennsylvania)},
journal={Proceedings of ACL '96 (34th Meeting of the Association for
Computational Linguistics), Santa Cruz},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9605038},
primaryClass={cmp-lg cs.CL}
} | eisner1996efficient |
arxiv-668819 | cmp-lg/9606001 | A Bayesian hybrid method for context-sensitive spelling correction | <|reference_start|>A Bayesian hybrid method for context-sensitive spelling correction: Two classes of methods have been shown to be useful for resolving lexical ambiguity. The first relies on the presence of particular words within some distance of the ambiguous target word; the second uses the pattern of words and part-of-speech tags around the target word. These methods have complementary coverage: the former captures the lexical ``atmosphere'' (discourse topic, tense, etc.), while the latter captures local syntax. Yarowsky has exploited this complementarity by combining the two methods using decision lists. The idea is to pool the evidence provided by the component methods, and to then solve a target problem by applying the single strongest piece of evidence, whatever type it happens to be. This paper takes Yarowsky's work as a starting point, applying decision lists to the problem of context-sensitive spelling correction. Decision lists are found, by and large, to outperform either component method. However, it is found that further improvements can be obtained by taking into account not just the single strongest piece of evidence, but ALL the available evidence. A new hybrid method, based on Bayesian classifiers, is presented for doing this, and its performance improvements are demonstrated.<|reference_end|> | arxiv | @article{golding1996a,
title={A Bayesian hybrid method for context-sensitive spelling correction},
author={Andrew R. Golding (Mitsubishi Electric Research Laboratories)},
journal={arXiv preprint arXiv:cmp-lg/9606001},
year={1996},
number={TR95-13},
archivePrefix={arXiv},
eprint={cmp-lg/9606001},
primaryClass={cmp-lg cs.CL}
} | golding1996a |
arxiv-668820 | cmp-lg/9606002 | Clustered Language Models with Context-Equivalent States | <|reference_start|>Clustered Language Models with Context-Equivalent States: In this paper, a hierarchical context definition is added to an existing clustering algorithm in order to increase its robustness. The resulting algorithm, which clusters contexts and events separately, is used to experiment with different ways of defining the context a language model takes into account. The contexts range from standard bigram and trigram contexts to part of speech five-grams. Although none of the models can compete directly with a backoff trigram, they give up to 9\% improvement in perplexity when interpolated with a trigram. Moreover, the modified version of the algorithm leads to a performance increase over the original version of up to 12\%.<|reference_end|> | arxiv | @article{ueberla1996clustered,
title={Clustered Language Models with Context-Equivalent States},
author={J.P. Ueberla and I.R. Gransden},
journal={arXiv preprint arXiv:cmp-lg/9606002},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606002},
primaryClass={cmp-lg cs.CL}
} | ueberla1996clustered |
arxiv-668821 | cmp-lg/9606003 | Morphological Cues for Lexical Semantics | <|reference_start|>Morphological Cues for Lexical Semantics: Most natural language processing tasks require lexical semantic information. Automated acquisition of this information would thus increase the robustness and portability of NLP systems. This paper describes an acquisition method which makes use of fixed correspondences between derivational affixes and lexical semantic information. One advantage of this method, and of other methods that rely only on surface characteristics of language, is that the necessary input is currently available.<|reference_end|> | arxiv | @article{light1996morphological,
title={Morphological Cues for Lexical Semantics},
author={Marc Light (University of Tuebingen)},
journal={Proceedings of the 34th Meeting of the Association for
Computational Linguistics (ACL'96)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606003},
primaryClass={cmp-lg cs.CL}
} | light1996morphological |
arxiv-668822 | cmp-lg/9606004 | Classification in Feature-based Default Inheritance Hierarchies | <|reference_start|>Classification in Feature-based Default Inheritance Hierarchies: Increasingly, inheritance hierarchies are being used to reduce redundancy in natural language processing lexicons. Systems that utilize inheritance hierarchies need to be able to insert words under the optimal set of classes in these hierarchies. In this paper, we formalize this problem for feature-based default inheritance hierarchies. Since the problem turns out to be NP-complete, we present an approximation algorithm for it. We show that this algorithm is efficient and that it performs well with respect to a number of standard problems for default inheritance. A prototype implementation has been tested on lexical hierarchies and it has produced encouraging results. The work presented here is also relevant to other types of default hierarchies.<|reference_end|> | arxiv | @article{light1996classification,
title={Classification in Feature-based Default Inheritance Hierarchies},
author={Marc Light (University of Tuebingen)},
journal={Proceedings of KONVENS-94},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606004},
primaryClass={cmp-lg cs.CL}
} | light1996classification |
arxiv-668823 | cmp-lg/9606005 | Part-of-Speech-Tagging using morphological information | <|reference_start|>Part-of-Speech-Tagging using morphological information: This paper presents the results of an experiment to decide the question of authenticity of the supposedly spurious Rhesus - an Attic tragedy sometimes credited to Euripides. The experiment involves the use of statistics in order to test whether significant deviations in the distribution of word categories between Rhesus and the other works of Euripides can or cannot be found. To count frequencies of word categories in the corpus, a part-of-speech-tagger for Greek has been implemented. Some special techniques for reducing the problem of sparse data are used, resulting in an accuracy of ca. 96.6%.<|reference_end|> | arxiv | @article{ludwig1996part-of-speech-tagging,
title={Part-of-Speech-Tagging using morphological information},
author={Bernd Ludwig (University of Erlangen-Nuremberg, Germany, Department
of Artificial Intelligence (IMMD 8))},
journal={arXiv preprint arXiv:cmp-lg/9606005},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606005},
primaryClass={cmp-lg cs.CL}
} | ludwig1996part-of-speech-tagging |
arxiv-668824 | cmp-lg/9606006 | Coordination in Tree Adjoining Grammars: Formalization and Implementation | <|reference_start|>Coordination in Tree Adjoining Grammars: Formalization and Implementation: In this paper we show that an account for coordination can be constructed using the derivation structures in a lexicalized Tree Adjoining Grammar (LTAG). We present a notion of derivation in LTAGs that preserves the notion of fixed constituency in the LTAG lexicon while providing the flexibility needed for coordination phenomena. We also discuss the construction of a practical parser for LTAGs that can handle coordination including cases of non-constituent coordination.<|reference_end|> | arxiv | @article{sarkar1996coordination,
title={Coordination in Tree Adjoining Grammars: Formalization and
Implementation},
author={Anoop Sarkar and Aravind Joshi (Dept of Computer and Information
Science, University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9606006},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606006},
primaryClass={cmp-lg cs.CL}
} | sarkar1996coordination |
arxiv-668825 | cmp-lg/9606007 | Word Sense Disambiguation using Conceptual Density | <|reference_start|>Word Sense Disambiguation using Conceptual Density: This paper presents a method for the resolution of lexical ambiguity of nouns and its automatic evaluation over the Brown Corpus. The method relies on the use of the wide-coverage noun taxonomy of WordNet and the notion of conceptual distance among concepts, captured by a Conceptual Density formula developed for this purpose. This fully automatic method requires no hand coding of lexical entries, hand tagging of text nor any kind of training process. The results of the experiments have been automatically evaluated against SemCor, the sense-tagged version of the Brown Corpus.<|reference_end|> | arxiv | @article{agirre1996word,
title={Word Sense Disambiguation using Conceptual Density},
author={Eneko Agirre (Euskal Herriko Unibertsitatea) and German Rigau
(Universitat Politecnica de Catalunya)},
journal={arXiv preprint arXiv:cmp-lg/9606007},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606007},
primaryClass={cmp-lg cs.CL}
} | agirre1996word |
arxiv-668826 | cmp-lg/9606008 | Coordination as a Direct Process | <|reference_start|>Coordination as a Direct Process: We propose a treatment of coordination based on the concepts of functor, argument and subcategorization. Its formalization comprises two parts which are conceptually independent. On one hand, we have extended the feature structure unification to disjunctive and set values in order to check the compatibility and the satisfiability of subcategorization requirements by structured complements. On the other hand, we have considered the conjunction {\em et (and)} as the head of the coordinate structure, so that coordinate structures stem simply from the subcategorization specifications of {\em et} and the general schemata of a head saturation. Both parts have been encoded within HPSG using the same resource that is the subcategorization and its principle which we have just extended.<|reference_end|> | arxiv | @article{mela1996coordination,
title={Coordination as a Direct Process},
author={Augusta Mela and Christophe Fouquere (Universite Paris-Nord)},
journal={arXiv preprint arXiv:cmp-lg/9606008},
year={1996},
number={LIPN-URA1507},
archivePrefix={arXiv},
eprint={cmp-lg/9606008},
primaryClass={cmp-lg cs.CL}
} | mela1996coordination |
arxiv-668827 | cmp-lg/9606009 | Modularizing Contexted Constraints | <|reference_start|>Modularizing Contexted Constraints: This paper describes a method for compiling a constraint-based grammar into a potentially more efficient form for processing. This method takes dependent disjunctions within a constraint formula and factors them into non-interacting groups whenever possible by determining their independence. When a group of dependent disjunctions is split into smaller groups, an exponential amount of redundant information is reduced. At runtime, this means that an exponential amount of processing can be saved as well. Since the performance of an algorithm for processing constraints with dependent disjunctions is highly determined by its input, the transformation presented in this paper should prove beneficial for all such algorithms.<|reference_end|> | arxiv | @article{griffith1996modularizing,
title={Modularizing Contexted Constraints},
author={John Griffith (University of Tuebingen)},
journal={arXiv preprint arXiv:cmp-lg/9606009},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606009},
primaryClass={cmp-lg cs.CL}
} | griffith1996modularizing |
arxiv-668828 | cmp-lg/9606010 | An Information Structural Approach to Spoken Language Generation | <|reference_start|>An Information Structural Approach to Spoken Language Generation: This paper presents an architecture for the generation of spoken monologues with contextually appropriate intonation. A two-tiered information structure representation is used in the high-level content planning and sentence planning stages of generation to produce efficient, coherent speech that makes certain discourse relationships, such as explicit contrasts, appropriately salient. The system is able to produce appropriate intonational patterns that cannot be generated by other systems which rely solely on word class and given/new distinctions.<|reference_end|> | arxiv | @article{prevost1996an,
title={An Information Structural Approach to Spoken Language Generation},
author={Scott Prevost (MIT Media Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9606010},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606010},
primaryClass={cmp-lg cs.CL}
} | prevost1996an |
arxiv-668829 | cmp-lg/9606011 | An Empirical Study of Smoothing Techniques for Language Modeling | <|reference_start|>An Empirical Study of Smoothing Techniques for Language Modeling: We present an extensive empirical comparison of several smoothing techniques in the domain of language modeling, including those described by Jelinek and Mercer (1980), Katz (1987), and Church and Gale (1991). We investigate for the first time how factors such as training data size, corpus (e.g., Brown versus Wall Street Journal), and n-gram order (bigram versus trigram) affect the relative performance of these methods, which we measure through the cross-entropy of test data. In addition, we introduce two novel smoothing techniques, one a variation of Jelinek-Mercer smoothing and one a very simple linear interpolation technique, both of which outperform existing methods.<|reference_end|> | arxiv | @article{chen1996an,
title={An Empirical Study of Smoothing Techniques for Language Modeling},
author={Stanley F. Chen and Joshua T. Goodman (Harvard University)},
journal={Proceedings of the 34th Meeting of the Association for
Computational Linguistics (ACL '96)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606011},
primaryClass={cmp-lg cs.CL}
} | chen1996an |
arxiv-668830 | cmp-lg/9606012 | An Efficient Inductive Unsupervised Semantic Tagger | <|reference_start|>An Efficient Inductive Unsupervised Semantic Tagger: We report our development of a simple but fast and efficient inductive unsupervised semantic tagger for Chinese words. A POS hand-tagged corpus of 348,000 words is used. The corpus is tagged in two steps. First, possible semantic tags are selected from a semantic dictionary (Tong Yi Ci Ci Lin), the POS, and the conditional probability of the semantic tag given the POS, i.e., P(S|P). The final semantic tag is then assigned by considering the semantic tags before and after the current word and the semantic-word conditional probability P(S|W) derived from the first step. Semantic bigram probabilities P(S|S) are used in the second step. Final manual checking shows that this simple but efficient algorithm has a hit rate of 91%. The tagger tags 142 words per second, using a 120 MHz Pentium running FOXPRO. It runs about 2.3 times faster than a Viterbi tagger.<|reference_end|> | arxiv | @article{lua1996an,
title={An Efficient Inductive Unsupervised Semantic Tagger},
author={K T Lua (Department of Information Systems and Computer Science,
National University of Singapore, Singapore)},
journal={arXiv preprint arXiv:cmp-lg/9606012},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606012},
primaryClass={cmp-lg cs.CL}
} | lua1996an |
arxiv-668831 | cmp-lg/9606013 | Relating Turing's Formula and Zipf's Law | <|reference_start|>Relating Turing's Formula and Zipf's Law: An asymptote is derived from Turing's local reestimation formula for population frequencies, and a local reestimation formula is derived from Zipf's law for the asymptotic behavior of population frequencies. The two are shown to be qualitatively different asymptotically, but nevertheless to be instances of a common class of reestimation-formula-asymptote pairs, in which they constitute the upper and lower bounds of the convergence region of the cumulative of the frequency function, as rank tends to infinity. The results demonstrate that Turing's formula is qualitatively different from the various extensions to Zipf's law, and suggest that it smooths the frequency estimates towards a geometric distribution.<|reference_end|> | arxiv | @article{samuelsson1996relating,
title={Relating Turing's Formula and Zipf's Law},
author={Christer Samuelsson (University of the Saarland)},
journal={WVLC-4},
year={1996},
number={CLAUS 78},
archivePrefix={arXiv},
eprint={cmp-lg/9606013},
primaryClass={cmp-lg cs.CL}
} | samuelsson1996relating |
arxiv-668832 | cmp-lg/9606014 | Building Probabilistic Models for Natural Language | <|reference_start|>Building Probabilistic Models for Natural Language: In this thesis, we investigate three problems involving the probabilistic modeling of language: smoothing n-gram models, statistical grammar induction, and bilingual sentence alignment. These three problems employ models at three different levels of language; they involve word-based, constituent-based, and sentence-based models, respectively. We describe techniques for improving the modeling of language at each of these levels, and surpass the performance of existing algorithms for each problem. We approach the three problems using three different frameworks. We relate each of these frameworks to the Bayesian paradigm, and show why each framework used was appropriate for the given problem. Finally, we show how our research addresses two central issues in probabilistic modeling: the sparse data problem and the problem of inducing hidden structure.<|reference_end|> | arxiv | @article{chen1996building,
title={Building Probabilistic Models for Natural Language},
author={Stanley F. Chen (Harvard University)},
journal={arXiv preprint arXiv:cmp-lg/9606014},
year={1996},
number={CRCT TR-02-96},
archivePrefix={arXiv},
eprint={cmp-lg/9606014},
primaryClass={cmp-lg cs.CL}
} | chen1996building |
arxiv-668833 | cmp-lg/9606015 | Stabilizing the Richardson Algorithm by Controlling Chaos | <|reference_start|>Stabilizing the Richardson Algorithm by Controlling Chaos: By viewing the operations of the Richardson purification algorithm as a discrete time dynamical process, we propose a method to overcome the instability of the algorithm by controlling chaos. We present theoretical analysis and numerical results on the behavior and performance of the stabilized algorithm.<|reference_end|> | arxiv | @article{he1996stabilizing,
title={Stabilizing the Richardson Algorithm by Controlling Chaos},
author={Song He (Bell Laboratories, Lucent Technologies, Murray Hill, NJ)},
journal={arXiv preprint arXiv:cmp-lg/9606015},
year={1996},
doi={10.1063/1.168602},
archivePrefix={arXiv},
eprint={cmp-lg/9606015},
primaryClass={cmp-lg chao-dyn comp-gas cond-mat cs.CL nlin.CD nlin.CG}
} | he1996stabilizing |
arxiv-668834 | cmp-lg/9606016 | A Probabilistic Disambiguation Method Based on Psycholinguistic Principles | <|reference_start|>A Probabilistic Disambiguation Method Based on Psycholinguistic Principles: We address the problem of structural disambiguation in syntactic parsing. In psycholinguistics, a number of principles of disambiguation have been proposed, notably the Lexical Preference Rule (LPR), the Right Association Principle (RAP), and the Attach Low and Parallel Principle (ALPP) (an extension of RAP). We argue that in order to improve disambiguation results it is necessary to implement these principles on the basis of a probabilistic methodology. We define a `three-word probability' for implementing LPR, and a `length probability' for implementing RAP and ALPP. Furthermore, we adopt the `back-off' method to combine these two types of probabilities. Our experimental results indicate our method to be effective, attaining an accuracy of 89.2%.<|reference_end|> | arxiv | @article{li1996a,
title={A Probabilistic Disambiguation Method Based on Psycholinguistic
Principles},
author={Hang Li (C&C Res. Labs., NEC Corporation)},
journal={arXiv preprint arXiv:cmp-lg/9606016},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606016},
primaryClass={cmp-lg cs.CL}
} | li1996a |
arxiv-668835 | cmp-lg/9606017 | With raised eyebrows or the eyebrows raised ? A Neural Network Approach to Grammar Checking for Definiteness | <|reference_start|>With raised eyebrows or the eyebrows raised ? A Neural Network Approach to Grammar Checking for Definiteness: In this paper, we use a feature model of the semantics of plural determiners to present an approach to grammar checking for definiteness. Using neural network techniques, a semantics -- morphological category mapping was learned. We then applied a textual encoding technique to the 125 occurrences of the relevant category in a 10 000 word narrative text and learned a surface -- semantics mapping. By applying the learned generation function to the newly generated representations, we achieved a correct category assignment in many cases (87 %). These results are considerably better than a direct surface categorization approach (54 %), with a baseline (always guessing the dominant category) of 60 %. It is discussed how these results could be used in multilingual NLP applications.<|reference_end|> | arxiv | @article{scheler1996with,
title={With raised eyebrows or the eyebrows raised ? A Neural Network Approach
to Grammar Checking for Definiteness},
author={Gabriele Scheler (Institut fur Informatik, TU Munchen)},
journal={arXiv preprint arXiv:cmp-lg/9606017},
year={1996},
number={FKI-215-96},
archivePrefix={arXiv},
eprint={cmp-lg/9606017},
primaryClass={cmp-lg cs.CL}
} | scheler1996with |
arxiv-668836 | cmp-lg/9606018 | Compilation of Weighted Finite-State Transducers from Decision Trees | <|reference_start|>Compilation of Weighted Finite-State Transducers from Decision Trees: We report on a method for compiling decision trees into weighted finite-state transducers. The key assumptions are that the tree predictions specify how to rewrite symbols from an input string, and the decision at each tree node is stateable in terms of regular expressions on the input string. Each leaf node can then be treated as a separate rule where the left and right contexts are constructable from the decisions made traversing the tree from the root to the leaf. These rules are compiled into transducers using the weighted rewrite-rule rule-compilation algorithm described in (Mohri and Sproat, 1996).<|reference_end|> | arxiv | @article{sproat1996compilation,
title={Compilation of Weighted Finite-State Transducers from Decision Trees},
author={Richard Sproat (Bell Laboratories) and Michael Riley (AT&T Research)},
journal={34th Annual Meeting of the ACL},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606018},
primaryClass={cmp-lg cs.CL}
} | sproat1996compilation |
arxiv-668837 | cmp-lg/9606019 | Computational Complexity of Probabilistic Disambiguation by means of Tree-Grammars | <|reference_start|>Computational Complexity of Probabilistic Disambiguation by means of Tree-Grammars: This paper studies the computational complexity of disambiguation under probabilistic tree-grammars and context-free grammars. It presents a proof that the following problems are NP-hard: computing the Most Probable Parse (MPP) from a sentence or from a word-graph, and computing the Most Probable Sentence (MPS) from a word-graph. The NP-hardness of computing the MPS from a word-graph also holds for Stochastic Context-Free Grammars. Consequently, the existence of deterministic polynomial-time algorithms for solving these disambiguation problems is a highly improbable event.<|reference_end|> | arxiv | @article{sima'an1996computational,
title={Computational Complexity of Probabilistic Disambiguation by means of
Tree-Grammars},
author={Khalil Sima'an (Utrecht University)},
journal={arXiv preprint arXiv:cmp-lg/9606019},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606019},
primaryClass={cmp-lg cs.CL}
} | sima'an1996computational |
arxiv-668838 | cmp-lg/9606020 | Computing Optimal Descriptions for Optimality Theory Grammars with Context-Free Position Structures | <|reference_start|>Computing Optimal Descriptions for Optimality Theory Grammars with Context-Free Position Structures: This paper describes an algorithm for computing optimal structural descriptions for Optimality Theory grammars with context-free position structures. This algorithm extends Tesar's dynamic programming approach [Tesar 1994][Tesar 1995] to computing optimal structural descriptions from regular to context-free structures. The generalization to context-free structures creates several complications, all of which are overcome without compromising the core dynamic programming approach. The resulting algorithm has a time complexity cubic in the length of the input, and is applicable to grammars with universal constraints that exhibit context-free locality.<|reference_end|> | arxiv | @article{tesar1996computing,
title={Computing Optimal Descriptions for Optimality Theory Grammars with
Context-Free Position Structures},
author={Bruce Tesar (Rutgers University)},
journal={arXiv preprint arXiv:cmp-lg/9606020},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606020},
primaryClass={cmp-lg cs.CL}
} | tesar1996computing |
arxiv-668839 | cmp-lg/9606021 | An Iterative Algorithm to Build Chinese Language Models | <|reference_start|>An Iterative Algorithm to Build Chinese Language Models: We present an iterative procedure to build a Chinese language model (LM). We segment Chinese text into words based on a word-based Chinese language model. However, the construction of a Chinese LM itself requires word boundaries. To get out of the chicken-and-egg problem, we propose an iterative procedure that alternates two operations: segmenting text into words and building an LM. Starting with an initial segmented corpus and an LM based upon it, we use a Viterbi-like algorithm to segment another set of data. Then, we build an LM based on the second set and use the resulting LM to segment again the first corpus. The alternating procedure provides a self-organized way for the segmenter to automatically detect unseen words and correct segmentation errors. Our preliminary experiment shows that the alternating procedure not only improves the accuracy of our segmentation, but discovers unseen words surprisingly well. The resulting word-based LM has a perplexity of 188 for a general Chinese corpus.<|reference_end|> | arxiv | @article{luo1996an,
title={An Iterative Algorithm to Build Chinese Language Models},
author={Xiaoqiang Luo (Center for Language and Speech Processing, The Johns
Hopkins University) and Salim Roukos (IBM T. J. Watson Research Center)},
journal={arXiv preprint arXiv:cmp-lg/9606021},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606021},
primaryClass={cmp-lg cs.CL}
} | luo1996an |
arxiv-668840 | cmp-lg/9606022 | Two Questions about Data-Oriented Parsing | <|reference_start|>Two Questions about Data-Oriented Parsing: In this paper I present ongoing work on the data-oriented parsing (DOP) model. In previous work, DOP was tested on a cleaned-up set of analyzed part-of-speech strings from the Penn Treebank, achieving excellent test results. This left, however, two important questions unanswered: (1) how does DOP perform if tested on unedited data, and (2) how can DOP be used for parsing word strings that contain unknown words? This paper addresses these questions. We show that parse results on unedited data are worse than on cleaned-up data, although very competitive if compared to other models. As to the parsing of word strings, we show that the hardness of the problem does not so much depend on unknown words, but on previously unseen lexical categories of known words. We give a novel method for parsing these words by estimating the probabilities of unknown subtrees. The method is of general interest since it shows that good performance can be obtained without the use of a part-of-speech tagger. To the best of our knowledge, our method outperforms other statistical parsers tested on Penn Treebank word strings.<|reference_end|> | arxiv | @article{bod1996two,
title={Two Questions about Data-Oriented Parsing},
author={Rens Bod (University of Amsterdam)},
journal={arXiv preprint arXiv:cmp-lg/9606022},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606022},
primaryClass={cmp-lg cs.CL}
} | bod1996two |
arxiv-668841 | cmp-lg/9606023 | A Robust System for Natural Spoken Dialogue | <|reference_start|>A Robust System for Natural Spoken Dialogue: This paper describes a system that leads us to believe in the feasibility of constructing natural spoken dialogue systems in task-oriented domains. It specifically addresses the issue of robust interpretation of speech in the presence of recognition errors. Robustness is achieved by a combination of statistical error post-correction, syntactically- and semantically-driven robust parsing, and extensive use of the dialogue context. We present an evaluation of the system using time-to-completion and the quality of the final solution that suggests that most native speakers of English can use the system successfully with virtually no training.<|reference_end|> | arxiv | @article{allen1996a,
title={A Robust System for Natural Spoken Dialogue},
author={James F. Allen and Bradford W. Miller and Eric K. Ringger and Teresa
Sikorski (University of Rochester)},
journal={Proceedings of the 34th Annual Meeting of the ACL},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606023},
primaryClass={cmp-lg cs.CL}
} | allen1996a |
arxiv-668842 | cmp-lg/9606024 | A Data-Oriented Approach to Semantic Interpretation | <|reference_start|>A Data-Oriented Approach to Semantic Interpretation: In Data-Oriented Parsing (DOP), an annotated language corpus is used as a stochastic grammar. The most probable analysis of a new input sentence is constructed by combining sub-analyses from the corpus in the most probable way. This approach has been successfully used for syntactic analysis, using corpora with syntactic annotations such as the Penn Treebank. If a corpus with semantically annotated sentences is used, the same approach can also generate the most probable semantic interpretation of an input sentence. The present paper explains this semantic interpretation method, and summarizes the results of a preliminary experiment. Semantic annotations were added to the syntactic annotations of most of the sentences of the ATIS corpus. A data-oriented semantic interpretation algorithm was successfully tested on this semantically enriched corpus.<|reference_end|> | arxiv | @article{bod1996a,
title={A Data-Oriented Approach to Semantic Interpretation},
author={Rens Bod and Remko Bonnema and Remko Scha (University of Amsterdam)},
journal={arXiv preprint arXiv:cmp-lg/9606024},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606024},
primaryClass={cmp-lg cs.CL}
} | bod1996a |
arxiv-668843 | cmp-lg/9606025 | Two Sources of Control over the Generation of Software Instructions | <|reference_start|>Two Sources of Control over the Generation of Software Instructions: This paper presents an analysis conducted on a corpus of software instructions in French in order to establish whether task structure elements (the procedural representation of the users' tasks) are alone sufficient to control the grammatical resources of a text generator. We show that the construct of genre provides a useful additional source of control enabling us to resolve undetermined cases.<|reference_end|> | arxiv | @article{hartley1996two,
title={Two Sources of Control over the Generation of Software Instructions},
author={Anthony Hartley (Language Center, University of Brighton) and Cecile
Paris (Information Technology Research Institute (ITRI), University of
Brighton)},
journal={arXiv preprint arXiv:cmp-lg/9606025},
year={1996},
number={ITRI96-02},
archivePrefix={arXiv},
eprint={cmp-lg/9606025},
primaryClass={cmp-lg cs.CL}
} | hartley1996two |
arxiv-668844 | cmp-lg/9606026 | An Efficient Compiler for Weighted Rewrite Rules | <|reference_start|>An Efficient Compiler for Weighted Rewrite Rules: Context-dependent rewrite rules are used in many areas of natural language and speech processing. Work in computational phonology has demonstrated that, given certain conditions, such rewrite rules can be represented as finite-state transducers (FSTs). We describe a new algorithm for compiling rewrite rules into FSTs. We show the algorithm to be simpler and more efficient than existing algorithms. Further, many of our applications demand the ability to compile weighted rules into weighted FSTs, transducers generalized by providing transitions with weights. We have extended the algorithm to allow for this.<|reference_end|> | arxiv | @article{mohri1996an,
title={An Efficient Compiler for Weighted Rewrite Rules},
author={Mehryar Mohri (AT&T Research) and Richard Sproat (Bell Laboratories)},
journal={34th Annual Meeting of the ACL},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606026},
primaryClass={cmp-lg cs.CL}
} | mohri1996an |
arxiv-668845 | cmp-lg/9606027 | Linguistic Structure as Composition and Perturbation | <|reference_start|>Linguistic Structure as Composition and Perturbation: This paper discusses the problem of learning language from unprocessed text and speech signals, concentrating on the problem of learning a lexicon. In particular, it argues for a representation of language in which linguistic parameters like words are built by perturbing a composition of existing parameters. The power of this representation is demonstrated by several examples in text segmentation and compression, acquisition of a lexicon from raw speech, and the acquisition of mappings between text and artificial representations of meaning.<|reference_end|> | arxiv | @article{de marcken1996linguistic,
title={Linguistic Structure as Composition and Perturbation},
author={Carl de Marcken (MIT Artificial Intelligence Lab.)},
journal={arXiv preprint arXiv:cmp-lg/9606027},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606027},
primaryClass={cmp-lg cs.CL}
} | de marcken1996linguistic |
arxiv-668846 | cmp-lg/9606028 | Maximizing Top-down Constraints for Unification-based Systems | <|reference_start|>Maximizing Top-down Constraints for Unification-based Systems: A left-corner parsing algorithm with top-down filtering has been reported to show very efficient performance for unification-based systems. However, due to the nontermination of parsing with left-recursive grammars, top-down constraints must be weakened. In this paper, a general method of maximizing top-down constraints is proposed. The method provides a procedure to dynamically compute *restrictor*, a minimum set of features involved in an infinite loop for every propagation path; thus top-down constraints are maximally propagated.<|reference_end|> | arxiv | @article{tomuro1996maximizing,
title={Maximizing Top-down Constraints for Unification-based Systems},
author={Noriko Tomuro (DePaul University)},
journal={arXiv preprint arXiv:cmp-lg/9606028},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606028},
primaryClass={cmp-lg cs.CL}
} | tomuro1996maximizing |
arxiv-668847 | cmp-lg/9606029 | Directed Replacement | <|reference_start|>Directed Replacement: This paper introduces to the finite-state calculus a family of directed replace operators. In contrast to the simple replace expression, UPPER -> LOWER, defined in Karttunen (ACL-95), the new directed version, UPPER @-> LOWER, yields an unambiguous transducer if the lower language consists of a single string. It transduces the input string from left to right, making only the longest possible replacement at each point. A new type of replacement expression, UPPER @-> PREFIX ... SUFFIX, yields a transducer that inserts text around strings that are instances of UPPER. The symbol ... denotes the matching part of the input which itself remains unchanged. PREFIX and SUFFIX are regular expressions describing the insertions. Expressions of the type UPPER @-> PREFIX ... SUFFIX may be used to compose a deterministic parser for a ``local grammar'' in the sense of Gross (1989). Other useful applications of directed replacement include tokenization and filtering of text streams.<|reference_end|> | arxiv | @article{karttunen1996directed,
title={Directed Replacement},
author={Lauri Karttunen},
journal={arXiv preprint arXiv:cmp-lg/9606029},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606029},
primaryClass={cmp-lg cs.CL}
} | karttunen1996directed |
arxiv-668848 | cmp-lg/9606030 | Minimizing Manual Annotation Cost In Supervised Training From Corpora | <|reference_start|>Minimizing Manual Annotation Cost In Supervised Training From Corpora: Corpus-based methods for natural language processing often use supervised training, requiring expensive manual annotation of training corpora. This paper investigates methods for reducing annotation cost by {\it sample selection}. In this approach, during training the learning program examines many unlabeled examples and selects for labeling (annotation) only those that are most informative at each stage. This avoids redundantly annotating examples that contribute little new information. This paper extends our previous work on {\it committee-based sample selection} for probabilistic classifiers. We describe a family of methods for committee-based sample selection, and report experimental results for the task of stochastic part-of-speech tagging. We find that all variants achieve a significant reduction in annotation cost, though their computational efficiency differs. In particular, the simplest method, which has no parameters to tune, gives excellent results. We also show that sample selection yields a significant reduction in the size of the model used by the tagger.<|reference_end|> | arxiv | @article{engelson1996minimizing,
title={Minimizing Manual Annotation Cost In Supervised Training From Corpora},
author={Sean P. Engelson and Ido Dagan},
journal={arXiv preprint arXiv:cmp-lg/9606030},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606030},
primaryClass={cmp-lg cs.CL}
} | engelson1996minimizing |
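
The committee-based selection loop itself is compact. In the paper, committee members are taggers whose parameters are sampled from a posterior; the fixed toy classifiers below merely stand in for them to show the vote-entropy ranking:

```python
import math
from collections import Counter

def vote_entropy(votes):
    """Disagreement among the committee's labels for one example."""
    n = len(votes)
    return -sum((c / n) * math.log2(c / n) for c in Counter(votes).values())

def select_for_annotation(committee, unlabeled, batch_size):
    """Rank unlabeled examples by committee disagreement; the top of
    the ranking is what gets sent to the human annotator."""
    return sorted(unlabeled, reverse=True,
                  key=lambda x: vote_entropy([clf(x) for clf in committee])
                  )[:batch_size]

# Three toy "taggers" standing in for sampled committee members.
c1 = lambda w: "NN" if w.endswith("s") else "VB"
c2 = lambda w: "NN" if len(w) > 4 else "VB"
c3 = lambda w: "NN"
words = ["runs", "go", "tables", "be", "walks"]
print(select_for_annotation([c1, c2, c3], words, 2))  # disputed words first
```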
arxiv-668849 | cmp-lg/9606031 | Research on Architectures for Integrated Speech/Language Systems in Verbmobil | <|reference_start|>Research on Architectures for Integrated Speech/Language Systems in Verbmobil: The German joint research project Verbmobil (VM) aims at the development of a speech to speech translation system. This paper reports on research done in our group which belongs to Verbmobil's subproject on system architectures (TP15). Our specific research areas are the construction of parsers for spontaneous speech, investigations in the parallelization of parsing and to contribute to the development of a flexible communication architecture with distributed control.<|reference_end|> | arxiv | @article{görz1996research,
title={Research on Architectures for Integrated Speech/Language Systems in
Verbmobil},
author={G"unther G"orz, Marcus Kesseler, J"org Spilker, Hans Weber},
journal={accepted for COLING 96},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606031},
primaryClass={cmp-lg cs.CL}
} | görz1996research |
arxiv-668850 | cmp-lg/9606032 | Integrating Multiple Knowledge Sources to Disambiguate Word Sense: An Exemplar-Based Approach | <|reference_start|>Integrating Multiple Knowledge Sources to Disambiguate Word Sense: An Exemplar-Based Approach: In this paper, we present a new approach for word sense disambiguation (WSD) using an exemplar-based learning algorithm. This approach integrates a diverse set of knowledge sources to disambiguate word sense, including part of speech of neighboring words, morphological form, the unordered set of surrounding words, local collocations, and verb-object syntactic relation. We tested our WSD program, named {\sc Lexas}, on both a common data set used in previous work, as well as on a large sense-tagged corpus that we separately constructed. {\sc Lexas} achieves a higher accuracy on the common data set, and performs better than the most frequent heuristic on the highly ambiguous words in the large corpus tagged with the refined senses of {\sc WordNet}.<|reference_end|> | arxiv | @article{ng1996integrating,
title={Integrating Multiple Knowledge Sources to Disambiguate Word Sense: An
Exemplar-Based Approach},
author={Hwee Tou Ng, Hian Beng Lee (Defence Science Organisation)},
journal={ACL-96},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9606032},
primaryClass={cmp-lg cs.CL}
} | ng1996integrating |
arxiv-668851 | cmp-lg/9607001 | GramCheck: A Grammar and Style Checker | <|reference_start|>GramCheck: A Grammar and Style Checker: This paper presents a grammar and style checker demonstrator for Spanish and Greek native writers developed within the project GramCheck. Besides a brief grammar error typology for Spanish, a linguistically motivated approach to detection and diagnosis is presented, based on the generalized use of PROLOG extensions to highly typed unification-based grammars. The demonstrator, currently including full coverage for agreement errors and certain head-argument relation issues, also provides correction by means of an analysis-transfer-synthesis cycle. Finally, future extensions to the current system are discussed.<|reference_end|> | arxiv | @article{bustamante1996gramcheck:,
title={GramCheck: A Grammar and Style Checker},
author={Flora Ramírez Bustamante (Universidad Carlos III de Madrid),
Fernando Sánchez León (Universidad Autónoma de Madrid)},
journal={arXiv preprint arXiv:cmp-lg/9607001},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607001},
primaryClass={cmp-lg cs.CL}
} | bustamante1996gramcheck: |
arxiv-668852 | cmp-lg/9607002 | Inducing Constraint Grammars | <|reference_start|>Inducing Constraint Grammars: Constraint Grammar rules are induced from corpora. A simple scheme based on local information, i.e., on lexical biases and next-neighbour contexts, extended through the use of barriers, reached 87.3 percent precision (1.12 tags/word) at 98.2 percent recall. The results compare favourably with other methods that are used for similar tasks although they are by no means as good as the results achieved using the original hand-written rules developed over several years time.<|reference_end|> | arxiv | @article{samuelsson1996inducing,
title={Inducing Constraint Grammars},
author={Christer Samuelsson, Pasi Tapanainen and Atro Voutilainen},
journal={ICGI-3},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607002},
primaryClass={cmp-lg cs.CL}
} | samuelsson1996inducing |
arxiv-668853 | cmp-lg/9607003 | Domain and Language Independent Feature Extraction for Statistical Text Categorization | <|reference_start|>Domain and Language Independent Feature Extraction for Statistical Text Categorization: A generic system for text categorization is presented which uses a representative text corpus to adapt the processing steps: feature extraction, dimension reduction, and classification. Feature extraction automatically learns features from the corpus by reducing actual word forms using statistical information of the corpus and general linguistic knowledge. The dimension of feature vector is then reduced by linear transformation keeping the essential information. The classification principle is a minimum least square approach based on polynomials. The described system can be readily adapted to new domains or new languages. In application, the system is reliable, fast, and processes completely automatically. It is shown that the text categorizer works successfully both on text generated by document image analysis - DIA and on ground truth data.<|reference_end|> | arxiv | @article{bayer1996domain,
title={Domain and Language Independent Feature Extraction for Statistical Text
Categorization},
author={Thomas Bayer, Ingrid Renz, Michael Stein, Ulrich Kressel (Daimler Benz
Research and Technology)},
journal={Proceedings of the Workshop on Language Engineering for Document
Analysis and Recognition, ed. L. Evett and T. Rose, AISB 1996 Workshop
Series, April 1996, Sussex University, England, 21-32 (ISBN 0 905 488628)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607003},
primaryClass={cmp-lg cs.CL}
} | bayer1996domain |
arxiv-668854 | cmp-lg/9607004 | Integrating Syntactic and Prosodic Information for the Efficient Detection of Empty Categories | <|reference_start|>Integrating Syntactic and Prosodic Information for the Efficient Detection of Empty Categories: We describe a number of experiments that demonstrate the usefulness of prosodic information for a processing module which parses spoken utterances with a feature-based grammar employing empty categories. We show that by requiring certain prosodic properties from those positions in the input where the presence of an empty category has to be hypothesized, a derivation can be accomplished more efficiently. The approach has been implemented in the machine translation project VERBMOBIL and results in a significant reduction of the work-load for the parser.<|reference_end|> | arxiv | @article{batliner1996integrating,
title={Integrating Syntactic and Prosodic Information for the Efficient
Detection of Empty Categories},
author={Anton Batliner, Anke Feldhaus, Stefan Geissler, Andreas Kiessling,
Tibor Kiss, Ralf Kompe, Elmar Noeth},
journal={arXiv preprint arXiv:cmp-lg/9607004},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607004},
primaryClass={cmp-lg cs.CL}
} | batliner1996integrating |
arxiv-668855 | cmp-lg/9607005 | Head Automata and Bilingual Tiling: Translation with Minimal Representations | <|reference_start|>Head Automata and Bilingual Tiling: Translation with Minimal Representations: We present a language model consisting of a collection of costed bidirectional finite state automata associated with the head words of phrases. The model is suitable for incremental application of lexical associations in a dynamic programming search for optimal dependency tree derivations. We also present a model and algorithm for machine translation involving optimal ``tiling'' of a dependency tree with entries of a costed bilingual lexicon. Experimental results are reported comparing methods for assigning cost functions to these models. We conclude with a discussion of the adequacy of annotated linguistic strings as representations for machine translation.<|reference_end|> | arxiv | @article{alshawi1996head,
title={Head Automata and Bilingual Tiling: Translation with Minimal
Representations},
author={Hiyan Alshawi (AT&T Research)},
journal={Proceedings of the 34th Annual Meeting of the Association for
Computational Linguistics, 167-176, Santa Cruz, California, 1996.},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607005},
primaryClass={cmp-lg cs.CL}
} | alshawi1996head |
arxiv-668856 | cmp-lg/9607006 | Head Automata for Speech Translation | <|reference_start|>Head Automata for Speech Translation: This paper presents statistical language and translation models based on collections of small finite state machines we call ``head automata''. The models are intended to capture the lexical sensitivity of N-gram models and direct statistical translation models, while at the same time taking account of the hierarchical phrasal structure of language. Two types of head automata are defined: relational head automata suitable for translation by transfer of dependency trees, and head transducers suitable for direct recursive lexical translation.<|reference_end|> | arxiv | @article{alshawi1996head,
title={Head Automata for Speech Translation},
author={Hiyan Alshawi (AT&T Research)},
journal={Proceedings of ICSLP 96, the Fourth International Conference on
Spoken Language Processing, Philadelphia, Pennsylvania, 1996.},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607006},
primaryClass={cmp-lg cs.CL}
} | alshawi1996head |
arxiv-668857 | cmp-lg/9607007 | Parallel Replacement in Finite State Calculus | <|reference_start|>Parallel Replacement in Finite State Calculus: This paper extends the calculus of regular expressions with new types of replacement expressions that enhance the expressiveness of the simple replace operator defined in Karttunen (1995). Parallel replacement allows multiple replacements to apply simultaneously to the same input without interfering with each other. We also allow a replacement to be constrained by any number of alternative contexts. With these enhancements, the general replacement expressions are more versatile than two-level rules for the description of complex morphological alternations.<|reference_end|> | arxiv | @article{kempe1996parallel,
title={Parallel Replacement in Finite State Calculus},
author={Andre Kempe and Lauri Karttunen (Rank Xerox Research Centre, Grenoble
Laboratory, France)},
journal={COLING-96, Copenhagen DK. August 5, 1996.},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607007},
primaryClass={cmp-lg cs.CL}
} | kempe1996parallel |
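
Why parallelism matters is easy to demonstrate: with chained replacements each rule can destroy the input of the next, while a single-pass application cannot. A small Python sketch of the semantics (literal patterns only, not the transducer construction):

```python
import re

def parallel_replace(text, rules):
    """Apply several replacement rules in one left-to-right pass, so
    the output of one rule is never rescanned by another."""
    alts = sorted(rules, key=len, reverse=True)   # longest match first
    rx = re.compile("|".join(re.escape(k) for k in alts))
    return rx.sub(lambda m: rules[m.group(0)], text)

# Swapping two symbols is impossible with chained replacements --
# "abba".replace("a", "b").replace("b", "a") collapses to "aaaa" --
# but is trivial when both rules fire in parallel:
print(parallel_replace("abba", {"a": "b", "b": "a"}))   # -> baab
```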
arxiv-668858 | cmp-lg/9607008 | From Submit to Submitted via Submission: On Lexical Rules in Large-Scale Lexicon Acquisition | <|reference_start|>From Submit to Submitted via Submission: On Lexical Rules in Large-Scale Lexicon Acquisition: This paper deals with the discovery, representation, and use of lexical rules (LRs) during large-scale semi-automatic computational lexicon acquisition. The analysis is based on a set of LRs implemented and tested on the basis of Spanish and English business- and finance-related corpora. We show that, though the use of LRs is justified, they do not come cost-free. Semi-automatic output checking is required, even with blocking and preemption procedures built in. Nevertheless, large-scope LRs are justified because they facilitate the unavoidable process of large-scale semi-automatic lexical acquisition. We also argue that the place of LRs in the computational process is a complex issue.<|reference_end|> | arxiv | @article{viegas1996from,
title={From Submit to Submitted via Submission: On Lexical Rules in Large-Scale
Lexicon Acquisition},
author={Evelyne Viegas, Boyan Onyshkevych, Victor Raskin, Sergei Nirenburg},
journal={arXiv preprint arXiv:cmp-lg/9607008},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607008},
primaryClass={cmp-lg cs.CL}
} | viegas1996from |
arxiv-668859 | cmp-lg/9607009 | Semantic-based Transfer | <|reference_start|>Semantic-based Transfer: This article presents a new semantic-based transfer approach developed and applied within the Verbmobil Machine Translation project. We give an overview of the declarative transfer formalism together with its procedural realization. Our approach is discussed and compared with several other approaches from the MT literature.<|reference_end|> | arxiv | @article{dorna1996semantic-based,
title={Semantic-based Transfer},
author={Michael Dorna and Martin C. Emele (IMS, Stuttgart, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9607009},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607009},
primaryClass={cmp-lg cs.CL}
} | dorna1996semantic-based |
arxiv-668860 | cmp-lg/9607010 | Efficient Implementation of a Semantic-based Transfer Approach | <|reference_start|>Efficient Implementation of a Semantic-based Transfer Approach: This article gives an overview of a new semantic-based transfer approach developed and applied within the Verbmobil Machine Translation project. We present the declarative transfer formalism and discuss its implementation.<|reference_end|> | arxiv | @article{dorna1996efficient,
title={Efficient Implementation of a Semantic-based Transfer Approach},
author={Michael Dorna and Martin C. Emele (IMS, Stuttgart, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9607010},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607010},
primaryClass={cmp-lg cs.CL}
} | dorna1996efficient |
arxiv-668861 | cmp-lg/9607011 | Pattern-Based Context-Free Grammars for Machine Translation | <|reference_start|>Pattern-Based Context-Free Grammars for Machine Translation: This paper proposes the use of ``pattern-based'' context-free grammars as a basis for building machine translation (MT) systems, which are now being adopted as personal tools by a broad range of users in the cyberspace society. We discuss major requirements for such tools, including easy customization for diverse domains, the efficiency of the translation algorithm, and scalability (incremental improvement in translation quality through user interaction), and describe how our approach meets these requirements.<|reference_end|> | arxiv | @article{takeda1996pattern-based,
title={Pattern-Based Context-Free Grammars for Machine Translation},
author={Koichi Takeda (Tokyo Research Laboratory, IBM, Japan)},
journal={Proceedings of the 34th Meeting of the Association for
Computational Linguistics (ACL'96)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607011},
primaryClass={cmp-lg cs.CL}
} | takeda1996pattern-based |
arxiv-668862 | cmp-lg/9607012 | MBT: A Memory-Based Part of Speech Tagger-Generator | <|reference_start|>MBT: A Memory-Based Part of Speech Tagger-Generator: We introduce a memory-based approach to part of speech tagging. Memory-based learning is a form of supervised learning based on similarity-based reasoning. The part of speech tag of a word in a particular context is extrapolated from the most similar cases held in memory. Supervised learning approaches are useful when a tagged corpus is available as an example of the desired output of the tagger. Based on such a corpus, the tagger-generator automatically builds a tagger which is able to tag new text the same way, diminishing development time for the construction of a tagger considerably. Memory-based tagging shares this advantage with other statistical or machine learning approaches. Additional advantages specific to a memory-based approach include (i) the relatively small tagged corpus size sufficient for training, (ii) incremental learning, (iii) explanation capabilities, (iv) flexible integration of information in case representations, (v) its non-parametric nature, (vi) reasonably good results on unknown words without morphological analysis, and (vii) fast learning and tagging. In this paper we show that a large-scale application of the memory-based approach is feasible: we obtain a tagging accuracy that is on a par with that of known statistical approaches, and with attractive space and time complexity properties when using {\em IGTree}, a tree-based formalism for indexing and searching huge case bases. The use of IGTree has as additional advantage that optimal context size for disambiguation is dynamically computed.<|reference_end|> | arxiv | @article{daelemans1996mbt:,
title={MBT: A Memory-Based Part of Speech Tagger-Generator},
author={Walter Daelemans (U. Tilburg, U. Antwerp), Jakub Zavrel (U. Tilburg)
Peter Berck (U. Antwerp), Steven Gillis (U. Antwerp)},
journal={Proceedings WVLC, Copenhagen},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607012},
primaryClass={cmp-lg cs.CL}
} | daelemans1996mbt: |
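
A stripped-down sketch of the memory-based core: cases are feature tuples over the focus word and its context, and tagging is a k-nearest-neighbour vote under the overlap metric. The information-gain weighting and the IGTree index of the real system are omitted, and the tiny feature set and training data are invented for illustration:

```python
from collections import Counter

def case(words, tags, i):
    """Feature tuple: focus word, previous (already assigned) tag,
    next word -- a miniature memory-based tagger case."""
    return (words[i],
            tags[i - 1] if i > 0 else "_",
            words[i + 1] if i + 1 < len(words) else "_")

def build_memory(tagged_sentences):
    memory = []
    for sent in tagged_sentences:
        ws, ts = [w for w, _ in sent], [t for _, t in sent]
        memory += [(case(ws, ts, i), ts[i]) for i in range(len(sent))]
    return memory

def tag(memory, words, k=3):
    tags = []
    for i in range(len(words)):
        f = case(words, tags, i)
        # Overlap metric: similarity = number of matching feature slots.
        nearest = sorted(memory, reverse=True,
                         key=lambda m: sum(a == b for a, b in zip(f, m[0])))
        tags.append(Counter(t for _, t in nearest[:k]).most_common(1)[0][0])
    return tags

memory = build_memory([[("the", "DET"), ("cat", "NN"), ("sleeps", "VBZ")],
                       [("a", "DET"), ("dog", "NN"), ("barks", "VBZ")]])
print(tag(memory, ["the", "dog", "sleeps"]))   # -> ['DET', 'NN', 'VBZ']
```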
arxiv-668863 | cmp-lg/9607013 | Unsupervised Discovery of Phonological Categories through Supervised Learning of Morphological Rules | <|reference_start|>Unsupervised Discovery of Phonological Categories through Supervised Learning of Morphological Rules: We describe a case study in the application of {\em symbolic machine learning} techniques for the discovery of linguistic rules and categories. A supervised rule induction algorithm is used to learn to predict the correct diminutive suffix given the phonological representation of Dutch nouns. The system produces rules which are comparable to rules proposed by linguists. Furthermore, in the process of learning this morphological task, the phonemes used are grouped into phonologically relevant categories. We discuss the relevance of our method for linguistics and language technology.<|reference_end|> | arxiv | @article{daelemans1996unsupervised,
title={Unsupervised Discovery of Phonological Categories through Supervised
Learning of Morphological Rules},
author={Walter Daelemans (U. Tilburg, U. Antwerp), Peter Berck (U. Antwerp),
Steven Gillis (U. Antwerp)},
journal={Proceedings COLING 1996, Copenhagen},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607013},
primaryClass={cmp-lg cs.CL}
} | daelemans1996unsupervised |
arxiv-668864 | cmp-lg/9607014 | A Corpus Study of Negative Imperatives in Natural Language Instructions | <|reference_start|>A Corpus Study of Negative Imperatives in Natural Language Instructions: In this paper, we define the notion of a preventative expression and discuss a corpus study of such expressions in instructional text. We discuss our coding schema, which takes into account both form and function features, and present measures of inter-coder reliability for those features. We then discuss the correlations that exist between the function and the form features.<|reference_end|> | arxiv | @article{linden1996a,
title={A Corpus Study of Negative Imperatives in Natural Language Instructions},
author={Keith Vander Linden (University of Brighton), Barbara Di Eugenio
(Carnegie Mellon University)},
journal={Proceedings of COLING 96 (Copenhagen)},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607014},
primaryClass={cmp-lg cs.CL}
} | linden1996a |
arxiv-668865 | cmp-lg/9607015 | Learning Micro-Planning Rules for Preventative Expressions | <|reference_start|>Learning Micro-Planning Rules for Preventative Expressions: Building text planning resources by hand is time-consuming and difficult. Certainly, a number of planning architectures and their accompanying plan libraries have been implemented, but while the architectures themselves may be reused in a new domain, the library of plans typically cannot. One way to address this problem is to use machine learning techniques to automate the derivation of planning resources for new domains. In this paper, we apply this technique to build micro-planning rules for preventative expressions in instructional text.<|reference_end|> | arxiv | @article{linden1996learning,
title={Learning Micro-Planning Rules for Preventative Expressions},
author={Keith Vander Linden (ITRI, University of Brighton); Barbara Di Eugenio
(Computational Linguistics, Carnegie Mellon University)},
journal={INLG96 -- Eight International Natural Language Generation Workshop},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607015},
primaryClass={cmp-lg cs.CL}
} | linden1996learning |
arxiv-668866 | cmp-lg/9607016 | Beyond Word N-Grams | <|reference_start|>Beyond Word N-Grams: We describe, analyze, and evaluate experimentally a new probabilistic model for word-sequence prediction in natural language based on prediction suffix trees (PSTs). By using efficient data structures, we extend the notion of PST to unbounded vocabularies. We also show how to use a Bayesian approach based on recursive priors over all possible PSTs to efficiently maintain tree mixtures. These mixtures have provably and practically better performance than almost any single model. We evaluate the model on several corpora. The low perplexity achieved by relatively small PST mixture models suggests that they may be an advantageous alternative, both theoretically and practically, to the widely used n-gram models.<|reference_end|> | arxiv | @article{pereira1996beyond,
title={Beyond Word N-Grams},
author={Fernando C. N. Pereira (AT&T Research), Yoram Singer (AT&T Research),
Naftali Tishby (Hebrew University)},
journal={arXiv preprint arXiv:cmp-lg/9607016},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607016},
primaryClass={cmp-lg cs.CL}
} | pereira1996beyond |
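
A minimal flavour of variable-length context prediction: store next-word counts for every context suffix up to a bound, and predict from the longest suffix actually observed. The paper's real contribution, the Bayesian mixture over all prediction suffix trees, is not attempted here:

```python
from collections import Counter, defaultdict

class SuffixModel:
    """Variable-length contexts in the spirit of prediction suffix
    trees: keep next-word counts for every context suffix up to
    max_len and predict from the longest suffix actually observed."""
    def __init__(self, max_len=3):
        self.max_len = max_len
        self.counts = defaultdict(Counter)

    def train(self, words):
        for i, w in enumerate(words):
            for n in range(min(i, self.max_len) + 1):
                self.counts[tuple(words[i - n:i])][w] += 1

    def predict(self, context):
        context = tuple(context)[-self.max_len:]
        while context not in self.counts:   # back off to shorter suffixes
            context = context[1:]
        total = sum(self.counts[context].values())
        return {w: c / total for w, c in self.counts[context].items()}

m = SuffixModel(max_len=2)
m.train("the cat sat on the mat the cat ran".split())
print(m.predict(["on", "the"]))      # longest context: {'mat': 1.0}
print(m.predict(["never", "seen"]))  # backs off to unigram statistics
```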
arxiv-668867 | cmp-lg/9607017 | Natural Language Processing: Structure and Complexity | <|reference_start|>Natural Language Processing: Structure and Complexity: We introduce a method for analyzing the complexity of natural language processing tasks, and for predicting the difficulty of new NLP tasks. Our complexity measures are derived from the Kolmogorov complexity of a class of automata --- {\it meaning automata}, whose purpose is to extract relevant pieces of information from sentences. Natural language semantics is defined only relative to the set of questions an automaton can answer. The paper shows examples of complexity estimates for various NLP programs and tasks, and some recipes for complexity management. It positions natural language processing as a subdomain of software engineering, and lays down its formal foundation.<|reference_end|> | arxiv | @article{zadrozny1996natural,
title={Natural Language Processing: Structure and Complexity},
author={Wlodek Zadrozny (IBM Research, T. J. Watson Research Center, Yorktown
Heights, NY, USA)},
journal={arXiv preprint arXiv:cmp-lg/9607017},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607017},
primaryClass={cmp-lg cs.CL}
} | zadrozny1996natural |
arxiv-668868 | cmp-lg/9607018 | TSNLP - Test Suites for Natural Language Processing | <|reference_start|>TSNLP - Test Suites for Natural Language Processing: The TSNLP project has investigated various aspects of the construction, maintenance and application of systematic test suites as diagnostic and evaluation tools for NLP applications. The paper summarizes the motivation and main results of the project: besides the solid methodological foundation, TSNLP has produced substantial multi-purpose and multi-user test suites for three European languages together with a set of specialized tools that facilitate the construction, extension, maintenance, retrieval, and customization of the test data. As TSNLP results, including the data and technology, are made publicly available, the project presents a valuable linguistic resource that has the potential of providing a wide-spread pre-standard diagnostic and evaluation tool for both developers and users of NLP applications.<|reference_end|> | arxiv | @article{lehmann1996tsnlp,
title={TSNLP - Test Suites for Natural Language Processing},
author={Sabine Lehmann (ISSCO, University of Geneva), Stephan Oepen (DFKI,
Saarbruecken), Sylvie Regnier-Prost (Aerospatiale, Suresnes), Klaus Netter
(DFKI), Veronika Lux (Aerospatiale), Judith Klein (DFKI), Kirsten Falkedal
(GMS, Berlin), Frederik Fouvry (University of Essex), Dominique Estival
(University of Melbourne), Eva Dauphin (Aerospatiale), Herve Compagnion
(ISSCO), Judith Baur (DFKI), Lorna Balkan (University of
Essex), Doug Arnold (University of Essex)},
journal={arXiv preprint arXiv:cmp-lg/9607018},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607018},
primaryClass={cmp-lg cs.CL}
} | lehmann1996tsnlp |
arxiv-668869 | cmp-lg/9607019 | Mental State Adjectives: the Perspective of Generative Lexicon | <|reference_start|>Mental State Adjectives: the Perspective of Generative Lexicon: This paper focusses on mental state adjectives and offers a unified analysis in the theory of Generative Lexicon (Pustejovsky, 1991, 1995). We show that, instead of enumerating the various syntactic constructions they enter into, with the different senses which arise, it is possible to give them a rich typed semantic representation which will explain both their semantic and syntactic polymorphism.<|reference_end|> | arxiv | @article{bouillon1996mental,
title={Mental State Adjectives: the Perspective of Generative Lexicon},
author={Pierrette Bouillon (ISSCO, University of Geneva)},
journal={arXiv preprint arXiv:cmp-lg/9607019},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607019},
primaryClass={cmp-lg cs.CL}
} | bouillon1996mental |
arxiv-668870 | cmp-lg/9607020 | A Divide-and-Conquer Strategy for Parsing | <|reference_start|>A Divide-and-Conquer Strategy for Parsing: In this paper, we propose a novel strategy which is designed to enhance the accuracy of the parser by simplifying complex sentences before parsing. This approach involves the separate parsing of the constituent sub-sentences within a complex sentence. To achieve that, the divide-and-conquer strategy first disambiguates the roles of the link words in the sentence and segments the sentence based on these roles. The separate parse trees of the segmented sub-sentences and the noun phrases within them are then synthesized to form the final parse. To evaluate the effects of this strategy on parsing, we compare the original performance of a dependency parser with the performance when it is enhanced with the divide-and-conquer strategy. When tested on 600 sentences of the IPSM'95 data sets, the enhanced parser saw a considerable error reduction of 21.2% in its accuracy.<|reference_end|> | arxiv | @article{shiuan1996a,
title={A Divide-and-Conquer Strategy for Parsing},
author={Peh Li Shiuan (Defence Science Organisation) and Christopher Ting Hian
Ann (Defence Science Organisation and National University of Singapore)},
journal={In Proceedings ACL/SIGPARSE, pp.57-66, 1996.},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607020},
primaryClass={cmp-lg cs.CL}
} | shiuan1996a |
arxiv-668871 | cmp-lg/9607021 | Morphological Analysis as Classification: an Inductive-Learning Approach | <|reference_start|>Morphological Analysis as Classification: an Inductive-Learning Approach: Morphological analysis is an important subtask in text-to-speech conversion, hyphenation, and other language engineering tasks. The traditional approach to performing morphological analysis is to combine a morpheme lexicon, sets of (linguistic) rules, and heuristics to find a most probable analysis. In contrast we present an inductive learning approach in which morphological analysis is reformulated as a segmentation task. We report on a number of experiments in which five inductive learning algorithms are applied to three variations of the task of morphological analysis. Results show (i) that the generalisation performance of the algorithms is good, and (ii) that the lazy learning algorithm IB1-IG performs best on all three tasks. We conclude that lazy learning of morphological analysis as a classification task is indeed a viable approach; moreover, it has the strong advantages over the traditional approach of avoiding the knowledge-acquisition bottleneck, being fast and deterministic in learning and processing, and being language-independent.<|reference_end|> | arxiv | @article{bosch1996morphological,
title={Morphological Analysis as Classification: an Inductive-Learning Approach},
author={Antal van den Bosch (University of Maastricht, the Netherlands),
Walter Daelemans (Tilburg University, the Netherlands), Ton Weijters
(University of Maastricht, the Netherlands)},
journal={Proceedings of NEMLAP-2},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607021},
primaryClass={cmp-lg cs.CL}
} | bosch1996morphological |
arxiv-668872 | cmp-lg/9607022 | A Machine Learning Approach to the Classification of Dialogue Utterances | <|reference_start|>A Machine Learning Approach to the Classification of Dialogue Utterances: The purpose of this paper is to present a method for automatic classification of dialogue utterances and the results of applying that method to a corpus. Superficial features of a set of training utterances (which we will call cues) are taken as the basis for finding relevant utterance classes and for extracting rules for assigning these classes to new utterances. Each cue is assumed to partially contribute to the communicative function of an utterance. Instead of relying on subjective judgments for the tasks of finding classes and rules, we opt for using machine learning techniques to guarantee objectivity.<|reference_end|> | arxiv | @article{andernach1996a,
title={A Machine Learning Approach to the Classification of Dialogue Utterances},
author={Toine Andernach (Parlevink Group, Department of Computer Science,
University of Twente, The Netherlands)},
journal={arXiv preprint arXiv:cmp-lg/9607022},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607022},
primaryClass={cmp-lg cs.CL}
} | andernach1996a |
arxiv-668873 | cmp-lg/9607023 | Phonological modeling for continuous speech recognition in Korean | <|reference_start|>Phonological modeling for continuous speech recognition in Korean: A new scheme to represent phonological changes during continuous speech recognition is suggested. A phonological tag coupled with its morphological tag is designed to represent the conditions of Korean phonological changes. A pairwise language model of these morphological and phonological tags is implemented in Korean speech recognition system. Performance of the model is verified through the TDNN-based speech recognition experiments.<|reference_end|> | arxiv | @article{lee1996phonological,
title={Phonological modeling for continuous speech recognition in Korean},
author={WonIl Lee, Geunbae Lee, and Jong-Hyeok Lee (Pohang University of
Science and Technology, Korea)},
journal={arXiv preprint arXiv:cmp-lg/9607023},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607023},
primaryClass={cmp-lg cs.CL}
} | lee1996phonological |
arxiv-668874 | cmp-lg/9607024 | Applying Winnow to Context-Sensitive Spelling Correction | <|reference_start|>Applying Winnow to Context-Sensitive Spelling Correction: Multiplicative weight-updating algorithms such as Winnow have been studied extensively in the COLT literature, but only recently have people started to use them in applications. In this paper, we apply a Winnow-based algorithm to a task in natural language: context-sensitive spelling correction. This is the task of fixing spelling errors that happen to result in valid words, such as substituting {\it to\/} for {\it too}, {\it casual\/} for {\it causal}, and so on. Previous approaches to this problem have been statistics-based; we compare Winnow to one of the more successful such approaches, which uses Bayesian classifiers. We find that: (1)~When the standard (heavily-pruned) set of features is used to describe problem instances, Winnow performs comparably to the Bayesian method; (2)~When the full (unpruned) set of features is used, Winnow is able to exploit the new features and convincingly outperform Bayes; and (3)~When a test set is encountered that is dissimilar to the training set, Winnow is better than Bayes at adapting to the unfamiliar test set, using a strategy we will present for combining learning on the training set with unsupervised learning on the (noisy) test set.<|reference_end|> | arxiv | @article{golding1996applying,
title={Applying Winnow to Context-Sensitive Spelling Correction},
author={Andrew R. Golding and Dan Roth},
journal={arXiv preprint arXiv:cmp-lg/9607024},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607024},
primaryClass={cmp-lg cs.CL}
} | golding1996applying |
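
The Winnow update itself fits in a dozen lines. Each example below is the set of active binary context features around an occurrence of a confusable word, with a 0/1 label for which member of the confusion set appeared; the data is invented:

```python
def winnow_train(examples, n_features, alpha=2.0, epochs=10):
    """Winnow: multiplicative weight updates over binary features.
    Each example is (set of active feature indices, 0/1 label)."""
    theta = n_features / 2            # fixed threshold
    w = [1.0] * n_features
    for _ in range(epochs):
        for active, label in examples:
            pred = 1 if sum(w[i] for i in active) >= theta else 0
            if pred < label:          # false negative: promote
                for i in active:
                    w[i] *= alpha
            elif pred > label:        # false positive: demote
                for i in active:
                    w[i] /= alpha
    return w, theta

# Features 0 and 1 are predictive of the two confusable words;
# features 2 and 3 are irrelevant context noise.
data = [({0, 2}, 1), ({0, 3}, 1), ({1, 2}, 0), ({1, 3}, 0)]
w, theta = winnow_train(data, 4)
print(w)   # -> [2.0, 0.5, 1.0, 1.0]: feature 0 grows, feature 1 shrinks
```

The multiplicative updates are what let Winnow exploit very large, mostly irrelevant feature sets, which is the setting of the unpruned experiments in the paper.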
arxiv-668875 | cmp-lg/9607025 | New Methods, Current Trends and Software Infrastructure for NLP | <|reference_start|>New Methods, Current Trends and Software Infrastructure for NLP: The increasing use of `new methods' in NLP, which the NeMLaP conference series exemplifies, occurs in the context of a wider shift in the nature and concerns of the discipline. This paper begins with a short review of this context and significant trends in the field. The review motivates and leads to a set of requirements for support software of general utility for NLP research and development workers. A freely-available system designed to meet these requirements is described (called GATE - a General Architecture for Text Engineering). Information Extraction (IE), in the sense defined by the Message Understanding Conferences (ARPA \cite{Arp95}), is an NLP application in which many of the new methods have found a home (Hobbs \cite{Hob93}; Jacobs ed. \cite{Jac92}). An IE system based on GATE is also available for research purposes, and this is described. Lastly we review related work.<|reference_end|> | arxiv | @article{cunningham1996new,
title={New Methods, Current Trends and Software Infrastructure for NLP},
author={Hamish Cunningham, Yorick Wilks, and Robert J. Gaizauskas},
journal={Proceedings of NEMLAP-2},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607025},
primaryClass={cmp-lg cs.CL}
} | cunningham1996new |
arxiv-668876 | cmp-lg/9607026 | Building Knowledge Bases for the Generation of Software Documentation | <|reference_start|>Building Knowledge Bases for the Generation of Software Documentation: Automated text generation requires an underlying knowledge base from which to generate, which is often difficult to produce. Software documentation is one domain in which parts of this knowledge base may be derived automatically. In this paper, we describe DRAFTER, an authoring support tool for generating user-centred software documentation, and in particular, we describe how parts of its required knowledge base can be obtained automatically.<|reference_end|> | arxiv | @article{paris1996building,
title={Building Knowledge Bases for the Generation of Software Documentation},
author={Cecile Paris and Keith Vander Linden (ITRI, Univ. of Brighton)},
journal={arXiv preprint arXiv:cmp-lg/9607026},
year={1996},
number={ITRI-96-06},
archivePrefix={arXiv},
eprint={cmp-lg/9607026},
primaryClass={cmp-lg cs.CL}
} | paris1996building |
arxiv-668877 | cmp-lg/9607027 | Learning Translation Rules From A Bilingual Corpus | <|reference_start|>Learning Translation Rules From A Bilingual Corpus: This paper proposes a mechanism for learning pattern correspondences between two languages from a corpus of translated sentence pairs. The proposed mechanism uses analogical reasoning between two translations. Given a pair of translations, the similar parts of the sentences in the source language must correspond to the similar parts of the sentences in the target language. Similarly, the different parts should correspond to the respective parts in the translated sentences. The correspondences between the similarities, and also the differences, are learned in the form of translation rules. The system is tested on a small training dataset and produced promising results for further investigation.<|reference_end|> | arxiv | @article{cicekli1996learning,
title={Learning Translation Rules From A Bilingual Corpus},
author={Ilyas Cicekli and H. Altay Guvenir},
journal={Published in Proceedings of NEMLAP-2},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607027},
primaryClass={cmp-lg cs.CL}
} | cicekli1996learning |
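
The analogical step can be sketched with an off-the-shelf sequence aligner: shared parts of the two source sentences become a template, and the differing source parts are paired with the differing target parts as candidate correspondences. This toy handles one clean difference region and none of the paper's generalization machinery; the sentence pairs are invented:

```python
from difflib import SequenceMatcher

def split_parts(a, b):
    """Shared and differing parts of two token sequences."""
    similar, diff_a, diff_b = [], [], []
    for op, i1, i2, j1, j2 in SequenceMatcher(a=a, b=b).get_opcodes():
        if op == "equal":
            similar.append(tuple(a[i1:i2]))
        else:
            diff_a.append(tuple(a[i1:i2]))
            diff_b.append(tuple(b[j1:j2]))
    return similar, diff_a, diff_b

def learn_rule(pair1, pair2):
    """Shared source/target parts form a template; differing source
    parts are paired with differing target parts as correspondences."""
    (s1, t1), (s2, t2) = pair1, pair2
    s_sim, s_d1, s_d2 = split_parts(s1.split(), s2.split())
    t_sim, t_d1, t_d2 = split_parts(t1.split(), t2.split())
    return (s_sim, t_sim), list(zip(s_d1, t_d1)) + list(zip(s_d2, t_d2))

template, rules = learn_rule(("I saw the cat", "ich sah die Katze"),
                             ("I saw the dog", "ich sah den Hund"))
print(template)  # shared: ('I', 'saw', 'the') / ('ich', 'sah')
print(rules)     # [(('cat',), ('die', 'Katze')), (('dog',), ('den', 'Hund'))]
```

Note that the learned chunk pairs bundle the German article with its noun, which is exactly the kind of non-word-for-word correspondence analogy-based learning picks up for free.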
arxiv-668878 | cmp-lg/9607028 | The Grammar of Sense: Is word-sense tagging much more than part-of-speech tagging? | <|reference_start|>The Grammar of Sense: Is word-sense tagging much more than part-of-speech tagging?: This squib claims that Large-scale Automatic Sense Tagging of text (LAST) can be done at a high-level of accuracy and with far less complexity and computational effort than has been believed until now. Moreover, it can be done for all open class words, and not just carefully selected opposed pairs as in some recent work. We describe two experiments: one exploring the amount of information relevant to sense disambiguation which is contained in the part-of-speech field of entries in Longman Dictionary of Contemporary English (LDOCE). Another, more practical, experiment attempts sense disambiguation of all open class words in a text assigning LDOCE homographs as sense tags using only part-of-speech information. We report that 92% of open class words can be successfully tagged in this way. We plan to extend this work and to implement an improved large-scale tagger, a description of which is included here.<|reference_end|> | arxiv | @article{wilks1996the,
title={The Grammar of Sense: Is word-sense tagging much more than
part-of-speech tagging?},
author={Yorick Wilks, Mark Stevenson (University of Sheffield, UK)},
journal={arXiv preprint arXiv:cmp-lg/9607028},
year={1996},
number={CS-96-05},
archivePrefix={arXiv},
eprint={cmp-lg/9607028},
primaryClass={cmp-lg cs.CL}
} | wilks1996the |
arxiv-668879 | cmp-lg/9607029 | Design and Implementation of a Tactical Generator for Turkish, a Free Constituent Order Language | <|reference_start|>Design and Implementation of a Tactical Generator for Turkish, a Free Constituent Order Language: This thesis describes a tactical generator for Turkish, a free constituent order language, in which the order of the constituents may change according to the information structure of the sentences to be generated. In the absence of any information regarding the information structure of a sentence (i.e., topic, focus, background, etc.), the constituents of the sentence obey a default order, but the order is almost freely changeable, depending on the constraints of the text flow or discourse. We have used a recursively structured finite state machine for handling the changes in constituent order, implemented as a right-linear grammar backbone. Our implementation environment is the GenKit system, developed at Carnegie Mellon University--Center for Machine Translation. Morphological realization has been implemented using an external morphological analysis/generation component which performs concrete morpheme selection and handles morphographemic processes.<|reference_end|> | arxiv | @article{hakkani1996design,
title={Design and Implementation of a Tactical Generator for Turkish, a Free
Constituent Order Language},
author={Dilek Zeynep Hakkani},
journal={arXiv preprint arXiv:cmp-lg/9607029},
year={1996},
number={BU-CEIS-9614},
archivePrefix={arXiv},
eprint={cmp-lg/9607029},
primaryClass={cmp-lg cs.CL}
} | hakkani1996design |
arxiv-668880 | cmp-lg/9607030 | Using Multiple Sources of Information for Constraint-Based Morphological Disambiguation | <|reference_start|>Using Multiple Sources of Information for Constraint-Based Morphological Disambiguation: This thesis presents a constraint-based morphological disambiguation approach that is applicable to languages with complex morphology--specifically agglutinative languages with productive inflectional and derivational morphological phenomena. For morphologically complex languages like Turkish, automatic morphological disambiguation involves selecting for each token morphological parse(s), with the right set of inflectional and derivational markers. Our system combines corpus-independent hand-crafted constraint rules, constraint rules that are learned via unsupervised learning from a training corpus, and additional statistical information obtained from the corpus to be morphologically disambiguated. The hand-crafted rules are linguistically motivated and tuned to improve precision without sacrificing recall. In certain respects, our approach has been motivated by Brill's recent work, but with the observation that his transformational approach is not directly applicable to languages like Turkish. Our system also uses a novel approach to unknown word processing by employing a secondary morphological processor which recovers any relevant inflectional and derivational information from a lexical item whose root is unknown. With this approach, well below 1% of the tokens remain unknown in the texts we have experimented with. Our results indicate that by combining these hand-crafted, statistical and learned information sources, we can attain a recall of 96 to 97% with a corresponding precision of 93 to 94%, and ambiguity of 1.02 to 1.03 parses per token.<|reference_end|> | arxiv | @article{tur1996using,
title={Using Multiple Sources of Information for Constraint-Based Morphological
Disambiguation},
author={Gokhan Tur},
journal={arXiv preprint arXiv:cmp-lg/9607030},
year={1996},
number={BU-CEIS-9615},
archivePrefix={arXiv},
eprint={cmp-lg/9607030},
primaryClass={cmp-lg cs.CL}
} | tur1996using |
arxiv-668881 | cmp-lg/9607031 | Compositional Semantics in Verbmobil | <|reference_start|>Compositional Semantics in Verbmobil: The paper discusses how compositional semantics is implemented in the Verbmobil speech-to-speech translation system using LUD, a description language for underspecified discourse representation structures. The description language and its formal interpretation in DRT are described as well as its implementation together with the architecture of the system's entire syntactic-semantic processing module. We show that a linguistically sound theory and formalism can be properly implemented in a system with (near) real-time requirements.<|reference_end|> | arxiv | @article{bos1996compositional,
title={Compositional Semantics in Verbmobil},
author={Johan Bos, Bj"orn Gamb"ack, Christian Lieske, Yoshiki Mori, Manfred
Pinkal, Karsten Worm},
journal={Proceedings of COLING '96},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607031},
primaryClass={cmp-lg cs.CL}
} | bos1996compositional |
arxiv-668882 | cmp-lg/9607032 | A Lexical Semantic Database for Verbmobil | <|reference_start|>A Lexical Semantic Database for Verbmobil: This paper describes the development and use of a lexical semantic database for the Verbmobil speech-to-speech machine translation system. The motivation is to provide a common information source for the distributed development of the semantics, transfer and semantic evaluation modules and to store lexical semantic information application-independently. The database is organized around a set of abstract semantic classes and has been used to define the semantic contributions of the lemmata in the vocabulary of the system, to automatically create semantic lexica and to check the correctness of the semantic representations built up. The semantic classes are modelled using an inheritance hierarchy. The database is implemented using the lexicon formalism LeX4 developed during the project.<|reference_end|> | arxiv | @article{heinecke1996a,
title={A Lexical Semantic Database for Verbmobil},
author={Johannes Heinecke (Humboldt-Universit"at zu Berlin), Karsten L. Worm
(Universit"at des Saarlandes)},
journal={arXiv preprint arXiv:cmp-lg/9607032},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607032},
primaryClass={cmp-lg cs.CL}
} | heinecke1996a |
arxiv-668883 | cmp-lg/9607033 | Multiple Discourse Relations on the Sentential Level in Japanese | <|reference_start|>Multiple Discourse Relations on the Sentential Level in Japanese: In the German government (BMBF) funded project Verbmobil, a semantic formalism, Language for Underspecified Discourse Representation Structures (LUD), is used which describes several DRSs and allows for underspecification. Dealing with Japanese poses challenging problems. In this paper, a treatment of multiple discourse relation constructions on the sentential level is shown, which are common in Japanese but cause a problem for the formalism. The problem is to distinguish discourse relations which take the widest scope compared with other scope-taking elements on the one hand and to have them underspecified among each other on the other hand. We also state a semantic constraint on the resolution of multiple discourse relations which seems to prevail over the syntactic c-command constraint.<|reference_end|> | arxiv | @article{mori1996multiple,
title={Multiple Discourse Relations on the Sentential Level in Japanese},
author={Yoshiki Mori},
journal={Proceedings of COLING '96},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607033},
primaryClass={cmp-lg cs.CL}
} | mori1996multiple |
arxiv-668884 | cmp-lg/9607034 | Using textual clues to improve metaphor processing | <|reference_start|>Using textual clues to improve metaphor processing: In this paper, we propose a textual clue approach to help metaphor detection, in order to improve the semantic processing of this figure. The previous works in the domain studied the semantic regularities only, overlooking an obvious set of regularities. A corpus-based analysis shows the existence of surface regularities related to metaphors. These clues can be characterized by syntactic structures and lexical markers. We present an object oriented model for representing the textual clues that were found. This representation is designed to help the choice of a semantic processing, in terms of possible non-literal meanings. A prototype implementing this model is currently under development, within an incremental approach allowing step-by-step evaluations. \footnote{This work takes part in a research project sponsored by the AUPELF-UREF (Francophone Agency For Education and Research)}<|reference_end|> | arxiv | @article{ferrari1996using,
title={Using textual clues to improve metaphor processing},
author={Stéphane Ferrari (Limsi-CNRS, France)},
journal={Proceedings of the ACL'96, 34th Annual Meeting of the Association
for Computational Linguistics, 351-353, Santa Cruz, California, 1996},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607034},
primaryClass={cmp-lg cs.CL}
} | ferrari1996using |
arxiv-668885 | cmp-lg/9607035 | Completeness of Compositional Translation for Context-Free Grammars | <|reference_start|>Completeness of Compositional Translation for Context-Free Grammars: A machine translation system is said to be *complete* if all expressions that are correct according to the source-language grammar can be translated into the target language. This paper addresses the completeness issue for compositional machine translation in general, and for compositional machine translation of context-free grammars in particular. Conditions that guarantee translation completeness of context-free grammars are presented.<|reference_end|> | arxiv | @article{huijsen1996completeness,
title={Completeness of Compositional Translation for Context-Free Grammars},
author={Willem-Olaf Huijsen (Research Institute for Language and Speech (OTS),
Utrecht University, The Netherlands)},
journal={arXiv preprint arXiv:cmp-lg/9607035},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607035},
primaryClass={cmp-lg cs.CL}
} | huijsen1996completeness |
arxiv-668886 | cmp-lg/9607036 | Connected Text Recognition Using Layered HMMs and Token Passing | <|reference_start|>Connected Text Recognition Using Layered HMMs and Token Passing: We present a novel approach to lexical error recovery on textual input. An advanced robust tokenizer has been implemented that can not only correct spelling mistakes, but also recover from segmentation errors. Apart from the orthographic considerations taken, the tokenizer also makes use of linguistic expectations extracted from a training corpus. The idea is to arrange Hidden Markov Models (HMM) in multiple layers where the HMMs in each layer are responsible for different aspects of the processing of the input. We report on experimental evaluations with alternative probabilistic language models to guide the lexical error recovery process.<|reference_end|> | arxiv | @article{ingels1996connected,
title={Connected Text Recognition Using Layered HMMs and Token Passing},
author={Peter Ingels (Linkoping University, Linkoping, Sweden)},
journal={Proceedings of NeMLaP-2},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607036},
primaryClass={cmp-lg cs.CL}
} | ingels1996connected |
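
For a single layer, token passing reduces to the familiar Viterbi recursion in which each state carries its best (score, history) token forward. A generic sketch with an invented two-state typo-channel model, not the layered architecture itself:

```python
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Single-layer Viterbi: each state keeps its best-scoring
    (probability, history) token and passes it on."""
    V = [{s: (start_p[s] * emit_p[s][obs[0]], [s]) for s in states}]
    for o in obs[1:]:
        row = {}
        for s in states:
            prob, path = max(
                (V[-1][r][0] * trans_p[r][s] * emit_p[s][o], V[-1][r][1])
                for r in states)
            row[s] = (prob, path + [s])
        V.append(row)
    return max(V[-1].values())

# Invented typo-channel toy: hidden states are intended characters,
# observations are possibly mistyped characters.
states = ["a", "b"]
start_p = {"a": 0.6, "b": 0.4}
trans_p = {"a": {"a": 0.3, "b": 0.7}, "b": {"a": 0.7, "b": 0.3}}
emit_p = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.2, "b": 0.8}}
prob, path = viterbi(["a", "a", "b"], states, start_p, trans_p, emit_p)
print(path, prob)   # -> ['a', 'a', 'b'] with its path probability
```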
arxiv-668887 | cmp-lg/9607037 | Automatic Construction of Clean Broad-Coverage Translation Lexicons | <|reference_start|>Automatic Construction of Clean Broad-Coverage Translation Lexicons: Word-level translational equivalences can be extracted from parallel texts by surprisingly simple statistical techniques. However, these techniques are easily fooled by {\em indirect associations} --- pairs of unrelated words whose statistical properties resemble those of mutual translations. Indirect associations pollute the resulting translation lexicons, drastically reducing their precision. This paper presents an iterative lexicon cleaning method. On each iteration, most of the remaining incorrect lexicon entries are filtered out, without significant degradation in recall. This lexicon cleaning technique can produce translation lexicons with recall and precision both exceeding 90\%, as well as dictionary-sized translation lexicons that are over 99\% correct.<|reference_end|> | arxiv | @article{melamed1996automatic,
title={Automatic Construction of Clean Broad-Coverage Translation Lexicons},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9607037},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9607037},
primaryClass={cmp-lg cs.CL}
} | melamed1996automatic |
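
The cleaning intuition is that direct associations outscore the indirect associations they generate, so accepting the strongest one-to-one links and rescoring the remainder makes the indirect ones fade. The sketch below substitutes raw co-occurrence counts for the paper's likelihood-based scores, and its count threshold is an invented stand-in for significance filtering:

```python
from collections import Counter

def cooccurrence(bitext):
    cooc = Counter()
    for src, tgt in bitext:
        for s in src:
            for t in tgt:
                cooc[(s, t)] += 1
    return cooc

def clean_lexicon(bitext, rounds=5):
    """Iterative cleaning: accept the strongest one-to-one links,
    strip the linked words from the sentence pairs, rescore the rest.
    Indirect associations ride on direct ones, so they fade once the
    direct links are removed from the data."""
    bitext = [(s.split(), t.split()) for s, t in bitext]
    lexicon = {}
    for _ in range(rounds):
        scores = cooccurrence(bitext)
        linked_s, linked_t = set(), set()
        for (s, t), c in scores.most_common():
            if c < 2:                 # invented significance cut-off
                break
            if s not in linked_s and t not in linked_t:
                lexicon[s] = t
                linked_s.add(s); linked_t.add(t)
        if not linked_s:
            break
        bitext = [([w for w in src if w not in linked_s],
                   [w for w in tgt if w not in linked_t])
                  for src, tgt in bitext]
    return lexicon

bitext = [("the house", "das Haus"), ("the dog", "der Hund"),
          ("the house is big", "das Haus ist gross")]
print(clean_lexicon(bitext))   # -> {'the': 'das', 'house': 'Haus'}
```

Note how the indirect associations ("the"/"Haus", "house"/"das") tie the direct ones in raw count here; the one-to-one linking and rescoring are what suppress them.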
arxiv-668888 | cmp-lg/9608001 | Storage of Natural Language Sentences in a Hopfield Network | <|reference_start|>Storage of Natural Language Sentences in a Hopfield Network: This paper looks at how the Hopfield neural network can be used to store and recall patterns constructed from natural language sentences. As a pattern recognition and storage tool, the Hopfield neural network has received much attention. This attention, however, has been mainly in the field of statistical physics due to the model's simple abstraction of spin glass systems. A discussion is made of the differences, shown as bias and correlation, between natural language sentence patterns and the randomly generated ones used in previous experiments. Results are given for numerical simulations which show the auto-associative competence of the network when trained with natural language patterns.<|reference_end|> | arxiv | @article{collier1996storage,
title={Storage of Natural Language Sentences in a Hopfield Network},
author={Nigel Collier (Department of Language Engineering, UMIST, UK)},
journal={arXiv preprint arXiv:cmp-lg/9608001},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608001},
primaryClass={cmp-lg cs.CL}
} | collier1996storage |
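
Hebbian storage and recall take a few lines of numpy. The ±1 "sentence patterns" below are toy bag-of-words vectors over an invented six-word vocabulary; their bias and correlation relative to random patterns are precisely the properties the paper studies:

```python
import numpy as np

def train_hopfield(patterns):
    """Hebbian storage: sum of outer products of the +/-1 patterns,
    with a zero diagonal so units do not excite themselves."""
    n = patterns.shape[1]
    W = np.zeros((n, n))
    for p in patterns:
        W += np.outer(p, p)
    np.fill_diagonal(W, 0)
    return W / n

def recall(W, probe, max_steps=10):
    """Synchronous updates until the state is a fixed point."""
    s = probe.copy()
    for _ in range(max_steps):
        new = np.where(W @ s >= 0, 1, -1)
        if np.array_equal(new, s):
            break
        s = new
    return s

vocab = ["the", "cat", "dog", "sat", "ran", "here"]
def encode(words):
    return np.array([1 if w in words else -1 for w in vocab])

patterns = np.array([encode({"the", "cat", "sat"}),
                     encode({"the", "dog", "ran"})])
W = train_hopfield(patterns)
probe = encode({"the", "cat"})   # pattern 0 with "sat" deleted
print(recall(W, probe))          # recovers encode({the, cat, sat})
```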
arxiv-668889 | cmp-lg/9608002 | Controlling Functional Uncertainty | <|reference_start|>Controlling Functional Uncertainty: There have been two different methods for checking the satisfiability of feature descriptions that use the functional uncertainty device, namely~\cite{Kaplan:88CO} and \cite{Backofen:94JSC}. Although only the one in \cite{Backofen:94JSC} solves the satisfiability problem completely, both methods have their merits. But it may happen that in one single description, there are parts where the first method is more appropriate, and other parts where the second should be applied. In this paper, we present a common framework that allows one to combine both methods. This is done by presenting a set of rules for simplifying feature descriptions. The different methods are described as different controls on this rule set, where a control specifies in which order the different rules must be applied.<|reference_end|> | arxiv | @article{backofen1996controlling,
title={Controlling Functional Uncertainty},
author={Rolf Backofen (Theoretische Informatik, LMU Muenchen, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9608002},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608002},
primaryClass={cmp-lg cs.CL}
} | backofen1996controlling |
arxiv-668890 | cmp-lg/9608003 | Stylistic Variation in an Information Retrieval Experiment | <|reference_start|>Stylistic Variation in an Information Retrieval Experiment: Texts exhibit considerable stylistic variation. This paper reports an experiment where a corpus of documents (N= 75 000) is analyzed using various simple stylistic metrics. A subset (n = 1000) of the corpus has been previously assessed to be relevant for answering given information retrieval queries. The experiment shows that this subset differs significantly from the rest of the corpus in terms of the stylistic metrics studied.<|reference_end|> | arxiv | @article{karlgren1996stylistic,
title={Stylistic Variation in an Information Retrieval Experiment},
author={Jussi Karlgren (NYU)},
journal={arXiv preprint arXiv:cmp-lg/9608003},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608003},
primaryClass={cmp-lg cs.CL}
} | karlgren1996stylistic |
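
Metrics of the kind involved are simple to compute. The three below (average word length, average sentence length, type-token ratio) are illustrative choices rather than necessarily the paper's exact inventory:

```python
def stylistic_metrics(text):
    """A few surface stylistic measures for profiling a document."""
    for mark in "!?":
        text = text.replace(mark, ".")
    sentences = [s for s in text.split(".") if s.strip()]
    words = text.replace(".", " ").split()
    return {
        "avg_word_len": sum(len(w) for w in words) / len(words),
        "avg_sent_len": len(words) / len(sentences),
        "type_token_ratio": len(set(w.lower() for w in words)) / len(words),
    }

formal = "The retrieval system ranks documents. Precision improves markedly."
casual = "It was nice. We went out. Fun day."
print(stylistic_metrics(formal))
print(stylistic_metrics(casual))   # shorter words and sentences
```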
arxiv-668891 | cmp-lg/9608004 | Patterns of Language - A Population Model for Language Structure | <|reference_start|>Patterns of Language - A Population Model for Language Structure: A key problem in the description of language structure is to explain its contradictory properties of specificity and generality, the contrasting poles of formulaic prescription and generative productivity. I argue that this is possible if we accept analogy and similarity as the basic mechanisms of structural definition. As a specific example I discuss how it would be possible to use analogy to define a generative model of syntactic structure.<|reference_end|> | arxiv | @article{freeman1996patterns,
title={Patterns of Language - A Population Model for Language Structure},
author={Robert John Freeman},
journal={arXiv preprint arXiv:cmp-lg/9608004},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608004},
primaryClass={cmp-lg cs.CL}
} | freeman1996patterns |
arxiv-668892 | cmp-lg/9608005 | CLEARS - An Education and Research Tool for Computational Semantics | <|reference_start|>CLEARS - An Education and Research Tool for Computational Semantics: The CLEARS (Computational Linguistics Education and Research for Semantics) tool provides a graphical interface allowing interactive construction of semantic representations in a variety of different formalisms, and using several construction methods. CLEARS was developed as part of the FraCaS project which was designed to encourage convergence between different semantic formalisms, such as Montague-Grammar, DRT, and Situation Semantics. The CLEARS system is freely available on the WWW from http://coli.uni-sb.de/~clears/clears.html<|reference_end|> | arxiv | @article{milward1996clears,
title={CLEARS - An Education and Research Tool for Computational Semantics},
author={David Milward (SRI Cambridge), Karsten Konrad, Holger Maier, Manfred
Pinkal (University of the Saarland)},
journal={arXiv preprint arXiv:cmp-lg/9608005},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608005},
primaryClass={cmp-lg cs.CL}
} | milward1996clears |
arxiv-668893 | cmp-lg/9608006 | Grapheme-to-Phoneme Conversion using Multiple Unbounded Overlapping Chunks | <|reference_start|>Grapheme-to-Phoneme Conversion using Multiple Unbounded Overlapping Chunks: We present in this paper an original extension of two data-driven algorithms for the transcription of a sequence of graphemes into the corresponding sequence of phonemes. In particular, our approach generalizes the algorithm originally proposed by Dedina and Nusbaum (D&N, 1991), which had been promoted as a model of the human ability to pronounce unknown words by analogy to familiar lexical items. We will show that D&N's algorithm performs comparatively poorly when evaluated on a realistic test set, and that our extension allows us to improve substantially the performance of the analogy-based model. We will also suggest that both algorithms can be reformulated in a much more general framework, which allows us to anticipate other useful extensions. However, considering the inability to define in these models important notions like lexical neighborhood, we conclude that both approaches fail to offer a proper model of the analogical processes involved in reading aloud.<|reference_end|> | arxiv | @article{yvon1996grapheme-to-phoneme,
title={Grapheme-to-Phoneme Conversion using Multiple Unbounded Overlapping
Chunks},
author={Francois Yvon (Ecole Nationale Superieure des Telecommunications,
Paris)},
journal={arXiv preprint arXiv:cmp-lg/9608006},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608006},
primaryClass={cmp-lg cs.CL}
} | yvon1996grapheme-to-phoneme |
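Yvon's actual algorithm builds a pronunciation lattice over all overlapping chunks and scores competing covers; the following is only a minimal sketch of the underlying idea, assuming a toy lexicon with one-to-one grapheme/phoneme alignment (the entries and phoneme symbols are invented for illustration):

```python
# Greedy simplification of chunk-based pronunciation by analogy: cover the
# unknown spelling with the longest grapheme chunks seen in aligned lexicon
# entries, and concatenate the corresponding phoneme chunks. The real models
# keep many overlapping covers; this sketch keeps only one.

ALIGNED_LEXICON = [          # hypothetical entries, aligned letter-for-symbol
    ("pat", "p@t"),
    ("ten", "tEn"),
    ("net", "nEt"),
]

def aligned_chunks(word, phones):
    """Yield every (grapheme substring, phoneme substring) pair."""
    for i in range(len(word)):
        for j in range(i + 1, len(word) + 1):
            yield word[i:j], phones[i:j]

def transcribe(spelling, lexicon=ALIGNED_LEXICON):
    """Greedily cover the spelling with the longest known chunks."""
    table = {}
    for word, phones in lexicon:
        for g, p in aligned_chunks(word, phones):
            table.setdefault(g, p)          # keep the first reading seen
    out, i = [], 0
    while i < len(spelling):
        for j in range(len(spelling), i, -1):   # longest chunk first
            if spelling[i:j] in table:
                out.append(table[spelling[i:j]])
                i = j
                break
        else:
            return None     # some position is not covered by any chunk
    return "".join(out)

print(transcribe("pen"))    # 'pEn', recombined from chunks of 'pat' and 'ten'
```

A lattice-based version would retain every possible cover and rank them, which is where the multiple unbounded overlapping chunks of the title come in.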
arxiv-668894 | cmp-lg/9608007 | Centering in Italian | <|reference_start|>Centering in Italian: This paper explores the correlations between centering and different forms of pronominal reference in Italian, in particular zeros and overt pronouns in subject position. These correlations, which I proposed in earlier work (COLING 90), are verified through the analysis of a corpus of naturally occurring texts. In the process, I extend my previous analysis in several ways, for example by taking possessives and subordinates into account. I also provide a more detailed analysis of the "continue" transition: more specifically, I show that pronouns are used in a markedly different way in a "continue" preceded by another "continue" or by a "shift", and in a "continue" preceded by a "retain".<|reference_end|> | arxiv | @article{dieugenio1996centering,
title={Centering in Italian},
author={Barbara Di Eugenio (Carnegie Mellon University / University of
Pittsburgh)},
journal={arXiv preprint arXiv:cmp-lg/9608007},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608007},
primaryClass={cmp-lg cs.CL}
} | dieugenio1996centering |
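The transition types this abstract refers to follow from two standard comparisons in centering theory. As a reminder of how they are computed, here is a minimal sketch using the four-way split of Brennan, Friedman and Pollard that distinguishes smooth from rough shifts; the entity names are illustrative:

```python
# Classify the centering transition between adjacent utterances from two
# comparisons: current vs. previous backward-looking center (Cb), and
# current Cb vs. current preferred center (Cp).

def transition(cb_current, cb_previous, cp_current):
    if cb_previous is None or cb_current == cb_previous:
        return "CONTINUE" if cb_current == cp_current else "RETAIN"
    return "SMOOTH-SHIFT" if cb_current == cp_current else "ROUGH-SHIFT"

# If a null subject keeps realizing the same most-salient entity, the
# resulting chain of CONTINUE transitions is the pattern Di Eugenio
# correlates with zeros in Italian.
print(transition("maria", "maria", "maria"))      # CONTINUE
print(transition("maria", "maria", "francesca"))  # RETAIN
print(transition("carlo", "maria", "carlo"))      # SMOOTH-SHIFT
```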
arxiv-668895 | cmp-lg/9608008 | The discourse functions of Italian subjects: a centering approach | <|reference_start|>The discourse functions of Italian subjects: a centering approach: This paper examines the discourse functions that different types of subjects perform in Italian within the centering framework. I build on my previous work (COLING 90), which accounted for the alternation of null and strong pronouns in subject position. I extend my previous analysis in several ways: for example, I refine the notion of {\sc continue} and discuss the centering functions of full NPs.<|reference_end|> | arxiv | @article{dieugenio1996the,
title={The discourse functions of Italian subjects: a centering approach},
author={Barbara Di Eugenio (Carnegie Mellon University / University of
Pittsburgh)},
journal={Proceedings COLING96, Copenhagen, August 1996},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608008},
primaryClass={cmp-lg cs.CL}
} | dieugenio1996the |
arxiv-668896 | cmp-lg/9608009 | Centering theory and the Italian pronominal system | <|reference_start|>Centering theory and the Italian pronominal system: In this paper, I give an account of some phenomena of pronominalization in Italian in terms of centering theory. After a general introduction to the Italian pronominal system, I will review centering, and then show how the original rules have to be extended or modified. Finally, I will show that centering does not account for two phenomena: first, the functional role of an utterance may override the predictions of centering; second, a null subject can be used to refer to a whole discourse segment.<|reference_end|> | arxiv | @article{dieugenio1996centering,
title={Centering theory and the Italian pronominal system},
author={Barbara Di Eugenio (Carnegie Mellon University / University of
Pittsburgh)},
journal={Proceedings COLING90, Helsinki, August 1990},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608009},
primaryClass={cmp-lg cs.CL}
} | dieugenio1996centering |
arxiv-668897 | cmp-lg/9608010 | Fishing for Exactness | <|reference_start|>Fishing for Exactness: Statistical methods for automatically identifying dependent word pairs (i.e., dependent bigrams) in a corpus of natural language text have traditionally relied on asymptotic tests of significance. This paper suggests that Fisher's exact test is a more appropriate test, given the skewed and sparse data samples typical of this problem. Both theoretical and experimental comparisons between Fisher's exact test and a variety of asymptotic tests (the t-test, Pearson's chi-square test, and the likelihood-ratio chi-square test) are presented. These comparisons show that Fisher's exact test is more reliable in identifying dependent word pairs. The usefulness of Fisher's exact test extends to other problems in statistical natural language processing, since skewed and sparse data appear to be the rule in natural language. The experiment presented in this paper was performed using PROC FREQ of the SAS System.<|reference_end|> | arxiv | @article{pedersen1996fishing,
title={Fishing for Exactness},
author={Ted Pedersen (Southern Methodist University, Dallas, TX)},
journal={Proceedings of the South-Central SAS Users Group Conference
(SCSUG-96), Austin, TX, Oct 27-29, 1996},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608010},
primaryClass={cmp-lg cs.CL}
} | pedersen1996fishing |
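To make the proposal concrete: Fisher's exact test is applied to the 2x2 contingency table of a candidate bigram. A small sketch using scipy in place of SAS PROC FREQ, with invented counts:

```python
from scipy.stats import fisher_exact

# 2x2 contingency table for a candidate bigram (w1, w2); counts are invented.
n11 = 30       # w1 followed by w2
n12 = 970      # w1 followed by some other word
n21 = 450      # some other word followed by w2
n22 = 998550   # all remaining bigrams in the corpus

odds_ratio, p_value = fisher_exact([[n11, n12], [n21, n22]],
                                   alternative="greater")
print(f"odds ratio {odds_ratio:.1f}, p = {p_value:.3g}")
if p_value < 0.001:
    print("(w1, w2) looks like a dependent bigram")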
arxiv-668898 | cmp-lg/9608011 | Punctuation in Quoted Speech | <|reference_start|>Punctuation in Quoted Speech: Quoted speech is often set off by punctuation marks, in particular quotation marks. Thus, it might seem that quotation marks would be extremely useful in identifying these structures in texts. Unfortunately, the situation is not quite so clear. In this work, I will argue that quotation marks are not adequate for either identifying or constraining the syntax of quoted speech. More useful information comes from the presence of a quoting verb, which is either a verb of saying or a punctual verb, and from the presence of other punctuation marks, usually commas. Using a lexicalized grammar, we can license most quoting clauses as text adjuncts. A distinction will be made not between direct and indirect quoted speech, but rather between adjunct and non-adjunct quoting clauses.<|reference_end|> | arxiv | @article{doran1996punctuation,
title={Punctuation in Quoted Speech},
author={Christine Doran (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9608011},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608011},
primaryClass={cmp-lg cs.CL}
} | doran1996punctuation |
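A crude sketch of the observation the abstract starts from: matching on quotation marks alone over-generates, while requiring a nearby quoting verb filters out non-speech uses of quotes. The verb list and window size below are invented heuristics, not the paper's grammar:

```python
import re

QUOTING_VERBS = ["said", "says", "replied", "asked", "whispered", "shouted"]
VERB_RE = re.compile(r"\b(?:" + "|".join(QUOTING_VERBS) + r")\b")

def find_quoted_speech(text):
    """Return quoted spans that have a quoting verb close by."""
    spans = []
    for m in re.finditer(r'"([^"]+)"', text):
        # inspect a short stretch of text on either side of the quotation
        window = text[max(0, m.start() - 15):m.end() + 15].lower()
        if VERB_RE.search(window):
            spans.append(m.group(1))
    return spans

text = '"Come in," she said. The sign read "No entry" in red letters.'
print(find_quoted_speech(text))   # ['Come in,'] -- the sign is not speech
```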
arxiv-668899 | cmp-lg/9608012 | Multilingual Text Analysis for Text-to-Speech Synthesis | <|reference_start|>Multilingual Text Analysis for Text-to-Speech Synthesis: We present a model of text analysis for text-to-speech (TTS) synthesis based on (weighted) finite-state transducers, which serves as the text-analysis module of the multilingual Bell Labs TTS system. The transducers are constructed using a lexical toolkit that allows declarative descriptions of lexicons, morphological rules, numeral-expansion rules, and phonological rules, inter alia. To date, the model has been applied to eight languages: Spanish, Italian, Romanian, French, German, Russian, Mandarin and Japanese.<|reference_end|> | arxiv | @article{sproat1996multilingual,
title={Multilingual Text Analysis for Text-to-Speech Synthesis},
author={Richard Sproat},
journal={ECAI Workshop on Extended Finite-State Models of Language},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608012},
primaryClass={cmp-lg cs.CL}
} | sproat1996multilingual |
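The paper compiles declarative descriptions into weighted finite-state transducers. As a plain-Python stand-in, here is the kind of numeral-expansion rule such a toolkit would compile; it is English-only, limited to two digits, and purely illustrative:

```python
import re

UNITS = ["zero", "one", "two", "three", "four",
         "five", "six", "seven", "eight", "nine"]
TEENS = ["ten", "eleven", "twelve", "thirteen", "fourteen",
         "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
TENS = [None, None, "twenty", "thirty", "forty",
        "fifty", "sixty", "seventy", "eighty", "ninety"]

def expand_number(n):
    """Expand an integer in [0, 99] into words -- one rule of many."""
    if n < 10:
        return UNITS[n]
    if n < 20:
        return TEENS[n - 10]
    tens, unit = divmod(n, 10)
    return TENS[tens] if unit == 0 else TENS[tens] + "-" + UNITS[unit]

def expand_text(text):
    """Rewrite every bare one- or two-digit numeral in the input."""
    return re.sub(r"\b\d{1,2}\b", lambda m: expand_number(int(m.group())), text)

print(expand_text("Flight 42 leaves at 9"))   # Flight forty-two leaves at nine
```

In the actual system such rules become transducers, so they can be composed with the lexicon and the phonological rules over a common representation.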
arxiv-668900 | cmp-lg/9608013 | A Word Grammar of Turkish with Morphophonemic Rules | <|reference_start|>A Word Grammar of Turkish with Morphophonemic Rules: In this thesis, the morphological description of Turkish is encoded using the two-level model. This description comprises a phonological component, which contains the two-level morphophonemic rules, and a lexicon component, which lists the lexical items and encodes the morphotactic constraints. The word grammar is expressed in tabular form and covers both the verbal and the nominal paradigms. Vowel and consonant harmony, epenthesis, reduplication, etc. are described in detail and coded in two-level notation. Loan-word phonology is modelled separately. The implementation makes use of Lexc/Twolc from Xerox. Mechanisms to integrate the morphological analyzer with the lexical and syntactic components are discussed, and a simple graphical user interface is provided. Work is underway to use this model in a classroom setting for teaching Turkish morphology to non-native speakers.<|reference_end|> | arxiv | @article{oztaner1996a,
title={A Word Grammar of Turkish with Morphophonemic Rules},
author={S. Murat Oztaner (Middle East Technical University)},
journal={arXiv preprint arXiv:cmp-lg/9608013},
year={1996},
archivePrefix={arXiv},
eprint={cmp-lg/9608013},
primaryClass={cmp-lg cs.CL}
} | oztaner1996a |
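One concrete instance of the vowel-harmony alternations the thesis encodes in Twolc notation: the Turkish plural suffix -lAr, whose archiphoneme A surfaces by backness harmony. A plain-Python sketch, not two-level notation:

```python
BACK_VOWELS = set("aıou")
FRONT_VOWELS = set("eiöü")

def plural(stem):
    """Attach -lAr, resolving the archiphoneme A by backness harmony."""
    last_vowel = next(
        (c for c in reversed(stem) if c in BACK_VOWELS | FRONT_VOWELS), None)
    if last_vowel is None:
        raise ValueError("stem has no vowel: " + stem)
    return stem + ("lar" if last_vowel in BACK_VOWELS else "ler")

print(plural("kitap"))   # kitaplar, 'books'
print(plural("ev"))      # evler, 'houses'
```

A two-level rule states the same constraint declaratively over lexical/surface symbol pairs and is compiled into a transducer rather than applied procedurally, which is what lets Lexc/Twolc run the rules in both analysis and generation.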