corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
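Each record below is one row of this table: seven pipe-separated fields, with the abstract wrapped in `<|reference_start|>`/`<|reference_end|>` markers and the bibtex field spanning several physical lines. A minimal parsing sketch follows, assuming the dump is read as raw text exactly as laid out here; `RECORD_RE` and `parse_record` are illustrative names, not part of any published loader.

```python
import re

# Split one raw record of this dump into its seven fields. The field names
# come from the header row; the regex is a sketch tied to this exact layout.
RECORD_RE = re.compile(
    r"(?P<corpus_id>[^|]+)\|(?P<paper_id>[^|]+)\|(?P<title>[^|]+)\|"
    r"\s*<\|reference_start\|>(?P<abstract>.*?)<\|reference_end\|>\s*\|"
    r"(?P<source>[^|]+)\|\s*(?P<bibtex>@article\{.*?\n\})\s*\|\s*(?P<citation_key>[^|\n]+)",
    re.DOTALL,
)

def parse_record(raw: str) -> dict:
    """Return the seven fields of a single record as a stripped dict."""
    m = RECORD_RE.search(raw)
    if m is None:
        raise ValueError("record does not match the expected layout")
    return {k: v.strip() for k, v in m.groupdict().items()}
```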
arxiv-668601 | cmp-lg/9504024 | A Morphographemic Model for Error Correction in Nonconcatenative Strings | <|reference_start|>A Morphographemic Model for Error Correction in Nonconcatenative Strings: This paper introduces a spelling correction system which integrates seamlessly with morphological analysis using a multi-tape formalism. Handling of various Semitic error problems is illustrated, with reference to Arabic and Syriac examples. The model handles errors in vocalisation, diacritics, phonetic syncopation and morphographemic idiosyncrasies, in addition to Damerau errors. A complementary correction strategy for morphologically sound but morphosyntactically ill-formed words is outlined.<|reference_end|> | arxiv | @article{bowden1995a,
title={A Morphographemic Model for Error Correction in Nonconcatenative Strings},
author={Tanya Bowden (University of Cambridge) and George Anton Kiraz
(University of Cambridge)},
journal={arXiv preprint arXiv:cmp-lg/9504024},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504024},
primaryClass={cmp-lg cs.CL}
} | bowden1995a |
arxiv-668602 | cmp-lg/9504025 | Discourse Processing of Dialogues with Multiple Threads | <|reference_start|>Discourse Processing of Dialogues with Multiple Threads: In this paper we will present our ongoing work on a plan-based discourse processor developed in the context of the Enthusiast Spanish to English translation system as part of the JANUS multi-lingual speech-to-speech translation system. We will demonstrate that theories of discourse which postulate a strict tree structure of discourse on either the intentional or attentional level are not totally adequate for handling spontaneous dialogues. We will present our extension to this approach along with its implementation in our plan-based discourse processor. We will demonstrate that the implementation of our approach outperforms an implementation based on the strict tree structure approach.<|reference_end|> | arxiv | @article{rose'1995discourse,
title={Discourse Processing of Dialogues with Multiple Threads},
author={Carolyn Penstein Rose' (Carnegie Mellon University), Barbara Di
Eugenio (Carnegie Mellon University), Lori S. Levin (Carnegie Mellon
University), Carol Van Ess-Dykema (Department Of Defense)},
journal={Proceedings of the 33rd Annual Meeting of the Association for
Computational Linguistics, MIT, 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504025},
primaryClass={cmp-lg cs.CL}
} | rose'1995discourse |
arxiv-668603 | cmp-lg/9504026 | The intersection of Finite State Automata and Definite Clause Grammars | <|reference_start|>The intersection of Finite State Automata and Definite Clause Grammars: Bernard Lang defines parsing as the calculation of the intersection of a FSA (the input) and a CFG. Viewing the input for parsing as a FSA rather than as a string combines well with some approaches in speech understanding systems, in which parsing takes a word lattice as input (rather than a word string). Furthermore, certain techniques for robust parsing can be modelled as finite state transducers. In this paper we investigate how we can generalize this approach for unification grammars. In particular we will concentrate on how we might perform the calculation of the intersection of a FSA and a DCG. It is shown that existing parsing algorithms can be easily extended for FSA inputs. However, we also show that the termination properties change drastically: we show that it is undecidable whether the intersection of a FSA and a DCG is empty (even if the DCG is off-line parsable). Furthermore we discuss approaches to cope with the problem.<|reference_end|> | arxiv | @article{van noord1995the,
title={The intersection of Finite State Automata and Definite Clause Grammars},
author={Gertjan van Noord (Alfa-informatica and BCN, Groningen)},
journal={Proceedings of the 33rd ACL. Boston 1995.},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504026},
primaryClass={cmp-lg cs.CL}
} | van noord1995the |
arxiv-668604 | cmp-lg/9504027 | An Efficient Generation Algorithm for Lexicalist MT | <|reference_start|>An Efficient Generation Algorithm for Lexicalist MT: The lexicalist approach to Machine Translation offers significant advantages in the development of linguistic descriptions. However, the Shake-and-Bake generation algorithm of (Whitelock, COLING-92) is NP-complete. We present a polynomial time algorithm for lexicalist MT generation provided that sufficient information can be transferred to ensure more determinism.<|reference_end|> | arxiv | @article{poznanski1995an,
title={An Efficient Generation Algorithm for Lexicalist MT},
author={Victor Poznanski, John L. Beaven, Pete Whitelock},
journal={arXiv preprint arXiv:cmp-lg/9504027},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504027},
primaryClass={cmp-lg cs.CL}
} | poznanski1995an |
arxiv-668605 | cmp-lg/9504028 | Memoization of Coroutined Constraints | <|reference_start|>Memoization of Coroutined Constraints: Some linguistic constraints cannot be effectively resolved during parsing at the location in which they are most naturally introduced. This paper shows how constraints can be propagated in a memoizing parser (such as a chart parser) in much the same way that variable bindings are, providing a general treatment of constraint coroutining in memoization. Prolog code for a simple application of our technique to Bouma and van Noord's (1994) categorial grammar analysis of Dutch is provided.<|reference_end|> | arxiv | @article{johnson1995memoization,
title={Memoization of Coroutined Constraints},
author={Mark Johnson (Brown University) and Jochen D\"orre (Universit\"at
Stuttgart)},
journal={arXiv preprint arXiv:cmp-lg/9504028},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504028},
primaryClass={cmp-lg cs.CL}
} | johnson1995memoization |
arxiv-668606 | cmp-lg/9504029 | Quantifiers, Anaphora, and Intensionality | <|reference_start|>Quantifiers, Anaphora, and Intensionality: The relationship between Lexical-Functional Grammar (LFG) {\em functional structures} (f-structures) for sentences and their semantic interpretations can be expressed directly in a fragment of linear logic in a way that correctly explains the constrained interactions between quantifier scope ambiguity, bound anaphora and intensionality. This deductive approach to semantic interpretation obviates the need for additional mechanisms, such as Cooper storage, to represent the possible scopes of a quantified NP, and explains the interactions between quantified NPs, anaphora and intensional verbs such as `seek'. A single specification in linear logic of the argument requirements of intensional verbs is sufficient to derive the correct reading predictions for intensional-verb clauses both with nonquantified and with quantified direct objects. In particular, both de dicto and de re readings are derived for quantified objects. The effects of type-raising or quantifying-in rules in other frameworks here just follow as linear-logic theorems. While our approach resembles current categorial approaches in important ways, it differs from them in allowing the greater type flexibility of categorial semantics while maintaining a precise connection to syntax. As a result, we are able to provide derivations for certain readings of sentences with intensional verbs and complex direct objects that are not derivable in current purely categorial accounts of the syntax-semantics interface.<|reference_end|> | arxiv | @article{dalrymple1995quantifiers,
title={Quantifiers, Anaphora, and Intensionality},
author={Mary Dalrymple (Xerox PARC), John Lamping (Xerox PARC), Fernando
Pereira (AT&T Bell Laboratories), Vijay Saraswat (Xerox PARC)},
journal={arXiv preprint arXiv:cmp-lg/9504029},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504029},
primaryClass={cmp-lg cs.CL}
} | dalrymple1995quantifiers |
arxiv-668607 | cmp-lg/9504030 | Statistical Decision-Tree Models for Parsing | <|reference_start|>Statistical Decision-Tree Models for Parsing: Syntactic natural language parsers have shown themselves to be inadequate for processing highly-ambiguous large-vocabulary text, as is evidenced by their poor performance on domains like the Wall Street Journal, and by the movement away from parsing-based approaches to text-processing in general. In this paper, I describe SPATTER, a statistical parser based on decision-tree learning techniques which constructs a complete parse for every sentence and achieves accuracy rates far better than any published result. This work is based on the following premises: (1) grammars are too complex and detailed to develop manually for most interesting domains; (2) parsing models must rely heavily on lexical and contextual information to analyze sentences accurately; and (3) existing {$n$}-gram modeling techniques are inadequate for parsing models. In experiments comparing SPATTER with IBM's computer manuals parser, SPATTER significantly outperforms the grammar-based parser. Evaluating SPATTER against the Penn Treebank Wall Street Journal corpus using the PARSEVAL measures, SPATTER achieves 86\% precision, 86\% recall, and 1.3 crossing brackets per sentence for sentences of 40 words or less, and 91\% precision, 90\% recall, and 0.5 crossing brackets for sentences between 10 and 20 words in length.<|reference_end|> | arxiv | @article{magerman1995statistical,
title={Statistical Decision-Tree Models for Parsing},
author={David M. Magerman},
journal={Proceedings of the 33rd Annual Meeting of the ACL},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504030},
primaryClass={cmp-lg cs.CL}
} | magerman1995statistical |
arxiv-668608 | cmp-lg/9504031 | Error-tolerant Finite State Recognition with Applications to Morphological Analysis and Spelling Correction | <|reference_start|>Error-tolerant Finite State Recognition with Applications to Morphological Analysis and Spelling Correction: Error-tolerant recognition enables the recognition of strings that deviate mildly from any string in the regular set recognized by the underlying finite state recognizer. Such recognition has applications in error-tolerant morphological processing, spelling correction, and approximate string matching in information retrieval. After a description of the concepts and algorithms involved, we give examples from two applications: In the context of morphological analysis, error-tolerant recognition allows misspelled input word forms to be corrected, and morphologically analyzed concurrently. We present an application of this to error-tolerant analysis of agglutinative morphology of Turkish words. The algorithm can be applied to morphological analysis of any language whose morphology is fully captured by a single (and possibly very large) finite state transducer, regardless of the word formation processes and morphographemic phenomena involved. In the context of spelling correction, error-tolerant recognition can be used to enumerate correct candidate forms from a given misspelled string within a certain edit distance. Again, it can be applied to any language with a word list comprising all inflected forms, or whose morphology is fully described by a finite state transducer. We present experimental results for spelling correction for a number of languages. These results indicate that such recognition works very efficiently for candidate generation in spelling correction for many European languages such as English, Dutch, French, German, Italian (and others) with very large word lists of root and inflected forms (some containing well over 200,000 forms), generating all candidate solutions within 10 to 45 milliseconds (with edit distance 1) on a SparcStation 10/41. For spelling correction in Turkish, error-tolerant<|reference_end|> | arxiv | @article{oflazer1995error-tolerant,
title={Error-tolerant Finite State Recognition with Applications to
Morphological Analysis and Spelling Correction},
author={Kemal Oflazer (Department of Computer Engineering and Information
Science Bilkent University, Ankara Turkey)},
journal={arXiv preprint arXiv:cmp-lg/9504031},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504031},
primaryClass={cmp-lg cs.CL}
} | oflazer1995error-tolerant |
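The record above describes enumerating correction candidates within a fixed edit distance of a misspelled word. As a toy illustration only (the paper itself walks a finite-state recognizer rather than enumerating strings, which is what makes it scale to very large lexicons), Damerau-style edit-distance-1 candidate generation over a plain word list can be sketched as:

```python
# Toy sketch, not the paper's algorithm: enumerate all Damerau edit-distance-1
# variants of a word and keep those found in a (hypothetical) lexicon set.
ALPHABET = "abcdefghijklmnopqrstuvwxyz"

def edits1(word: str) -> set[str]:
    """Strings one deletion, transposition, substitution, or insertion away."""
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = {l + r[1:] for l, r in splits if r}
    transposes = {l + r[1] + r[0] + r[2:] for l, r in splits if len(r) > 1}
    substitutes = {l + c + r[1:] for l, r in splits if r for c in ALPHABET}
    inserts = {l + c + r for l, r in splits for c in ALPHABET}
    return deletes | transposes | substitutes | inserts

def candidates(word: str, lexicon: set[str]) -> set[str]:
    """Correction candidates: edit-distance-1 neighbours present in the lexicon."""
    return edits1(word) & lexicon

# e.g. candidates("recieve", {"receive", "relieve", "recipe"})
# -> {"receive", "relieve"}
```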
arxiv-668609 | cmp-lg/9504032 | The Replace Operator | <|reference_start|>The Replace Operator: This paper introduces to the calculus of regular expressions a replace operator, ->, and defines a set of replacement expressions that concisely encode several alternate variations of the operation. The basic case is unconditional obligatory replacement: UPPER -> LOWER Conditional versions of replacement, such as, UPPER -> LOWER || LEFT _ RIGHT constrain the operation by left and right contexts. UPPER, LOWER, LEFT, and RIGHT may be regular expressions of any complexity. Replace expressions denote regular relations. The replace operator is defined in terms of other regular expression operators using techniques introduced by Ronald M. Kaplan and Martin Kay in "Regular Models of Phonological Rule Systems" (Computational Linguistics 20:3 331-378. 1994).<|reference_end|> | arxiv | @article{karttunen1995the,
title={The Replace Operator},
author={Lauri Karttunen, Rank Xerox Research Centre},
journal={arXiv preprint arXiv:cmp-lg/9504032},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504032},
primaryClass={cmp-lg cs.CL}
} | karttunen1995the |
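In the record above, the conditional replacement UPPER -> LOWER || LEFT _ RIGHT restricts where the rewrite applies. The operator itself denotes a regular relation compiled into a finite-state transducer, but for simple, non-overlapping patterns its conditional behaviour resembles a context-restricted substitution. The sketch below is that rough analogy only, with hypothetical argument names; it assumes `left` is a fixed-width pattern (a Python lookbehind restriction) and does not model the transducer semantics.

```python
import re

def conditional_replace(text: str, upper: str, lower: str,
                        left: str = "", right: str = "") -> str:
    # Approximate UPPER -> LOWER || LEFT _ RIGHT: rewrite upper as lower only
    # when preceded by left and followed by right. Lookaround assertions are
    # zero-width, so the contexts themselves are not consumed or rewritten.
    pattern = (f"(?<={left})" if left else "") + upper + \
              (f"(?={right})" if right else "")
    return re.sub(pattern, lower, text)

# e.g. the rule N -> m || _ p ("N becomes m before p"):
# conditional_replace("iNput iNk", "N", "m", right="p") -> "imput iNk"
```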
arxiv-668610 | cmp-lg/9504033 | Corpus Statistics Meet the Noun Compound: Some Empirical Results | <|reference_start|>Corpus Statistics Meet the Noun Compound: Some Empirical Results: A variety of statistical methods for noun compound analysis are implemented and compared. The results support two main conclusions. First, the use of conceptual association not only enables a broad coverage, but also improves the accuracy. Second, an analysis model based on dependency grammar is substantially more accurate than one based on deepest constituents, even though the latter is more prevalent in the literature.<|reference_end|> | arxiv | @article{lauer1995corpus,
title={Corpus Statistics Meet the Noun Compound: Some Empirical Results},
author={Mark Lauer (Microsoft Institute, Sydney)},
journal={Proceedings of the 33rd Annual Meeting of the Association for
Computational Linguistics, Boston, MA., 1995 pp47-54},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504033},
primaryClass={cmp-lg cs.CL}
} | lauer1995corpus |
arxiv-668611 | cmp-lg/9504034 | Bayesian Grammar Induction for Language Modeling | <|reference_start|>Bayesian Grammar Induction for Language Modeling: We describe a corpus-based induction algorithm for probabilistic context-free grammars. The algorithm employs a greedy heuristic search within a Bayesian framework, and a post-pass using the Inside-Outside algorithm. We compare the performance of our algorithm to n-gram models and the Inside-Outside algorithm in three language modeling tasks. In two of the tasks, the training data is generated by a probabilistic context-free grammar and in both tasks our algorithm outperforms the other techniques. The third task involves naturally-occurring data, and in this task our algorithm does not perform as well as n-gram models but vastly outperforms the Inside-Outside algorithm.<|reference_end|> | arxiv | @article{chen1995bayesian,
title={Bayesian Grammar Induction for Language Modeling},
author={Stanley F. Chen (Harvard University)},
journal={Proc. 33rd Annual Meeting of the ACL, p. 228-235, Cambridge, MA
1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9504034},
primaryClass={cmp-lg cs.CL}
} | chen1995bayesian |
arxiv-668612 | cmp-lg/9505001 | Response Generation in Collaborative Negotiation | <|reference_start|>Response Generation in Collaborative Negotiation: In collaborative planning activities, since the agents are autonomous and heterogeneous, it is inevitable that conflicts arise in their beliefs during the planning process. In cases where such conflicts are relevant to the task at hand, the agents should engage in collaborative negotiation as an attempt to square away the discrepancies in their beliefs. This paper presents a computational strategy for detecting conflicts regarding proposed beliefs and for engaging in collaborative negotiation to resolve the conflicts that warrant resolution. Our model is capable of selecting the most effective aspect to address in its pursuit of conflict resolution in cases where multiple conflicts arise, and of selecting appropriate evidence to justify the need for such modification. Furthermore, by capturing the negotiation process in a recursive Propose-Evaluate-Modify cycle of actions, our model can successfully handle embedded negotiation subdialogues.<|reference_end|> | arxiv | @article{chu-carroll1995response,
title={Response Generation in Collaborative Negotiation},
author={Jennifer Chu-Carroll and Sandra Carberry (University of Delaware)},
journal={arXiv preprint arXiv:cmp-lg/9505001},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505001},
primaryClass={cmp-lg cs.CL}
} | chu-carroll1995response |
arxiv-668613 | cmp-lg/9505002 | New Techniques for Context Modeling | <|reference_start|>New Techniques for Context Modeling: We introduce three new techniques for statistical language models: extension modeling, nonmonotonic contexts, and the divergence heuristic. Together these techniques result in language models that have few states, even fewer parameters, and low message entropies. For example, our techniques achieve a message entropy of 1.97 bits/char on the Brown corpus using only 89,325 parameters. In contrast, the character 4-gram model requires more than 250 times as many parameters in order to achieve a message entropy of only 2.47 bits/char. The fact that our model performs significantly better while using vastly fewer parameters indicates that it is a better probability model of natural language text.<|reference_end|> | arxiv | @article{ristad1995new,
title={New Techniques for Context Modeling},
author={Eric Sven Ristad and Robert G. Thomas (Princeton University)},
journal={arXiv preprint arXiv:cmp-lg/9505002},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505002},
primaryClass={cmp-lg cs.CL}
} | ristad1995new |
arxiv-668614 | cmp-lg/9505003 | Compiling HPSG type constraints into definite clause programs | <|reference_start|>Compiling HPSG type constraints into definite clause programs: We present a new approach to HPSG processing: compiling HPSG grammars expressed as type constraints into definite clause programs. This provides a clear and computationally useful correspondence between linguistic theories and their implementation. The compiler performs off-line constraint inheritance and code optimization. As a result, we are able to efficiently process with HPSG grammars without having to hand-translate them into definite clause or phrase structure based systems.<|reference_end|> | arxiv | @article{goetz1995compiling,
title={Compiling HPSG type constraints into definite clause programs},
author={Thilo Goetz and Walt Detmar Meurers (SFB 340, Univ. Tuebingen)},
journal={Proceedings of ACL-95},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505003},
primaryClass={cmp-lg cs.CL}
} | goetz1995compiling |
arxiv-668615 | cmp-lg/9505004 | DATR Theories and DATR Models | <|reference_start|>DATR Theories and DATR Models: Evans and Gazdar introduced DATR as a simple, non-monotonic language for representing natural language lexicons. Although a number of implementations of DATR exist, the full language has until now lacked an explicit, declarative semantics. This paper rectifies the situation by providing a mathematical semantics for DATR. We present a view of DATR as a language for defining certain kinds of partial functions by cases. The formal model provides a transparent treatment of DATR's notion of global context. It is shown that DATR's default mechanism can be accounted for by interpreting value descriptors as families of values indexed by paths.<|reference_end|> | arxiv | @article{keller1995datr,
title={DATR Theories and DATR Models},
author={Bill Keller (University of Sussex)},
journal={arXiv preprint arXiv:cmp-lg/9505004},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505004},
primaryClass={cmp-lg cs.CL}
} | keller1995datr |
arxiv-668616 | cmp-lg/9505005 | Learning Syntactic Rules and Tags with Genetic Algorithms for Information Retrieval and Filtering: An Empirical Basis for Grammatical Rules | <|reference_start|>Learning Syntactic Rules and Tags with Genetic Algorithms for Information Retrieval and Filtering: An Empirical Basis for Grammatical Rules: The grammars of natural languages may be learned by using genetic algorithms that reproduce and mutate grammatical rules and part-of-speech tags, improving the quality of later generations of grammatical components. Syntactic rules are randomly generated and then evolve; those rules resulting in improved parsing and occasionally improved retrieval and filtering performance are allowed to further propagate. The LUST system learns the characteristics of the language or sublanguage used in document abstracts by learning from the document rankings obtained from the parsed abstracts. Unlike the application of traditional linguistic rules to retrieval and filtering applications, LUST develops grammatical structures and tags without the prior imposition of some common grammatical assumptions (e.g., part-of-speech assumptions), producing grammars that are empirically based and are optimized for this particular application.<|reference_end|> | arxiv | @article{losee1995learning,
title={Learning Syntactic Rules and Tags with Genetic Algorithms for
Information Retrieval and Filtering: An Empirical Basis for Grammatical Rules},
author={Robert M. Losee (U. of North Carolina, Chapel Hill)},
journal={arXiv preprint arXiv:cmp-lg/9505005},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505005},
primaryClass={cmp-lg cs.CL}
} | losee1995learning |
arxiv-668617 | cmp-lg/9505006 | Treating Coordination with Datalog Grammars | <|reference_start|>Treating Coordination with Datalog Grammars: In previous work we studied a new type of DCGs, Datalog grammars, which are inspired by database theory. Their efficiency was shown to be better than that of their DCG counterparts under (terminating) OLDT-resolution. In this article we motivate a variant of Datalog grammars which allows us a meta-grammatical treatment of coordination. This treatment improves in some respects over previous work on coordination in logic grammars, although more research is needed for testing it in other respects.<|reference_end|> | arxiv | @article{dahl1995treating,
title={Treating Coordination with Datalog Grammars},
author={Veronica Dahl, Paul Tarau, Lidia Moreno, Manolo Palomar},
journal={arXiv preprint arXiv:cmp-lg/9505006},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505006},
primaryClass={cmp-lg cs.CL}
} | dahl1995treating |
arxiv-668618 | cmp-lg/9505007 | Parsing a Flexible Word Order Language | <|reference_start|>Parsing a Flexible Word Order Language: A logic formalism is presented which increases the expressive power of the ID/LP format of GPSG by enlarging the inventory of ordering relations and extending the domain of their application to non-siblings. This allows a concise, modular and declarative statement of intricate word order regularities.<|reference_end|> | arxiv | @article{pericliev1995parsing,
title={Parsing a Flexible Word Order Language},
author={Vladimir Pericliev and Alexander Grigorov (Institute of Mathematics,
Sofia, Bulgaria)},
journal={arXiv preprint arXiv:cmp-lg/9505007},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505007},
primaryClass={cmp-lg cs.CL}
} | pericliev1995parsing |
arxiv-668619 | cmp-lg/9505008 | Conciseness through Aggregation in Text Generation | <|reference_start|>Conciseness through Aggregation in Text Generation: Aggregating different pieces of similar information is necessary to generate concise and easy to understand reports in technical domains. This paper presents a general algorithm that combines similar messages in order to generate one or more coherent sentences for them. The process is not as trivial as might be expected. Problems encountered are briefly described.<|reference_end|> | arxiv | @article{shaw1995conciseness,
title={Conciseness through Aggregation in Text Generation},
author={James Shaw (Columbia University)},
journal={student sessions ACL 95},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505008},
primaryClass={cmp-lg cs.CL}
} | shaw1995conciseness |
arxiv-668620 | cmp-lg/9505009 | Compilation of HPSG to TAG | <|reference_start|>Compilation of HPSG to TAG: We present an implemented compilation algorithm that translates HPSG into lexicalized feature-based TAG, relating concepts of the two theories. While HPSG has a more elaborated principle-based theory of possible phrase structures, TAG provides the means to represent lexicalized structures more explicitly. Our objectives are met by giving clear definitions that determine the projection of structures from the lexicon, and identify maximal projections, auxiliary trees and foot nodes.<|reference_end|> | arxiv | @article{kasper1995compilation,
title={Compilation of HPSG to TAG},
author={Robert Kasper (Ohio State University), Bernd Kiefer (DFKI
Saarbruecken), Klaus Netter (DFKI Saarbruecken), and K. Vijay-Shanker
(University of Delaware)},
journal={arXiv preprint arXiv:cmp-lg/9505009},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505009},
primaryClass={cmp-lg cs.CL}
} | kasper1995compilation |
arxiv-668621 | cmp-lg/9505010 | Tagset Reduction Without Information Loss | <|reference_start|>Tagset Reduction Without Information Loss: A technique for reducing a tagset used for n-gram part-of-speech disambiguation is introduced and evaluated in an experiment. The technique ensures that all information that is provided by the original tagset can be restored from the reduced one. This is crucial, since we are interested in the linguistically motivated tags for part-of-speech disambiguation. The reduced tagset needs fewer parameters for its statistical model and allows more accurate parameter estimation. Additionally, there is a slight but not significant improvement of tagging accuracy.<|reference_end|> | arxiv | @article{brants1995tagset,
title={Tagset Reduction Without Information Loss},
author={Thorsten Brants (Universit\"at des Saarlandes, Computational
Linguistics)},
journal={arXiv preprint arXiv:cmp-lg/9505010},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505010},
primaryClass={cmp-lg cs.CL}
} | brants1995tagset |
arxiv-668622 | cmp-lg/9505011 | Evaluation of Semantic Clusters | <|reference_start|>Evaluation of Semantic Clusters: Semantic clusters of a domain form an important feature that can be useful for performing syntactic and semantic disambiguation. Several attempts have been made to extract the semantic clusters of a domain by probabilistic or taxonomic techniques. However, not much progress has been made in evaluating the obtained semantic clusters. This paper focuses on an evaluation mechanism that can be used to evaluate semantic clusters produced by a system against those provided by human experts.<|reference_end|> | arxiv | @article{agarwal1995evaluation,
title={Evaluation of Semantic Clusters},
author={Rajeev Agarwal (Mississippi State University)},
journal={arXiv preprint arXiv:cmp-lg/9505011},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505011},
primaryClass={cmp-lg cs.CL}
} | agarwal1995evaluation |
arxiv-668623 | cmp-lg/9505012 | A Symbolic and Surgical Acquisition of Terms through Variation | <|reference_start|>A Symbolic and Surgical Acquisition of Terms through Variation: Terminological acquisition is an important issue in learning for NLP due to the constant terminological renewal through technological changes. Terms play a key role in several NLP-activities such as machine translation, automatic indexing or text understanding. In opposition to classical once-and-for-all approaches, we propose an incremental process for terminological enrichment which operates on existing reference lists and large corpora. Candidate terms are acquired by extracting variants of reference terms through {\em FASTR}, a unification-based partial parser. As acquisition is performed within specific morpho-syntactic contexts (coordinations, insertions or permutations of compounds), rich conceptual links are learned together with candidate terms. A clustering of terms related through coordination yields classes of conceptually close terms while graphs resulting from insertions denote generic/specific relations. A graceful degradation of the volume of acquisition on partial initial lists confirms the robustness of the method to incomplete data.<|reference_end|> | arxiv | @article{jacquemin1995a,
title={A Symbolic and Surgical Acquisition of Terms through Variation},
author={Christian Jacquemin (Institut de Recherches en Informatique de Nantes)},
journal={arXiv preprint arXiv:cmp-lg/9505012},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505012},
primaryClass={cmp-lg cs.CL}
} | jacquemin1995a |
arxiv-668624 | cmp-lg/9505013 | Utilizing Statistical Dialogue Act Processing in Verbmobil | <|reference_start|>Utilizing Statistical Dialogue Act Processing in Verbmobil: In this paper, we present a statistical approach for dialogue act processing in the dialogue component of the speech-to-speech translation system Verbmobil. Statistics in dialogue processing is used to predict follow-up dialogue acts. As an application example we show how it supports repair when unexpected dialogue states occur.<|reference_end|> | arxiv | @article{reithinger1995utilizing,
title={Utilizing Statistical Dialogue Act Processing in Verbmobil},
author={Norbert Reithinger and Elisabeth Maier (DFKI GmbH, Saarbr\"ucken,
Germany)},
journal={arXiv preprint arXiv:cmp-lg/9505013},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505013},
primaryClass={cmp-lg cs.CL}
} | reithinger1995utilizing |
arxiv-668625 | cmp-lg/9505014 | Compositionality for Presuppositions over Tableaux | <|reference_start|>Compositionality for Presuppositions over Tableaux: Tableaux originate as a decision method for a logical language. They can also be extended to obtain a structure that spells out all the information in a set of sentences in terms of truth value assignments to atomic formulas that appear in them. This approach is pursued here. Over such a structure, compositional rules are provided for obtaining the presuppositions of a logical statement from its atomic subformulas and their presuppositions. The rules are based on classical logic semantics and they are shown to model the behaviour of presuppositions observed in natural language sentences built with {\em if \ldots then}, {\em and} and {\em or}. The advantages of this method over existing frameworks for presuppositions are discussed.<|reference_end|> | arxiv | @article{gervas1995compositionality,
title={Compositionality for Presuppositions over Tableaux},
author={Pablo Gervas (Department of Computing, Imperial College, London)},
journal={arXiv preprint arXiv:cmp-lg/9505014},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505014},
primaryClass={cmp-lg cs.CL}
} | gervas1995compositionality |
arxiv-668626 | cmp-lg/9505015 | Efficient Analysis of Complex Diagrams using Constraint-Based Parsing | <|reference_start|>Efficient Analysis of Complex Diagrams using Constraint-Based Parsing: This paper describes substantial advances in the analysis (parsing) of diagrams using constraint grammars. The addition of set types to the grammar and spatial indexing of the data make it possible to efficiently parse real diagrams of substantial complexity. The system is probably the first to demonstrate efficient diagram parsing using grammars that easily be retargeted to other domains. The work assumes that the diagrams are available as a flat collection of graphics primitives: lines, polygons, circles, Bezier curves and text. This is appropriate for future electronic documents or for vectorized diagrams converted from scanned images. The classes of diagrams that we have analyzed include x,y data graphs and genetic diagrams drawn from the biological literature, as well as finite state automata diagrams (states and arcs). As an example, parsing a four-part data graph composed of 133 primitives required 35 sec using Macintosh Common Lisp on a Macintosh Quadra 700.<|reference_end|> | arxiv | @article{futrelle1995efficient,
title={Efficient Analysis of Complex Diagrams using Constraint-Based Parsing},
author={Robert P. Futrelle and Nikos Nikolakis (Northeastern U.)},
journal={arXiv preprint arXiv:cmp-lg/9505015},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505015},
primaryClass={cmp-lg cs.CL}
} | futrelle1995efficient |
arxiv-668627 | cmp-lg/9505016 | A Pattern Matching method for finding Noun and Proper Noun Translations from Noisy Parallel Corpora | <|reference_start|>A Pattern Matching method for finding Noun and Proper Noun Translations from Noisy Parallel Corpora: We present a pattern matching method for compiling a bilingual lexicon of nouns and proper nouns from unaligned, noisy parallel texts of Asian/Indo-European language pairs. Tagging information of one language is used. Word frequency and position information for high and low frequency words are represented in two different vector forms for pattern matching. New anchor point finding and noise elimination techniques are introduced. We obtained a 73.1\% precision. We also show how the results can be used in the compilation of domain-specific noun phrases.<|reference_end|> | arxiv | @article{fung1995a,
title={A Pattern Matching method for finding Noun and Proper Noun Translations
from Noisy Parallel Corpora},
author={Pascale Fung (Computer Science Department, Columbia Univ)},
journal={arXiv preprint arXiv:cmp-lg/9505016},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505016},
primaryClass={cmp-lg cs.CL}
} | fung1995a |
arxiv-668628 | cmp-lg/9505017 | Robust Parsing of Spoken Dialogue Using Contextual Knowledge and Recognition Probabilities | <|reference_start|>Robust Parsing of Spoken Dialogue Using Contextual Knowledge and Recognition Probabilities: In this paper we describe the linguistic processor of a spoken dialogue system. The parser receives a word graph from the recognition module as its input. Its task is to find the best path through the graph. If no complete solution can be found, a robust mechanism for selecting multiple partial results is applied. We show how the information content rate of the results can be improved if the selection is based on an integrated quality score combining word recognition scores and context-dependent semantic predictions. Results of parsing word graphs with and without predictions are reported.<|reference_end|> | arxiv | @article{hanrieder1995robust,
title={Robust Parsing of Spoken Dialogue Using Contextual Knowledge and
Recognition Probabilities},
author={Gerhard Hanrieder and Guenther Goerz (Bavarian Research Center for
Knowledge Based Systems, Erlangen, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9505017},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505017},
primaryClass={cmp-lg cs.CL}
} | hanrieder1995robust |
arxiv-668629 | cmp-lg/9505018 | Acquiring a Lexicon from Unsegmented Speech | <|reference_start|>Acquiring a Lexicon from Unsegmented Speech: We present work-in-progress on the machine acquisition of a lexicon from sentences that are each an unsegmented phone sequence paired with a primitive representation of meaning. A simple exploratory algorithm is described, along with the direction of current work and a discussion of the relevance of the problem for child language acquisition and computer speech recognition.<|reference_end|> | arxiv | @article{de marcken1995acquiring,
title={Acquiring a Lexicon from Unsegmented Speech},
author={Carl de Marcken (MIT AI Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9505018},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505018},
primaryClass={cmp-lg cs.CL}
} | de marcken1995acquiring |
arxiv-668630 | cmp-lg/9505019 | Measuring semantic complexity | <|reference_start|>Measuring semantic complexity: We define {\em semantic complexity} using a new concept of {\em meaning automata}. We measure the semantic complexity of understanding of prepositional phrases, of an "in depth understanding system", and of a natural language interface to an on-line calendar. We argue that it is possible to measure some semantic complexities of natural language processing systems before building them, and that systems that exhibit relatively complex behavior can be built from semantically simple components.<|reference_end|> | arxiv | @article{zadrozny1995measuring,
title={Measuring semantic complexity},
author={Wlodek Zadrozny (IBM Research, T. J. Watson Research Center)},
journal={arXiv preprint arXiv:cmp-lg/9505019},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505019},
primaryClass={cmp-lg cs.CL}
} | zadrozny1995measuring |
arxiv-668631 | cmp-lg/9505020 | CRYSTAL: Inducing a Conceptual Dictionary | <|reference_start|>CRYSTAL: Inducing a Conceptual Dictionary: One of the central knowledge sources of an information extraction system is a dictionary of linguistic patterns that can be used to identify the conceptual content of a text. This paper describes CRYSTAL, a system which automatically induces a dictionary of "concept-node definitions" sufficient to identify relevant information from a training corpus. Each of these concept-node definitions is generalized as far as possible without producing errors, so that a minimum number of dictionary entries cover the positive training instances. Because it tests the accuracy of each proposed definition, CRYSTAL can often surpass human intuitions in creating reliable extraction rules.<|reference_end|> | arxiv | @article{soderland1995crystal:,
title={CRYSTAL: Inducing a Conceptual Dictionary},
author={Stephen Soderland, David Fisher, Jonathan Aseltine, Wendy Lehnert
(University of Massachusetts)},
journal={arXiv preprint arXiv:cmp-lg/9505020},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505020},
primaryClass={cmp-lg cs.CL}
} | soderland1995crystal: |
arxiv-668632 | cmp-lg/9505021 | Improving the Efficiency of a Generation Algorithm for Shake and Bake Machine Translation Using Head-Driven Phrase Structure Grammar | <|reference_start|>Improving the Efficiency of a Generation Algorithm for Shake and Bake Machine Translation Using Head-Driven Phrase Structure Grammar: A Shake and Bake machine translation algorithm for Head-Driven Phrase Structure Grammar is introduced based on the algorithm proposed by Whitelock for unification categorial grammar. The translation process is then analysed to determine where the potential sources of inefficiency reside, and some proposals are introduced which greatly improve the efficiency of the generation algorithm. Preliminary empirical results from tests involving a small grammar are presented, and suggestions for greater improvement to the algorithm are provided.<|reference_end|> | arxiv | @article{popowich1995improving,
title={Improving the Efficiency of a Generation Algorithm for Shake and Bake
Machine Translation Using Head-Driven Phrase Structure Grammar},
author={Fred Popowich (Simon Fraser University)},
journal={arXiv preprint arXiv:cmp-lg/9505021},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505021},
primaryClass={cmp-lg cs.CL}
} | popowich1995improving |
arxiv-668633 | cmp-lg/9505022 | Generating One-Anaphoric Expressions: Where Does the Decision Lie? | <|reference_start|>Generating One-Anaphoric Expressions: Where Does the Decision Lie?: Most natural language generation systems embody mechanisms for choosing whether to subsequently refer to an already-introduced entity by means of a pronoun or a definite noun phrase. Relatively few systems, however, consider referring to entities by means of one-anaphoric expressions such as {\em the small green one}. This paper looks at what is involved in generating referring expressions of this type. Consideration of how to fit this capability into a standard algorithm for referring expression generation leads us to suggest a revision of some of the assumptions that underlie existing approaches. We demonstrate the usefulness of our approach to one-anaphora generation in the context of a simple database interface application, and make some observations about the impact of this approach on referring expression generation more generally.<|reference_end|> | arxiv | @article{dale1995generating,
title={Generating One-Anaphoric Expressions: Where Does the Decision Lie?},
author={Robert Dale (Microsoft, Sydney)},
journal={arXiv preprint arXiv:cmp-lg/9505022},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505022},
primaryClass={cmp-lg cs.CL}
} | dale1995generating |
arxiv-668634 | cmp-lg/9505023 | Some Novel Applications of Explanation-Based Learning to Parsing Lexicalized Tree-Adjoining Grammars | <|reference_start|>Some Novel Applications of Explanation-Based Learning to Parsing Lexicalized Tree-Adjoining Grammars: In this paper we present some novel applications of Explanation-Based Learning (EBL) technique to parsing Lexicalized Tree-Adjoining grammars. The novel aspects are (a) immediate generalization of parses in the training set, (b) generalization over recursive structures and (c) representation of generalized parses as Finite State Transducers. A highly impoverished parser called a ``stapler'' has also been introduced. We present experimental results using EBL for different corpora and architectures to show the effectiveness of our approach.<|reference_end|> | arxiv | @article{srinivas1995some,
title={Some Novel Applications of Explanation-Based Learning to Parsing
Lexicalized Tree-Adjoining Grammars},
author={B. Srinivas and Aravind Joshi (Department of Computer and Information
Science, University of Pennsylvania)},
journal={ACL 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505023},
primaryClass={cmp-lg cs.CL}
} | srinivas1995some |
arxiv-668635 | cmp-lg/9505024 | Exploring the role of Punctuation in Parsing Natural Text | <|reference_start|>Exploring the role of Punctuation in Parsing Natural Text: Few, if any, current NLP systems make any significant use of punctuation. Intuitively, a treatment of punctuation seems necessary to the analysis and production of text. Whilst this has been suggested in the fields of discourse structure, it is still unclear whether punctuation can help in the syntactic field. This investigation attempts to answer this question by parsing some corpus-based material with two similar grammars --- one including rules for punctuation, the other ignoring it. The punctuated grammar significantly out-performs the unpunctuated one, and so the conclusion is that punctuation can play a useful role in syntactic processing.<|reference_end|> | arxiv | @article{jones1995exploring,
title={Exploring the role of Punctuation in Parsing Natural Text},
author={Bernard Jones (Centre for Cognitive Science, University of Edinburgh,
UK)},
journal={arXiv preprint arXiv:cmp-lg/9505024},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505024},
primaryClass={cmp-lg cs.CL}
} | jones1995exploring |
arxiv-668636 | cmp-lg/9505025 | Combining Multiple Knowledge Sources for Discourse Segmentation | <|reference_start|>Combining Multiple Knowledge Sources for Discourse Segmentation: We predict discourse segment boundaries from linguistic features of utterances, using a corpus of spoken narratives as data. We present two methods for developing segmentation algorithms from training data: hand tuning and machine learning. When multiple types of features are used, results approach human performance on an independent test set (both methods), and using cross-validation (machine learning).<|reference_end|> | arxiv | @article{litman1995combining,
title={Combining Multiple Knowledge Sources for Discourse Segmentation},
author={Diane J. Litman (AT&T Bell Laboratories) and Rebecca J. Passonneau
(Bellcore)},
journal={arXiv preprint arXiv:cmp-lg/9505025},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505025},
primaryClass={cmp-lg cs.CL}
} | litman1995combining |
arxiv-668637 | cmp-lg/9505026 | Tagging the Teleman Corpus | <|reference_start|>Tagging the Teleman Corpus: Experiments were carried out comparing the Swedish Teleman and the English Susanne corpora using an HMM-based and a novel reductionistic statistical part-of-speech tagger. They indicate that tagging the Teleman corpus is the more difficult task, and that the performance of the two different taggers is comparable.<|reference_end|> | arxiv | @article{brants1995tagging,
title={Tagging the Teleman Corpus},
author={Thorsten Brants and Christer Samuelsson (Universit\"at des Saarlandes,
Computational Linguistics, Saarbr\"ucken, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9505026},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505026},
primaryClass={cmp-lg cs.CL}
} | brants1995tagging |
arxiv-668638 | cmp-lg/9505027 | Quantifier Scope and Constituency | <|reference_start|>Quantifier Scope and Constituency: Traditional approaches to quantifier scope typically need stipulation to exclude readings that are unavailable to human understanders. This paper shows that quantifier scope phenomena can be precisely characterized by a semantic representation constrained by surface constituency, if the distinction between referential and quantificational NPs is properly observed. A CCG implementation is described and compared to other approaches.<|reference_end|> | arxiv | @article{park1995quantifier,
title={Quantifier Scope and Constituency},
author={Jong C. Park (Department of Computer and Information Science,
University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9505027},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505027},
primaryClass={cmp-lg cs.CL}
} | park1995quantifier |
arxiv-668639 | cmp-lg/9505028 | D-Tree Grammars | <|reference_start|>D-Tree Grammars: DTG are designed to share some of the advantages of TAG while overcoming some of its limitations. DTG involve two composition operations called subsertion and sister-adjunction. The most distinctive feature of DTG is that, unlike TAG, there is complete uniformity in the way that the two DTG operations relate lexical items: subsertion always corresponds to complementation and sister-adjunction to modification. Furthermore, DTG, unlike TAG, can provide a uniform analysis for {\em wh}-movement in English and Kashmiri, despite the fact that the {\em wh} element in Kashmiri appears in sentence-second position, and not sentence-initial position as in English.<|reference_end|> | arxiv | @article{rambow1995d-tree,
title={D-Tree Grammars},
author={Owen Rambow, K. Vijay-Shanker, and David Weir},
journal={arXiv preprint arXiv:cmp-lg/9505028},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505028},
primaryClass={cmp-lg cs.CL}
} | rambow1995d-tree |
arxiv-668640 | cmp-lg/9505029 | Mapping Scrambled Korean Sentences into English Using Synchronous TAGs | <|reference_start|>Mapping Scrambled Korean Sentences into English Using Synchronous TAGs: Synchronous Tree Adjoining Grammars can be used for Machine Translation. However, translating a free order language such as Korean to English is complicated. I present a mechanism to translate scrambled Korean sentences into English by combining the concepts of Multi-Component TAGs (MC-TAGs) and Synchronous TAGs (STAGs).<|reference_end|> | arxiv | @article{park1995mapping,
title={Mapping Scrambled Korean Sentences into English Using Synchronous TAGs},
author={Hyun S. Park (University of Cambridge)},
journal={arXiv preprint arXiv:cmp-lg/9505029},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505029},
primaryClass={cmp-lg cs.CL}
} | park1995mapping |
arxiv-668641 | cmp-lg/9505030 | Encoding Lexicalized Tree Adjoining Grammars with a Nonmonotonic Inheritance Hierarchy | <|reference_start|>Encoding Lexicalized Tree Adjoining Grammars with a Nonmonotonic Inheritance Hierarchy: This paper shows how DATR, a widely used formal language for lexical knowledge representation, can be used to define an LTAG lexicon as an inheritance hierarchy with internal lexical rules. A bottom-up featural encoding is used for LTAG trees and this allows lexical rules to be implemented as covariation constraints within feature structures. Such an approach eliminates the considerable redundancy otherwise associated with an LTAG lexicon.<|reference_end|> | arxiv | @article{evans1995encoding,
title={Encoding Lexicalized Tree Adjoining Grammars with a Nonmonotonic
Inheritance Hierarchy},
author={Roger Evans (University of Brighton), Gerald Gazdar, David Weir
(University of Sussex)},
journal={Proceedings of ACL95},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505030},
primaryClass={cmp-lg cs.CL}
} | evans1995encoding |
arxiv-668642 | cmp-lg/9505031 | The Compactness of Construction Grammars | <|reference_start|>The Compactness of Construction Grammars: We present an argument for {\em construction grammars} based on the minimum description length (MDL) principle (a formal version of the Ockham Razor). The argument consists in using linguistic and computational evidence in setting up a formal model, and then applying the MDL principle to prove its superiority with respect to alternative models. We show that construction-based representations are at least an order of magnitude more compact than the corresponding lexicalized representations of the same linguistic data. The result is significant for our understanding of the relationship between syntax and semantics, and consequently for choosing NLP architectures. For instance, whether the processing should proceed in a pipeline from syntax to semantics to pragmatics, and whether all linguistic information should be combined in a set of constraints. From a broader perspective, this paper not only argues for a certain model of processing, but also provides a methodology for determining advantages of different approaches to NLP.<|reference_end|> | arxiv | @article{zadrozny1995the,
title={The Compactness of Construction Grammars},
author={Wlodek Zadrozny (IBM Research, T. J. Watson Research Center)},
journal={arXiv preprint arXiv:cmp-lg/9505031},
year={1995},
number={IBM Research Report RC 20003 (88493)},
archivePrefix={arXiv},
eprint={cmp-lg/9505031},
primaryClass={cmp-lg cs.CL}
} | zadrozny1995the |
arxiv-668643 | cmp-lg/9505032 | Context and ontology in understanding of dialogs | <|reference_start|>Context and ontology in understanding of dialogs: We present a model of NLP in which ontology and context are directly included in a grammar. The model is based on the concept of {\em construction}, consisting of a set of features of form, a set of semantic and pragmatic conditions describing its application context, and a description of its meaning. In this model ontology is embedded into the grammar; e.g. the hierarchy of {\it np} constructions is based on the corresponding ontology. Ontology is also used in defining contextual parameters; e.g. $\left[ current\_question \ time(\_) \right] $. A parser based on this model allowed us to build a set of dialog understanding systems that include an on-line calendar, a banking machine, and an insurance quote system. The proposed approach is an alternative to the standard "pipeline" design of morphology-syntax-semantics-pragmatics; the account of meaning conforms to our intuitions about compositionality, but there is no homomorphism from syntax to semantics.<|reference_end|> | arxiv | @article{zadrozny1995context,
title={Context and ontology in understanding of dialogs},
author={Wlodek Zadrozny (IBM Research, T. J. Watson Research Center)},
journal={arXiv preprint arXiv:cmp-lg/9505032},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505032},
primaryClass={cmp-lg cs.CL}
} | zadrozny1995context |
arxiv-668644 | cmp-lg/9505033 | User-Defined Nonmonotonicity in Unification-Based Formalisms | <|reference_start|>User-Defined Nonmonotonicity in Unification-Based Formalisms: A common feature of recent unification-based grammar formalisms is that they give the user the ability to define his own structures. However, this possibility is mostly limited and does not include nonmonotonic operations. In this paper we show how nonmonotonic operations can also be user-defined by applying default logic (Reiter 1980) and generalizing previous results on nonmonotonic sorts (Young & Rounds 1993).<|reference_end|> | arxiv | @article{stromback1995user-defined,
title={User-Defined Nonmonotonicity in Unification-Based Formalisms},
author={Lena Stromback (Department of Computer and Information Science
Linkoping University, Sweden)},
journal={ACL-95},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505033},
primaryClass={cmp-lg cs.CL}
} | stromback1995user-defined |
arxiv-668645 | cmp-lg/9505034 | Semantic Ambiguity and Perceived Ambiguity | <|reference_start|>Semantic Ambiguity and Perceived Ambiguity: I explore some of the issues that arise when trying to establish a connection between the underspecification hypothesis pursued in the NLP literature and work on ambiguity in semantics and in the psychological literature. A theory of underspecification is developed `from the first principles', i.e., starting from a definition of what it means for a sentence to be semantically ambiguous and from what we know about the way humans deal with ambiguity. An underspecified language is specified as the translation language of a grammar covering sentences that display three classes of semantic ambiguity: lexical ambiguity, scopal ambiguity, and referential ambiguity. The expressions of this language denote sets of senses. A formalization of defeasible reasoning with underspecified representations is presented, based on Default Logic. Some issues to be confronted by such a formalization are discussed.<|reference_end|> | arxiv | @article{poesio1995semantic,
title={Semantic Ambiguity and Perceived Ambiguity},
author={Massimo Poesio (University of Edinburgh, Centre for Cognitive Science)},
journal={K. van Deemter and S. Peters (eds.), Semantic ambiguity and
Underspecification, CSLI, 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505034},
primaryClass={cmp-lg cs.CL}
} | poesio1995semantic |
arxiv-668646 | cmp-lg/9505035 | Development of a Spanish Version of the Xerox Tagger | <|reference_start|>Development of a Spanish Version of the Xerox Tagger: This paper describes work performed within the CRATER ({\em C}orpus {\em R}esources {\em A}nd {\em T}erminology {\em E}xt{\em R}action, MLAP-93/20) project, funded by the Commission of the European Communities. In particular, it addresses the issue of adapting the Xerox Tagger to Spanish in order to tag the Spanish version of the ITU (International Telecommunications Union) corpus. The model implemented by this tagger is briefly presented along with some modifications performed on it in order to use some parameters not probabilistically estimated. Initial decisions, like the tagset, the lexicon and the training corpus are also discussed. Finally, results are presented and the benefits of the {\em mixed model} justified.<|reference_end|> | arxiv | @article{león1995development,
title={Development of a Spanish Version of the Xerox Tagger},
author={Fernando S\'anchez Le\'on (Laboratorio de Ling\"u\'istica
Inform\'atica, Facultad de Filosof\'ia y Letras, Universidad Aut\'onoma de
Madrid), and Amalio F. Nieto Serrano (Departamento de Ingenier\'ia de
Sistemas Telem\'aticos, Escuela Superior de Ingenieros de Telecomunicaciones,
Universidad Polit\'ecnica de Madrid)},
journal={arXiv preprint arXiv:cmp-lg/9505035},
year={1995},
number={CRATER/WP6/FR1},
archivePrefix={arXiv},
eprint={cmp-lg/9505035},
primaryClass={cmp-lg cs.CL}
} | león1995development |
arxiv-668647 | cmp-lg/9505036 | Integrating Gricean and Attentional Constraints | <|reference_start|>Integrating Gricean and Attentional Constraints: This paper concerns how to generate and understand discourse anaphoric noun phrases. I present the results of an analysis of all discourse anaphoric noun phrases (N=1,233) in a corpus of ten narrative monologues, where the choice between a definite pronoun or phrasal NP conforms largely to Gricean constraints on informativeness. I discuss Dale and Reiter's [To appear] recent model and show how it can be augmented for understanding as well as generating the range of data presented here. I argue that integrating centering [Grosz et al., 1983] [Kameyama, 1985] with this model can be applied uniformly to discourse anaphoric pronouns and phrasal NPs. I conclude with a hypothesis for addressing the interaction between local and global discourse processing.<|reference_end|> | arxiv | @article{passonneau1995integrating,
title={Integrating Gricean and Attentional Constraints},
author={Rebecca J. Passonneau (Bellcore)},
journal={arXiv preprint arXiv:cmp-lg/9505036},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505036},
primaryClass={cmp-lg cs.CL}
} | passonneau1995integrating |
arxiv-668648 | cmp-lg/9505037 | Identifying Word Translations in Non-Parallel Texts | <|reference_start|>Identifying Word Translations in Non-Parallel Texts: Common algorithms for sentence and word-alignment allow the automatic identification of word translations from parallel texts. This study suggests that the identification of word translations should also be possible with non-parallel and even unrelated texts. The method proposed is based on the assumption that there is a correlation between the patterns of word co-occurrences in texts of different languages.<|reference_end|> | arxiv | @article{rapp1995identifying,
title={Identifying Word Translations in Non-Parallel Texts},
author={Reinhard Rapp (ISSCO, University of Geneva)},
journal={Proceedings of ACL-95},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505037},
primaryClass={cmp-lg cs.CL}
} | rapp1995identifying |
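A minimal sketch of the central assumption in cmp-lg/9505037 above: translation pairs show correlated co-occurrence patterns, so given a small seed dictionary that fixes comparable vector dimensions in both languages, candidate pairs can be matched by vector similarity. This is an illustration, not the paper's exact procedure; the seed words, toy corpora (assumed lemmatized) and window size are invented, and cosine similarity stands in for the paper's pattern comparison.

from collections import Counter
from math import sqrt

def cooc_vector(corpus, word, seeds, window=2):
    # Count how often `word` co-occurs with each seed word.
    counts = Counter()
    for sent in corpus:
        for i, w in enumerate(sent):
            if w != word:
                continue
            lo, hi = max(0, i - window), min(len(sent), i + window + 1)
            for j in range(lo, hi):
                if j != i and sent[j] in seeds:
                    counts[sent[j]] += 1
    return [counts[s] for s in seeds]

def cosine(u, v):
    num = sum(a * b for a, b in zip(u, v))
    den = sqrt(sum(a * a for a in u)) * sqrt(sum(b * b for b in v))
    return num / den if den else 0.0

en_seeds, de_seeds = ["house", "red"], ["haus", "rot"]   # seed dictionary
en_corpus = [["the", "red", "house", "burns"], ["a", "red", "car"]]
de_corpus = [["das", "rot", "haus", "brennt"], ["ein", "rot", "auto"]]
v_en = cooc_vector(en_corpus, "burns", en_seeds)
v_de = cooc_vector(de_corpus, "brennt", de_seeds)
print(cosine(v_en, v_de))   # high similarity suggests a translation pair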
arxiv-668649 | cmp-lg/9505038 | Ubiquitous Talker: Spoken Language Interaction with Real World Objects | <|reference_start|>Ubiquitous Talker: Spoken Language Interaction with Real World Objects: Augmented reality is a research area that tries to embody an electronic information space within the real world, through computational devices. A crucial issue within this area is the recognition of real world objects or situations. In natural language processing, it is much easier to determine interpretations of utterances, even if they are ill-formed, when the context or situation is fixed. We therefore introduce robust, natural language processing into a system of augmented reality with situation awareness. Based on this idea, we have developed a portable system, called the Ubiquitous Talker. This consists of an LCD display that reflects the scene at which a user is looking as if it were a transparent glass, a CCD camera for recognizing real world objects with color-bar ID codes, a microphone for recognizing a human voice and a speaker which outputs a synthesized voice. The Ubiquitous Talker provides its user with some information related to a recognized object, by using the display and voice. It also accepts requests or questions as voice inputs. The user feels as if he/she is talking with the object itself through the system.<|reference_end|> | arxiv | @article{nagao1995ubiquitous,
title={Ubiquitous Talker: Spoken Language Interaction with Real World Objects},
author={Katashi Nagao and Jun Rekimoto (Sony Computer Science Laboratory Inc.)},
journal={arXiv preprint arXiv:cmp-lg/9505038},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505038},
primaryClass={cmp-lg cs.CL}
} | nagao1995ubiquitous |
arxiv-668650 | cmp-lg/9505039 | Generating efficient belief models for task-oriented dialogues | <|reference_start|>Generating efficient belief models for task-oriented dialogues: We have shown that belief modelling for dialogue can be simplified if the assumption is made that the participants are cooperating, i.e., they are not committed to any goals requiring deception. In such domains, there is no need to maintain individual representations of deeply nested beliefs; instead, three specific types of belief can be used to summarize all the states of nested belief that can exist about a domain entity. Here, we set out to design a ``compiler'' for belief models. This system will accept as input a description of agents' interactions with a task domain expressed in a fully-expressive belief logic with non-monotonic and temporal extensions. It generates an operational belief model for use in that domain, sufficient for the requirements of cooperative dialogue, including the negotiation of complex domain plans. The compiled model incorporates the belief simplification mentioned above, and also uses a simplified temporal logic of belief based on the restricted circumstances under which beliefs can change. We shall review the motivation for creating such a system, and introduce a general procedure for taking a logical specification for a domain and processing it into an operational model. We shall then discuss the specific changes that are made during this procedure for limiting the level of abstraction at which the concepts of belief nesting, default reasoning and time are expressed. Finally, we shall go through a worked example relating to the Map Task, a simple cooperative problem-solving exercise.<|reference_end|> | arxiv | @article{taylor1995generating,
title={Generating efficient belief models for task-oriented dialogues},
author={Jasper Taylor (Human Communication Research Centre University of
Edinburgh)},
journal={arXiv preprint arXiv:cmp-lg/9505039},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505039},
primaryClass={cmp-lg cs.CL}
} | taylor1995generating |
arxiv-668651 | cmp-lg/9505040 | Text Chunking using Transformation-Based Learning | <|reference_start|>Text Chunking using Transformation-Based Learning: Eric Brill introduced transformation-based learning and showed that it can do part-of-speech tagging with fairly high accuracy. The same method can be applied at a higher level of textual interpretation for locating chunks in the tagged text, including non-recursive ``baseNP'' chunks. For this purpose, it is convenient to view chunking as a tagging problem by encoding the chunk structure in new tags attached to each word. In automatic tests using Treebank-derived data, this technique achieved recall and precision rates of roughly 92% for baseNP chunks and 88% for somewhat more complex chunks that partition the sentence. Some interesting adaptations to the transformation-based learning approach are also suggested by this application.<|reference_end|> | arxiv | @article{ramshaw1995text,
title={Text Chunking using Transformation-Based Learning},
author={Lance A. Ramshaw (Bowdoin College) and Mitchell P. Marcus (University
of Pennsylvania)},
journal={ACL Third Workshop on Very Large Corpora, June 1995, pp. 82-94},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505040},
primaryClass={cmp-lg cs.CL}
} | ramshaw1995text |
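The chunks-as-tags encoding that turns chunking into tagging can be shown directly. The I/O/B scheme below follows the baseNP representation described above (I = inside a chunk, O = outside, B = first word of a chunk that immediately follows another chunk); the sentence and spans are toy examples.

def encode(words, chunks):
    # chunks: list of (start, end) spans, end exclusive, non-overlapping
    tags = ["O"] * len(words)
    prev_end = None
    for start, end in sorted(chunks):
        for i in range(start, end):
            tags[i] = "I"
        if start == prev_end:        # adjacent chunks need a boundary mark
            tags[start] = "B"
        prev_end = end
    return tags

def decode(tags):
    chunks, start = [], None
    for i, t in enumerate(tags + ["O"]):   # sentinel flushes a final chunk
        if t == "B" or (t == "I" and start is None):
            if start is not None:
                chunks.append((start, i))
            start = i
        elif t == "O" and start is not None:
            chunks.append((start, i))
            start = None
    return chunks

words = ["the", "cat", "sat", "on", "the", "mat"]
print(encode(words, [(0, 2), (4, 6)]))          # ['I', 'I', 'O', 'O', 'I', 'I']
print(decode(["I", "I", "O", "O", "I", "I"]))   # [(0, 2), (4, 6)]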
arxiv-668652 | cmp-lg/9505041 | On Descriptive Complexity, Language Complexity, and GB | <|reference_start|>On Descriptive Complexity, Language Complexity, and GB: We introduce $L^2_{K,P}$, a monadic second-order language for reasoning about trees which characterizes the strongly Context-Free Languages in the sense that a set of finite trees is definable in $L^2_{K,P}$ iff it is (modulo a projection) a Local Set---the set of derivation trees generated by a CFG. This provides a flexible approach to establishing language-theoretic complexity results for formalisms that are based on systems of well-formedness constraints on trees. We demonstrate this technique by sketching two such results for Government and Binding Theory. First, we show that {\em free-indexation\/}, the mechanism assumed to mediate a variety of agreement and binding relationships in GB, is not definable in $L^2_{K,P}$ and therefore not enforcible by CFGs. Second, we show how, in spite of this limitation, a reasonably complete GB account of English can be defined in $L^2_{K,P}$. Consequently, the language licensed by that account is strongly context-free. We illustrate some of the issues involved in establishing this result by looking at the definition, in $L^2_{K,P}$, of chains. The limitations of this definition provide some insight into the types of natural linguistic principles that correspond to higher levels of language complexity. We close with some speculation on the possible significance of these results for generative linguistics.<|reference_end|> | arxiv | @article{rogers1995on,
title={On Descriptive Complexity, Language Complexity, and GB},
author={James Rogers (Institute for Research in Cognitive Science, University
of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9505041},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505041},
primaryClass={cmp-lg cs.CL}
} | rogers1995on |
arxiv-668653 | cmp-lg/9505042 | Robust Parsing Based on Discourse Information: Completing partial parses of ill-formed sentences on the basis of discourse information | <|reference_start|>Robust Parsing Based on Discourse Information: Completing partial parses of ill-formed sentences on the basis of discourse information: In a consistent text, many words and phrases are repeatedly used in more than one sentence. When an identical phrase (a set of consecutive words) is repeated in different sentences, the constituent words of those sentences tend to be associated in identical modification patterns with identical parts of speech and identical modifiee-modifier relationships. Thus, when a syntactic parser cannot parse a sentence as a unified structure, parts of speech and modifiee-modifier relationships among morphologically identical words in complete parses of other sentences within the same text provide useful information for obtaining partial parses of the sentence. In this paper, we describe a method for completing partial parses by maintaining consistency among morphologically identical words within the same text as regards their part of speech and their modifiee-modifier relationship. The experimental results obtained by using this method with technical documents offer good prospects for improving the accuracy of sentence analysis in a broad-coverage natural language processing system such as a machine translation system.<|reference_end|> | arxiv | @article{nasukawa1995robust,
title={Robust Parsing Based on Discourse Information: Completing partial parses
of ill-formed sentences on the basis of discourse information},
author={Tetsuya Nasukawa (IBM Research, Tokyo Research Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9505042},
year={1995},
number={TRL-4372},
archivePrefix={arXiv},
eprint={cmp-lg/9505042},
primaryClass={cmp-lg cs.CL}
} | nasukawa1995robust |
arxiv-668654 | cmp-lg/9505043 | Using Decision Trees for Coreference Resolution | <|reference_start|>Using Decision Trees for Coreference Resolution: This paper describes RESOLVE, a system that uses decision trees to learn how to classify coreferent phrases in the domain of business joint ventures. An experiment is presented in which the performance of RESOLVE is compared to the performance of a manually engineered set of rules for the same task. The results show that decision trees achieve higher performance than the rules in two of three evaluation metrics developed for the coreference task. In addition to achieving better performance than the rules, RESOLVE provides a framework that facilitates the exploration of the types of knowledge that are useful for solving the coreference problem.<|reference_end|> | arxiv | @article{mccarthy1995using,
title={Using Decision Trees for Coreference Resolution},
author={Joseph F. McCarthy and Wendy G. Lehnert (University of Massachusetts)},
journal={arXiv preprint arXiv:cmp-lg/9505043},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505043},
primaryClass={cmp-lg cs.CL}
} | mccarthy1995using |
arxiv-668655 | cmp-lg/9505044 | Automatic Evaluation and Uniform Filter Cascades for Inducing N-Best Translation Lexicons | <|reference_start|>Automatic Evaluation and Uniform Filter Cascades for Inducing N-Best Translation Lexicons: This paper shows how to induce an N-best translation lexicon from a bilingual text corpus using statistical properties of the corpus together with four external knowledge sources. The knowledge sources are cast as filters, so that any subset of them can be cascaded in a uniform framework. A new objective evaluation measure is used to compare the quality of lexicons induced with different filter cascades. The best filter cascades improve lexicon quality by up to 137% over the plain vanilla statistical method, and approach human performance. Drastically reducing the size of the training corpus has a much smaller impact on lexicon quality when these knowledge sources are used. This makes it practical to train on small hand-built corpora for language pairs where large bilingual corpora are unavailable. Moreover, three of the four filters prove useful even when used with large training corpora.<|reference_end|> | arxiv | @article{melamed1995automatic,
title={Automatic Evaluation and Uniform Filter Cascades for Inducing N-Best
Translation Lexicons},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9505044},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9505044},
primaryClass={cmp-lg cs.CL}
} | melamed1995automatic |
arxiv-668656 | cmp-lg/9505045 | Hybrid Transfer in an English-French Spoken Language Translator | <|reference_start|>Hybrid Transfer in an English-French Spoken Language Translator: The paper argues the importance of high-quality translation for spoken language translation systems. It describes an architecture suitable for rapid development of high-quality limited-domain translation systems, which has been implemented within an advanced prototype English to French spoken language translator. The focus of the paper is the hybrid transfer model which combines unification-based rules and a set of trainable statistical preferences; roughly, rules encode domain-independent grammatical information and preferences encode domain-dependent distributional information. The preferences are trained from sets of examples produced by the system, which have been annotated by human judges as correct or incorrect. An experiment is described in which the model was tested on a 2000 utterance sample of previously unseen data.<|reference_end|> | arxiv | @article{rayner1995hybrid,
title={Hybrid Transfer in an English-French Spoken Language Translator},
author={Manny Rayner (SRI International, Cambridge, UK), Pierrette Bouillon
(ISSCO, Geneva, Switzerland)},
journal={arXiv preprint arXiv:cmp-lg/9505045},
year={1995},
number={CRC-056; see http://www.cam.sri.com/},
archivePrefix={arXiv},
eprint={cmp-lg/9505045},
primaryClass={cmp-lg cs.CL}
} | rayner1995hybrid |
arxiv-668657 | cmp-lg/9506001 | Ma(r)king concessions in English and German | <|reference_start|>Ma(r)king concessions in English and German: In order to generate cohesive discourse, many of the relations holding between text segments need to be signalled to the reader by means of cue words, or {\em discourse markers}. Programs usually do this in a simplistic way, e.g., by using one marker per relation. In reality, however, language offers a very wide range of markers from which informed choices should be made. In order to account for the variety and to identify the parameters governing the choices, detailed linguistic analyses are necessary. We worked with one area of discourse relations, the Concession family, identified its underlying pragmatics and semantics, and undertook extensive corpus studies to examine the range of markers used in both English and German. On the basis of an initial classification of these markers, we propose a generation model for producing bilingual text that can incorporate marker choice into its overall decision framework.<|reference_end|> | arxiv | @article{grote1995ma(r)king,
title={Ma(r)king concessions in English and German},
author={Brigitte Grote (FAW Ulm), Nils Lenke (Universitaet Duisburg), Manfred
Stede (FAW Ulm and University of Toronto)},
journal={Proc. of the 6th European Workshop on Natural Language Generation,
NL-Leiden, May 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506001},
primaryClass={cmp-lg cs.CL}
} | grote1995ma(r)king |
arxiv-668658 | cmp-lg/9506002 | Weak subsumption Constraints for Type Diagnosis: An Incremental Algorithm | <|reference_start|>Weak subsumption Constraints for Type Diagnosis: An Incremental Algorithm: We introduce constraints necessary for type checking a higher-order concurrent constraint language, and solve them with an incremental algorithm. Our constraint system extends rational unification by constraints $x \subseteq y$ saying that ``$x$ has at least the structure of $y$'', modelled by a weak instance relation between trees. This notion of instance has been carefully chosen to be weaker than the usual one which renders semi-unification undecidable. Semi-unification has more than once served to link unification problems arising from type inference and those considered in computational linguistics. Just as polymorphic recursion corresponds to subsumption through the semi-unification problem, our type constraint problem corresponds to weak subsumption of feature graphs in linguistics. The decidability problem for weak subsumption for feature graphs has been settled by D\"orre~\cite{Doerre:WeakSubsumption:94}. In contrast to D\"orre's, our algorithm is fully incremental and does not refer to finite state automata. Our algorithm is also a lot more flexible. It allows a number of extensions (records, sorts, disjunctive types, type declarations, and others) which make it suitable for type inference of a full-fledged programming language.<|reference_end|> | arxiv | @article{mueller1995weak,
title={Weak subsumption Constraints for Type Diagnosis: An Incremental
Algorithm},
author={Martin Mueller, Joachim Niehren (German Research Center for Artificial
Intelligence (DFKI), Saarbrücken)},
journal={arXiv preprint arXiv:cmp-lg/9506002},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506002},
primaryClass={cmp-lg cs.CL}
} | mueller1995weak |
arxiv-668659 | cmp-lg/9506003 | Syllable parsing in English and French | <|reference_start|>Syllable parsing in English and French: In this paper I argue that Optimality Theory provides for an explanatory model of syllabic parsing in English and French. The argument is based on psycholinguistic facts that have been mysterious up to now. This argument is further buttressed by the computational implementation developed here. This model is important for several reasons. First, it provides a demonstration of how OT can be used in a performance domain. Second, it suggests a new relationship between phonological theory and psycholinguistics. (Code in Perl is included and a WWW-interface is running at http://mayo.douglass.arizona.edu.)<|reference_end|> | arxiv | @article{hammond1995syllable,
title={Syllable parsing in English and French},
author={Michael Hammond (University of Arizona)},
journal={arXiv preprint arXiv:cmp-lg/9506003},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506003},
primaryClass={cmp-lg cs.CL}
} | hammond1995syllable |
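The parser's core operation, choosing the syllabification that best satisfies a ranked constraint hierarchy, reduces to a lexicographic minimum over constraint-violation profiles. The sketch is in Python rather than the paper's Perl, and the two constraints and the candidate set are simplified toy examples.

def onset(syllables):          # penalize vowel-initial syllables
    return sum(1 for s in syllables if s[0] in "aeiou")

def no_coda(syllables):        # penalize consonant-final syllables
    return sum(1 for s in syllables if s[-1] not in "aeiou")

RANKING = [onset, no_coda]     # onset outranks no_coda

def optimal(candidates):
    # lexicographic comparison of violation profiles = OT evaluation
    return min(candidates, key=lambda c: [con(c) for con in RANKING])

# two candidate parses of "atlas": a.tlas vs. at.las
print(optimal([["a", "tlas"], ["at", "las"]]))   # ['a', 'tlas']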
arxiv-668660 | cmp-lg/9506004 | Using Higher-Order Logic Programming for Semantic Interpretation of Coordinate Constructs | <|reference_start|>Using Higher-Order Logic Programming for Semantic Interpretation of Coordinate Constructs: Many theories of semantic interpretation use lambda-term manipulation to compositionally compute the meaning of a sentence. These theories are usually implemented in a language such as Prolog that can simulate lambda-term operations with first-order unification. However, for some interesting cases, such as a Combinatory Categorial Grammar account of coordination constructs, this can only be done by obscuring the underlying linguistic theory with the ``tricks'' needed for implementation. This paper shows how the use of abstract syntax permitted by higher-order logic programming allows an elegant implementation of the semantics of Combinatory Categorial Grammar, including its handling of coordination constructs.<|reference_end|> | arxiv | @article{kulick1995using,
title={Using Higher-Order Logic Programming for Semantic Interpretation of
Coordinate Constructs},
author={Seth Kulick (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9506004},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506004},
primaryClass={cmp-lg cs.CL}
} | kulick1995using |
arxiv-668661 | cmp-lg/9506005 | A Support Tool for Tagset Mapping | <|reference_start|>A Support Tool for Tagset Mapping: Many different tagsets are used in existing corpora; these tagsets vary according to the objectives of specific projects (which may be as far apart as robust parsing vs. spelling correction). In many situations, however, one would like to have uniform access to the linguistic information encoded in corpus annotations without having to know the classification schemes in detail. This paper describes a tool which maps unstructured morphosyntactic tags to a constraint-based, typed, configurable specification language, a ``standard tagset''. The mapping relies on a manually written set of mapping rules, which is automatically checked for consistency. In certain cases, unsharp mappings are unavoidable, and noise, i.e. groups of word forms {\sl not} conforming to the specification, will appear in the output of the mapping. The system automatically detects such noise and informs the user about it. The tool has been tested with rules for the UPenn tagset \cite{up} and the SUSANNE tagset \cite{garside}, in the framework of the EAGLES\footnote{LRE project EAGLES, cf. \cite{eagles}.} validation phase for standardised tagsets for European languages.<|reference_end|> | arxiv | @article{teufel1995a,
title={A Support Tool for Tagset Mapping},
author={Simone Teufel (IMS-CL, University of Stuttgart)},
journal={arXiv preprint arXiv:cmp-lg/9506005},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506005},
primaryClass={cmp-lg cs.CL}
} | teufel1995a |
arxiv-668662 | cmp-lg/9506006 | Automatic Extraction of Tagset Mappings from Parallel-Annotated Corpora | <|reference_start|>Automatic Extraction of Tagset Mappings from Parallel-Annotated Corpora: This paper describes some of the recent work of project AMALGAM (automatic mapping among lexico-grammatical annotation models). We are investigating ways to map between the leading corpus annotation schemes in order to improve their reusability. Collation of all the included corpora into a single large annotated corpus will allow a more detailed language model to be developed for tasks such as speech and handwriting recognition. In particular, we focus here on a method of extracting mappings from corpora that have been annotated according to more than one annotation scheme.<|reference_end|> | arxiv | @article{hughes1995automatic,
title={Automatic Extraction of Tagset Mappings from Parallel-Annotated Corpora},
author={John Hughes, Clive Souter and Eric Atwell (University of Leeds, UK)},
journal={arXiv preprint arXiv:cmp-lg/9506006},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506006},
primaryClass={cmp-lg cs.CL}
} | hughes1995automatic |
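One way to extract a mapping from a parallel-annotated corpus is to align the two annotation streams token by token, count tag co-occurrences, and read off the dominant correspondence for each source tag. The sketch below shows that idea on invented data; the project's actual method must also cope with tags that map one-to-many.

from collections import Counter, defaultdict

scheme_a = [("the", "AT"), ("dog", "NN"), ("runs", "VBZ")]
scheme_b = [("the", "DET"), ("dog", "NOUN"), ("runs", "VERB")]

pairs = Counter((ta, tb) for (_, ta), (_, tb) in zip(scheme_a, scheme_b))
mapping = defaultdict(Counter)
for (ta, tb), n in pairs.items():
    mapping[ta][tb] += n

for ta, dist in mapping.items():
    tb, _ = dist.most_common(1)[0]   # dominant target tag wins
    print(ta, "->", tb)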
arxiv-668663 | cmp-lg/9506007 | Features and Agreement | <|reference_start|>Features and Agreement: This paper compares the consistency-based account of agreement phenomena in `unification-based' grammars with an implication-based account based on a simple feature extension to Lambek Categorial Grammar (LCG). We show that the LCG treatment accounts for constructions that have been recognized as problematic for `unification-based' treatments.<|reference_end|> | arxiv | @article{bayer1995features,
title={Features and Agreement},
author={Sam Bayer and Mark Johnson (Brown University)},
journal={arXiv preprint arXiv:cmp-lg/9506007},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506007},
primaryClass={cmp-lg cs.CL}
} | bayer1995features |
arxiv-668664 | cmp-lg/9506008 | CLiFF Notes: Research in the Language, Information and Computation Laboratory of the University of Pennsylvania | <|reference_start|>CLiFF Notes: Research in the Language, Information and Computation Laboratory of the University of Pennsylvania: Short abstracts by computational linguistics researchers at the University of Pennsylvania describing ongoing individual and joint projects.<|reference_end|> | arxiv | @article{editors1995cliff,
title={CLiFF Notes: Research in the Language, Information and Computation
Laboratory of the University of Pennsylvania},
author={Editors: Matthew Stone and Libby Levison},
journal={arXiv preprint arXiv:cmp-lg/9506008},
year={1995},
number={Technical Report CIS 95-07},
archivePrefix={arXiv},
eprint={cmp-lg/9506008},
primaryClass={cmp-lg cs.CL}
} | editors1995cliff |
arxiv-668665 | cmp-lg/9506009 | Filling Knowledge Gaps in a Broad-Coverage Machine Translation System | <|reference_start|>Filling Knowledge Gaps in a Broad-Coverage Machine Translation System: Knowledge-based machine translation (KBMT) techniques yield high quality in domains with detailed semantic models, limited vocabulary, and controlled input grammar. Scaling up along these dimensions means acquiring large knowledge resources. It also means behaving reasonably when definitive knowledge is not yet available. This paper describes how we can fill various KBMT knowledge gaps, often using robust statistical techniques. We describe quantitative and qualitative results from JAPANGLOSS, a broad-coverage Japanese-English MT system.<|reference_end|> | arxiv | @article{knight1995filling,
title={Filling Knowledge Gaps in a Broad-Coverage Machine Translation System},
author={Kevin Knight, Ishwar Chander, Matthew Haines, Vasileios
Hatzivassiloglou, Eduard Hovy, Masayo Iida, Steve K. Luk, Richard Whitney,
Kenji Yamada (USC/Information Sciences Institute)},
journal={arXiv preprint arXiv:cmp-lg/9506009},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506009},
primaryClass={cmp-lg cs.CL}
} | knight1995filling |
arxiv-668666 | cmp-lg/9506010 | Two-level, Many-Paths Generation | <|reference_start|>Two-level, Many-Paths Generation: Large-scale natural language generation requires the integration of vast amounts of knowledge: lexical, grammatical, and conceptual. A robust generator must be able to operate well even when pieces of knowledge are missing. It must also be robust against incomplete or inaccurate inputs. To attack these problems, we have built a hybrid generator, in which gaps in symbolic knowledge are filled by statistical methods. We describe algorithms and show experimental results. We also discuss how the hybrid generation model can be used to simplify current generators and enhance their portability, even when perfect knowledge is in principle obtainable.<|reference_end|> | arxiv | @article{knight1995two-level,,
title={Two-level, Many-Paths Generation},
author={Kevin Knight (USC/Information Sciences Institute) and Vasileios
Hatzivassiloglou (Columbia University)},
journal={arXiv preprint arXiv:cmp-lg/9506010},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506010},
primaryClass={cmp-lg cs.CL}
} | knight1995two-level, |
arxiv-668667 | cmp-lg/9506011 | Unification-Based Glossing | <|reference_start|>Unification-Based Glossing: We present an approach to syntax-based machine translation that combines unification-style interpretation with statistical processing. This approach enables us to translate any Japanese newspaper article into English, with quality far better than a word-for-word translation. Novel ideas include the use of feature structures to encode word lattices and the use of unification to compose and manipulate lattices. Unification also allows us to specify abstract features that delay target-language synthesis until enough source-language information is assembled. Our statistical component enables us to search efficiently among competing translations and locate those with high English fluency.<|reference_end|> | arxiv | @article{hatzivassiloglou1995unification-based,
title={Unification-Based Glossing},
author={Vasileios Hatzivassiloglou (Columbia University) and Kevin Knight
(USC/Information Sciences Institute)},
journal={arXiv preprint arXiv:cmp-lg/9506011},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506011},
primaryClass={cmp-lg cs.CL}
} | hatzivassiloglou1995unification-based |
arxiv-668668 | cmp-lg/9506012 | Presenting Punctuation | <|reference_start|>Presenting Punctuation: Until recently, punctuation has received very little attention in the linguistics and computational linguistics literature. Since the publication of Nunberg's (1990) monograph on the topic, however, punctuation has seen its stock begin to rise: spurred in part by Nunberg's ground-breaking work, a number of valuable inquiries have been subsequently undertaken, including Hovy and Arens (1991), Dale (1991), Pascual (1993), Jones (1994), and Briscoe (1994). Continuing this line of research, I investigate in this paper how Nunberg's approach to presenting punctuation (and other formatting devices) might be incorporated into NLG systems. Insofar as the present paper focuses on the proper syntactic treatment of punctuation, it differs from these other subsequent works in that it is the first to examine this issue from the generation perspective.<|reference_end|> | arxiv | @article{white1995presenting,
title={Presenting Punctuation},
author={Michael White (CoGenTex, Inc.)},
journal={In Proceedings of the Fifth European Workshop on Natural Language
Generation, Leiden, The Netherlands, pp. 107--125.},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506012},
primaryClass={cmp-lg cs.CL}
} | white1995presenting |
arxiv-668669 | cmp-lg/9506013 | A Study of the Context(s) in a Specific Type of Texts: Car Accident Reports | <|reference_start|>A Study of the Context(s) in a Specific Type of Texts: Car Accident Reports: This paper addresses the issue of defining context, and more specifically the different contexts needed for understanding a particular type of texts. The corpus chosen is homogeneous and allows us to determine characteristic properties of the texts from which certain inferences can be drawn by the reader. These characteristic properties come from the real world domain (K-context), the type of events the texts describe (F-context) and the genre of the texts (E-context). Together, these three contexts provide elements for the resolution of anaphoric expressions and for several types of disambiguation. We show in particular that the argumentation aspect of these texts is an essential part of the context and explains some of the inferences that can be drawn.<|reference_end|> | arxiv | @article{estival1995a,
title={A Study of the Context(s) in a Specific Type of Texts: Car Accident
Reports},
author={Dominique Estival (ISSCO, Universite de Geneve), Francoise Gayral
(LIPN, Universite Paris-Nord)},
journal={arXiv preprint arXiv:cmp-lg/9506013},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506013},
primaryClass={cmp-lg cs.CL}
} | estival1995a |
arxiv-668670 | cmp-lg/9506014 | Inducing Features of Random Fields | <|reference_start|>Inducing Features of Random Fields: We present a technique for constructing random fields from a set of training samples. The learning paradigm builds increasingly complex fields by allowing potential functions, or features, that are supported by increasingly large subgraphs. Each feature has a weight that is trained by minimizing the Kullback-Leibler divergence between the model and the empirical distribution of the training data. A greedy algorithm determines how features are incrementally added to the field and an iterative scaling algorithm is used to estimate the optimal values of the weights. The statistical modeling techniques introduced in this paper differ from those common to much of the natural language processing literature since there is no probabilistic finite state or push-down automaton on which the model is built. Our approach also differs from the techniques common to the computer vision literature in that the underlying random fields are non-Markovian and have a large number of parameters that must be estimated. Relations to other learning approaches including decision trees and Boltzmann machines are given. As a demonstration of the method, we describe its application to the problem of automatic word classification in natural language processing. Key words: random field, Kullback-Leibler divergence, iterative scaling, divergence geometry, maximum entropy, EM algorithm, statistical learning, clustering, word morphology, natural language processing<|reference_end|> | arxiv | @article{della pietra1995inducing,
title={Inducing Features of Random Fields},
author={S. Della Pietra, V. Della Pietra (IBM), and J. Lafferty (CMU)},
journal={arXiv preprint arXiv:cmp-lg/9506014},
year={1995},
number={CMU-CS-95-144},
archivePrefix={arXiv},
eprint={cmp-lg/9506014},
primaryClass={cmp-lg cs.CL}
} | della pietra1995inducing |
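The weight-estimation step can be illustrated with a minimal Generalized Iterative Scaling loop: each weight moves by (1/C) log(empirical/model expectation), where C is a constant total feature count enforced by a slack feature. The sketch covers only this scaling step on a toy unconditional model; the paper's greedy feature induction and its non-Markovian fields are omitted, and the events and features are invented.

import math

events = ["aa", "ab", "ba", "bb"]
feats = [lambda e: e[0] == "a", lambda e: e[1] == "b"]
empirical = {"aa": 0.4, "ab": 0.3, "ba": 0.2, "bb": 0.1}
C = len(feats) + 1

def counts(e):
    c = [1.0 if f(e) else 0.0 for f in feats]
    return c + [C - sum(c)]          # slack feature pads to total C

w = [0.0] * (len(feats) + 1)
for _ in range(500):
    score = {e: math.exp(sum(wi * ci for wi, ci in zip(w, counts(e))))
             for e in events}
    Z = sum(score.values())
    p = {e: s / Z for e, s in score.items()}
    for i in range(len(w)):
        emp = sum(empirical[e] * counts(e)[i] for e in events)
        exp_ = sum(p[e] * counts(e)[i] for e in events)
        w[i] += math.log(emp / exp_) / C

print({e: round(q, 3) for e, q in p.items()})
# model feature expectations now match the empirical ones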
arxiv-668671 | cmp-lg/9506015 | Ambiguity in the Acquisition of Lexical Information | <|reference_start|>Ambiguity in the Acquisition of Lexical Information: This paper describes an approach to the automatic identification of lexical information in on-line dictionaries. This approach uses bootstrapping techniques, specifically so that ambiguity in the dictionary text can be treated properly. This approach consists of processing an on-line dictionary multiple times, each time refining the lexical information previously acquired and identifying new lexical information. The strength of this approach is that lexical information can be acquired from definitions which are syntactically ambiguous, given that information acquired during the first pass can be used to improve the syntactic analysis of definitions in subsequent passes. In the context of a lexical knowledge base, the types of lexical information that need to be represented cannot be viewed as a fixed set, but rather as a set that will change given the resources of the lexical knowledge base and the requirements of analysis systems which access it.<|reference_end|> | arxiv | @article{vanderwende1995ambiguity,
title={Ambiguity in the Acquisition of Lexical Information},
author={Lucy Vanderwende},
journal={arXiv preprint arXiv:cmp-lg/9506015},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506015},
primaryClass={cmp-lg cs.CL}
} | vanderwende1995ambiguity |
arxiv-668672 | cmp-lg/9506016 | Indefeasible Semantics and Defeasible Pragmatics | <|reference_start|>Indefeasible Semantics and Defeasible Pragmatics: An account of utterance interpretation in discourse needs to face the issue of how the discourse context controls the space of interacting preferences. Assuming a discourse processing architecture that distinguishes the grammar and pragmatics subsystems in terms of monotonic and nonmonotonic inferences, I will discuss how independently motivated default preferences interact in the interpretation of intersentential pronominal anaphora. In the framework of a general discourse processing model that integrates both the grammar and pragmatics subsystems, I will propose a fine structure of the preferential interpretation in pragmatics in terms of defeasible rule interactions. The pronoun interpretation preferences that serve as the empirical ground draw from the survey data specifically obtained for the present purpose.<|reference_end|> | arxiv | @article{kameyama1995indefeasible,
title={Indefeasible Semantics and Defeasible Pragmatics},
author={Megumi Kameyama (AI Center and CSLI, SRI International)},
journal={arXiv preprint arXiv:cmp-lg/9506016},
year={1995},
number={This is a revised and shorter version of CWI Report CS-R9441 and SRI
Technical Note 544, 1994 (these reports contain an additional section on
prioritized circumscription -- available from web page
http://www.ai.sri.com/~megumi/ )},
archivePrefix={arXiv},
eprint={cmp-lg/9506016},
primaryClass={cmp-lg cs.CL}
} | kameyama1995indefeasible |
arxiv-668673 | cmp-lg/9506017 | The Effect of Pitch Accenting on Pronoun Referent Resolution | <|reference_start|>The Effect of Pitch Accenting on Pronoun Referent Resolution: By strictest interpretation, theories of both centering and intonational meaning fail to predict the existence of pitch accented pronominals. Yet they occur felicitously in spoken discourse. To explain this, I emphasize the dual functions served by pitch accents, as markers of both propositional (semantic/pragmatic) and attentional salience. This distinction underlies my proposals about the attentional consequences of pitch accents when applied to pronominals, in particular, that while most pitch accents may weaken or reinforce a cospecifier's status as the center of attention, a contrastively stressed pronominal may force a shift, even when contraindicated by textual features.<|reference_end|> | arxiv | @article{cahn1995the,
title={The Effect of Pitch Accenting on Pronoun Referent Resolution},
author={Janet Cahn (Massachusetts Institute of Technology)},
journal={Proceedings of the ACL, 1995.},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506017},
primaryClass={cmp-lg cs.CL}
} | cahn1995the |
arxiv-668674 | cmp-lg/9506018 | Intelligent Voice Prosthesis: Converting Icons into Natural Language Sentences | <|reference_start|>Intelligent Voice Prosthesis: Converting Icons into Natural Language Sentences: The Intelligent Voice Prosthesis is a communication tool which reconstructs the meaning of an ill-structured sequence of icons or symbols, and expresses this meaning in sentences of a Natural Language (French). It has been developed for the use of people who cannot express themselves orally in natural language, and further, who are not able to comply with grammatical rules such as those of natural language. We describe how available corpora of iconic communication by children with Cerebral Palsy have led us to implement a simple and relevant semantic description of the symbol lexicon. We then show how a unification-based, bottom-up semantic analysis allows the system to uncover the meaning of the user's utterances by computing proper dependencies between the symbols. The result of the analysis is then passed to a lexicalization module which chooses the right words of natural language to use, and builds a linguistic semantic network. This semantic network is then generated into French sentences via hierarchization into trees, using a lexicalized Tree Adjoining Grammar. Finally we describe the modular, customizable interface which has been developed for this system.<|reference_end|> | arxiv | @article{vaillant1995intelligent,
title={Intelligent Voice Prosthesis: Converting Icons into Natural Language
Sentences},
author={Pascal Vaillant (Thomson-CSF LCR), Michael Checler (ESIEA /
Thomson-CSF LCR)},
journal={arXiv preprint arXiv:cmp-lg/9506018},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506018},
primaryClass={cmp-lg cs.CL}
} | vaillant1995intelligent |
arxiv-668675 | cmp-lg/9506019 | Review of Charniak's "Statistical Language Learning" | <|reference_start|>Review of Charniak's "Statistical Language Learning": This article is an in-depth review of Eugene Charniak's book, "Statistical Language Learning". The review evaluates the appropriateness of the book as an introductory text for statistical language learning for a variety of audiences. It also includes an extensive bibliography of articles and papers which might be used as a supplement to this book for learning or teaching statistical language modeling.<|reference_end|> | arxiv | @article{magerman1995review,
title={Review of Charniak's "Statistical Language Learning"},
author={David M. Magerman},
journal={Computational Linguistics 21:1, 103-111},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506019},
primaryClass={cmp-lg cs.CL}
} | magerman1995review |
arxiv-668676 | cmp-lg/9506020 | GLR-Parsing of Word Lattices Using a Beam Search Method | <|reference_start|>GLR-Parsing of Word Lattices Using a Beam Search Method: This paper presents an approach that allows the efficient integration of speech recognition and language understanding using Tomita's generalized LR-parsing algorithm. For this purpose the GLRP-algorithm is revised so that an agenda mechanism can be used to control the flow of computation of the parsing process. This new approach is used to integrate speech recognition and speech understanding incrementally with a beam search method. These considerations have been implemented and tested on ten word lattices.<|reference_end|> | arxiv | @article{staab1995glr-parsing,
title={GLR-Parsing of Word Lattices Using a Beam Search Method},
author={Steffen Staab (Universitaet Erlangen-Nuernberg, IMMD 8)},
journal={arXiv preprint arXiv:cmp-lg/9506020},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506020},
primaryClass={cmp-lg cs.CL}
} | staab1995glr-parsing |
arxiv-668677 | cmp-lg/9506021 | Prepositional Phrase Attachment through a Backed-Off Model | <|reference_start|>Prepositional Phrase Attachment through a Backed-Off Model: Recent work has considered corpus-based or statistical approaches to the problem of prepositional phrase attachment ambiguity. Typically, ambiguous verb phrases of the form {v np1 p np2} are resolved through a model which considers values of the four head words (v, n1, p and n2). This paper shows that the problem is analogous to n-gram language models in speech recognition, and that one of the most common methods for language modeling, the backed-off estimate, is applicable. Results on Wall Street Journal data of 84.5% accuracy are obtained using this method. A surprising result is the importance of low-count events - ignoring events which occur less than 5 times in training data reduces performance to 81.6%.<|reference_end|> | arxiv | @article{collins1995prepositional,
title={Prepositional Phrase Attachment through a Backed-Off Model},
author={Michael Collins, James Brooks (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9506021},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506021},
primaryClass={cmp-lg cs.CL}
} | collins1995prepositional |
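The backed-off estimate works as a fall-through over context sizes: answer from quadruple counts when available, otherwise sum counts over the sub-tuples that retain the preposition, first triples, then pairs, then a default. The counts below are invented; the paper estimates them from Penn Treebank data and adds a preposition-only estimate before the final default.

from collections import Counter
from itertools import combinations

table = Counter()                    # (sub_tuple, noun_attach?) -> count

def contexts(v, n1, p, n2):
    # all sub-tuples of (v, n1, p, n2) that retain the preposition
    full = (v, n1, p, n2)
    for r in (4, 3, 2):
        for idx in combinations(range(4), r):
            if 2 in idx:             # index 2 is the preposition
                yield r, tuple(full[i] for i in idx)

def train(v, n1, p, n2, noun_attach):
    for _, sub in contexts(v, n1, p, n2):
        table[(sub, noun_attach)] += 1

def p_noun_attach(v, n1, p, n2):
    for level in (4, 3, 2):          # longest context first
        num = den = 0
        for r, sub in contexts(v, n1, p, n2):
            if r == level:
                num += table[(sub, True)]
                den += table[(sub, True)] + table[(sub, False)]
        if den > 0:
            return num / den
    return 1.0                       # default: attach to the noun

train("eat", "pizza", "with", "anchovies", True)
train("eat", "pizza", "with", "friends", False)
print(p_noun_attach("eat", "salad", "with", "anchovies"))   # 1.0, via triples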
arxiv-668678 | cmp-lg/9506022 | Deriving Procedural and Warning Instructions from Device and Environment Models | <|reference_start|>Deriving Procedural and Warning Instructions from Device and Environment Models: This study is centred on the generation of instructions for household appliances. We show how knowledge about a device, together with knowledge about the environment, can be used for reasoning about instructions. The information communicated by the instructions can be planned from a version of the knowledge of the artifact and environment. We present the latter, which we call the {\it planning knowledge}, in the form of axioms in the {\it situation calculus}. This planning knowledge formally characterizes the behaviour of the artifact, and it is used to produce a basic plan of actions that the device and user take to accomplish a given goal. We explain how both procedural and warning instructions can be generated from this basic plan. In order to partially justify that instruction generation can be automated from a formal device design specification, we assume that the planning knowledge is {\it derivable\/} from the device and world knowledge.<|reference_end|> | arxiv | @article{ansari1995deriving,
title={Deriving Procedural and Warning Instructions from Device and Environment
Models},
author={Daniel Ansari (University of Toronto)},
journal={arXiv preprint arXiv:cmp-lg/9506022},
year={1995},
number={CSRI-329},
archivePrefix={arXiv},
eprint={cmp-lg/9506022},
primaryClass={cmp-lg cs.CL}
} | ansari1995deriving |
arxiv-668679 | cmp-lg/9506023 | Empirical Discovery in Linguistics | <|reference_start|>Empirical Discovery in Linguistics: A discovery system for detecting correspondences in data is described, based on the familiar induction methods of J. S. Mill. Given a set of observations, the system induces the ``causally'' related facts in these observations. Its application to empirical linguistic discovery is described.<|reference_end|> | arxiv | @article{pericliev1995empirical,
title={Empirical Discovery in Linguistics},
author={Vladimir Pericliev (Institute of Mathematics, bl.8, 1113 Sofia,
Bulgaria)},
journal={arXiv preprint arXiv:cmp-lg/9506023},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506023},
primaryClass={cmp-lg cs.CL}
} | pericliev1995empirical |
arxiv-668680 | cmp-lg/9506024 | An Approach to Proper Name Tagging for German | <|reference_start|>An Approach to Proper Name Tagging for German: This paper presents an incremental method for the tagging of proper names in German newspaper texts. The tagging is performed by the analysis of the syntactic and textual contexts of proper names together with a morphological analysis. The proper names selected by this process supply new contexts which can be used for finding new proper names, and so on. This procedure was applied to a small German corpus (50,000 words) and correctly disambiguated 65% of the capitalized words, which should improve when it is applied to a very large corpus.<|reference_end|> | arxiv | @article{thielen1995an,
title={An Approach to Proper Name Tagging for German},
author={Christine Thielen (SfS, University of Tübingen)},
journal={arXiv preprint arXiv:cmp-lg/9506024},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506024},
primaryClass={cmp-lg cs.CL}
} | thielen1995an |
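The incremental core of the method is a bootstrap to a fixpoint: seed contexts yield names, newly found names yield further name-indicating contexts, and the loop stops when nothing new appears. The toy corpus of (left context, word) pairs is invented, and the paper's morphological analysis of candidates is left out.

corpus = [("Herr", "Kohl"), ("Herr", "Waigel"), ("Minister", "Waigel"),
          ("Minister", "Kinkel"), ("der", "Mann")]

contexts, names = {"Herr"}, set()
while True:
    new_names = {w for c, w in corpus if c in contexts} - names
    names |= new_names
    new_contexts = {c for c, w in corpus if w in names} - contexts
    contexts |= new_contexts
    if not new_names and not new_contexts:
        break

print(sorted(names))      # ['Kinkel', 'Kohl', 'Waigel']
print(sorted(contexts))   # ['Herr', 'Minister']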
arxiv-668681 | cmp-lg/9506025 | A Categorial Framework for Composition in Multiple Linguistic Domains | <|reference_start|>A Categorial Framework for Composition in Multiple Linguistic Domains: This paper describes a computational framework for a grammar architecture in which different linguistic domains such as morphology, syntax, and semantics are treated not as separate components but compositional domains. Word and phrase formation are modeled as uniform processes contributing to the derivation of the semantic form. The morpheme, as well as the lexeme, has lexical representation in the form of semantic content, tactical constraints, and phonological realization. The motivation for this work is to handle morphology-syntax interaction (e.g., valency change in causatives, subcategorization imposed by case-marking affixes) in an incremental way. The model is based on Combinatory Categorial Grammars.<|reference_end|> | arxiv | @article{bozsahin1995a,
title={A Categorial Framework for Composition in Multiple Linguistic Domains},
author={Cem Bozsahin and Elvan Gocmen (Middle East Technical University)},
journal={Proceedings of the Fourth Int Conf on Cognitive Science of NLP},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506025},
primaryClass={cmp-lg cs.CL}
} | bozsahin1995a |
arxiv-668682 | cmp-lg/9506026 | A Computational Approach to Aspectual Composition | <|reference_start|>A Computational Approach to Aspectual Composition: In this paper, I argue, contrary to the prevailing opinion in the linguistics and philosophy literature, that a sortal approach to aspectual composition can indeed be explanatory. In support of this view, I develop a synthesis of competing proposals by Hinrichs, Krifka and Jackendoff which takes Jackendoff's cross-cutting sortal distinctions as its point of departure. To show that the account is well-suited for computational purposes, I also sketch an implemented calculus of eventualities which yields many of the desired inferences. Further details on both the model-theoretic semantics and the implementation can be found in (White, 1994).<|reference_end|> | arxiv | @article{white1995a,
title={A Computational Approach to Aspectual Composition},
author={Michael White (CoGenTex, Inc.)},
journal={In Workshop Notes of the 5th International Workshop TSM 95 (Time,
Space and Movement: Meaning and Knowledge in the Sensible World), Chateau de
Bonas, Gascony, France, 23-27 June 1995.},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9506026},
primaryClass={cmp-lg cs.CL}
} | white1995a |
arxiv-668683 | cmp-lg/9507001 | Constraint Categorial Grammars | <|reference_start|>Constraint Categorial Grammars: Although unification can be used to implement a weak form of $\beta$-reduction, several linguistic phenomena are better handled by using some form of $\lambda$-calculus. In this paper we present a higher order feature description calculus based on a typed $\lambda$-calculus. We show how the techniques used in CLG for resolving complex feature constraints can be efficiently extended. CCLG is a simple formalism, based on categorial grammars, designed to test the practical feasibility of such a calculus.<|reference_end|> | arxiv | @article{damas1995constraint,
title={Constraint Categorial Grammars},
author={Luis Damas, Nelma Moreira},
journal={arXiv preprint arXiv:cmp-lg/9507001},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507001},
primaryClass={cmp-lg cs.CL}
} | damas1995constraint |
arxiv-668684 | cmp-lg/9507002 | A framework for lexical representation | <|reference_start|>A framework for lexical representation: In this paper we present a unification-based lexical platform designed for highly inflected languages (like Roman ones). A formalism is proposed for encoding a lemma-based lexical source, well suited for linguistic generalizations. From this source, we automatically generate an allomorph indexed dictionary, adequate for efficient processing. A set of software tools have been implemented around this formalism: access libraries, morphological processors, etc.<|reference_end|> | arxiv | @article{goñi1995a,
title={A framework for lexical representation},
author={José M. Goñi, José C. González (E.T.S.I. Telecomunicación,
Universidad Politécnica de Madrid, Madrid, Spain)},
journal={AI95: 15th International Conference. Language Engineering 95
(Montpellier, France), pp. 243-252.},
year={1995},
number={UPM/DIT/GSI 18/95},
archivePrefix={arXiv},
eprint={cmp-lg/9507002},
primaryClass={cmp-lg cs.CL}
} | goñi1995a |
arxiv-668685 | cmp-lg/9507003 | Robust Processing of Natural Language | <|reference_start|>Robust Processing of Natural Language: Previous approaches to robustness in natural language processing usually treat deviant input by relaxing grammatical constraints whenever a successful analysis cannot be provided by ``normal'' means. This schema implies that error detection always comes prior to error handling, a behaviour which can hardly compete with its human model, where many erroneous situations are treated without even noticing them. The paper analyses the necessary preconditions for achieving a higher degree of robustness in natural language processing and suggests a quite different approach based on a procedure for structural disambiguation. It not only offers the possibility to cope with robustness issues in a more natural way but eventually might be suited to accommodate quite different aspects of robust behaviour within a single framework.<|reference_end|> | arxiv | @article{menzel1995robust,
title={Robust Processing of Natural Language},
author={Wolfgang Menzel (University of Hamburg)},
journal={arXiv preprint arXiv:cmp-lg/9507003},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507003},
primaryClass={cmp-lg cs.CL}
} | menzel1995robust |
arxiv-668686 | cmp-lg/9507004 | GRAMPAL: A Morphological Processor for Spanish implemented in Prolog | <|reference_start|>GRAMPAL: A Morphological Processor for Spanish implemented in Prolog: A model for the full treatment of Spanish inflection for verbs, nouns and adjectives is presented. This model is based on feature unification and it relies upon a lexicon of allomorphs both for stems and morphemes. Word forms are built by the concatenation of allomorphs by means of special contextual features. We make use of standard Definite Clause Grammars (DCG) included in most Prolog implementations, instead of the typical finite-state approach. This allows us to take advantage of the declarativity and bidirectionality of Logic Programming for NLP. The most salient feature of this approach is simplicity: a really straightforward rule and lexical components. We have developed a very simple model for complex phenomena. Declarativity, bidirectionality, consistency and completeness of the model are discussed: all and only correct word forms are analysed or generated, even alternative ones, and gaps in paradigms are preserved. A Prolog implementation has been developed for both analysis and generation of Spanish word forms. It consists of only six DCG rules, because of our {\em lexicalist\/} approach, i.e., most information is in the dictionary. Although it is quite efficient, the current implementation could be improved for analysis by using the non-logical features of Prolog, especially in word segmentation and dictionary access.<|reference_end|> | arxiv | @article{moreno1995grampal:,
title={GRAMPAL: A Morphological Processor for Spanish implemented in Prolog},
author={Antonio Moreno (Universidad Autónoma de Madrid, Madrid, Spain) and
José M. Goñi (Universidad Politécnica de Madrid, Madrid, Spain)},
journal={GULP-PRODE95: Joint Conference on Declarative Programming, Marina
di Vietri, Salerno (Italy). September, 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507004},
primaryClass={cmp-lg cs.CL}
} | moreno1995grampal: |
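The idea of building word forms by concatenating allomorphs under contextual features can be sketched outside Prolog too: a stem-ending combination is accepted only if the two feature structures unify. The toy lexicon below is invented and flattens GRAMPAL's feature system; in the Prolog original the same check falls out of DCG unification.

def unify(f, g):
    merged = dict(f)
    for k, v in g.items():
        if k in merged and merged[k] != v:
            return None              # feature clash: combination rejected
        merged[k] = v
    return merged

stems = [("habl", {"lex": "hablar", "conj": 1}),
         ("com",  {"lex": "comer",  "conj": 2})]
suffixes = [("o",  {"conj": 1, "tense": "pres", "per": 1, "num": "sg"}),
            ("as", {"conj": 1, "tense": "pres", "per": 2, "num": "sg"})]

def analyze(word):
    for s, sf in stems:
        for e, ef in suffixes:
            if s + e == word:
                feats = unify(sf, ef)
                if feats is not None:
                    yield feats

print(list(analyze("hablas")))   # one analysis with person/number features
print(list(analyze("comas")))    # []: the conj-2 stem rejects a conj-1 ending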
arxiv-668687 | cmp-lg/9507005 | Comparative Ellipsis and Variable Binding | <|reference_start|>Comparative Ellipsis and Variable Binding: In this paper, we discuss the question whether phrasal comparatives should be given a direct interpretation, or require an analysis as elliptic constructions, and answer it with Yes and No. The most adequate analysis of wide reading attributive (WRA) comparatives seems to be as cases of ellipsis, while a direct (but asymmetric) analysis fits the data for narrow scope attributive comparatives. The question whether it is a syntactic or a semantic process which provides the missing linguistic material in the complement of WRA comparatives is also given a complex answer: Linguistic context is accessed by combining a reconstruction operation and a mechanism of anaphoric reference. The analysis makes only few and straightforward syntactic assumptions. In part, this is made possible because the use of Generalized Functional Application as a semantic operation allows us to model semantic composition in a flexible way.<|reference_end|> | arxiv | @article{lerner1995comparative,
title={Comparative Ellipsis and Variable Binding},
author={Jan Lerner and Manfred Pinkal (University of the Saarland, Dept. of
Computational Linguistics)},
journal={to appear in SALT V Proceedings, Cornell University},
year={1995},
number={CLAUS 64},
archivePrefix={arXiv},
eprint={cmp-lg/9507005},
primaryClass={cmp-lg cs.CL}
} | lerner1995comparative |
arxiv-668688 | cmp-lg/9507006 | Transfer in a Connectionist Model of the Acquisition of Morphology | <|reference_start|>Transfer in a Connectionist Model of the Acquisition of Morphology: The morphological systems of natural languages are replete with examples of the same devices used for multiple purposes: (1) the same type of morphological process (for example, suffixation for both noun case and verb tense) and (2) identical morphemes (for example, the same suffix for English noun plural and possessive). These sorts of similarity would be expected to confer advantages on language learners in the form of transfer from one morphological category to another. Connectionist models of morphology acquisition have been faulted for their supposed inability to represent phonological similarity across morphological categories and hence to facilitate transfer. This paper describes a connectionist model of the acquisition of morphology which is shown to exhibit transfer of this type. The model treats the morphology acquisition problem as one of learning to map forms onto meanings and vice versa. As the network learns these mappings, it makes phonological generalizations which are embedded in connection weights. Since these weights are shared by different morphological categories, transfer is enabled. In a set of experiments with artificial stimuli, networks were trained first on one morphological task (e.g., tense) and then on a second (e.g., number). It is shown that in the context of suffixation, prefixation, and template rules, the second task is facilitated when the second category either makes use of the same forms or the same general process type (e.g., prefixation) as the first.<|reference_end|> | arxiv | @article{gasser1995transfer,
title={Transfer in a Connectionist Model of the Acquisition of Morphology},
author={Michael Gasser (Indiana University)},
journal={arXiv preprint arXiv:cmp-lg/9507006},
year={1995},
number={IU Cognitive Science TR 147},
archivePrefix={arXiv},
eprint={cmp-lg/9507006},
primaryClass={cmp-lg cs.CL}
} | gasser1995transfer |
arxiv-668689 | cmp-lg/9507007 | An Efficient Algorithm for Surface Generation | <|reference_start|>An Efficient Algorithm for Surface Generation: A method is given that "inverts" a logic grammar and displays it from the point of view of the logical form, rather than from that of the word string. LR-compiling techniques are used to allow a recursive-descent generation algorithm to perform "functor merging" much in the same way as an LR parser performs prefix merging. This is an improvement on the semantic-head-driven generator that results in a much smaller search space. The amount of semantic lookahead can be varied, and appropriate tradeoff points between table size and resulting nondeterminism can be found automatically.<|reference_end|> | arxiv | @article{samuelsson1995an,
title={An Efficient Algorithm for Surface Generation},
author={Christer Samuelsson (University of the Saarland)},
journal={IJCAI 95},
year={1995},
number={CLAUS 44 Technical Report},
archivePrefix={arXiv},
eprint={cmp-lg/9507007},
primaryClass={cmp-lg cs.CL}
} | samuelsson1995an |
arxiv-668690 | cmp-lg/9507008 | A Constraint-based Case Frame Lexicon Architecture | <|reference_start|>A Constraint-based Case Frame Lexicon Architecture: In Turkish (and possibly in many other languages), verbs often convey several meanings (some totally unrelated) when they are used with subjects, objects, oblique objects, adverbial adjuncts, with certain lexical, morphological, and semantic features, and co-occurrence restrictions. In addition to the usual sense variations due to selectional restrictions on verbal arguments, in most cases the meaning conveyed by a case frame is idiomatic and not compositional, with subtle constraints. In this paper, we present an approach to building a constraint-based case frame lexicon for use in natural language processing in Turkish, whose prototype we have implemented under the TFS system developed at Univ. of Stuttgart. A number of observations that we have made on Turkish have indicated that we need something beyond the traditional transitive and intransitive distinction, so we utilize a framework where verb valence is considered as the obligatory co-existence of an arbitrary subset of possible arguments along with the obligatory exclusion of certain others, relative to a verb sense. Additional morphological, lexical, and semantic constraints on the syntactic constituents, organized as a 5-tier constraint hierarchy, are utilized to map a given syntactic structure (case frame) to a specific verb sense.<|reference_end|> | arxiv | @article{oflazer1995a,
title={A Constraint-based Case Frame Lexicon Architecture},
author={Kemal Oflazer and Okan Yilmaz (Department of Computer Engineering and
Information Science, Bilkent University, Ankara Turkey)},
journal={arXiv preprint arXiv:cmp-lg/9507008},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507008},
primaryClass={cmp-lg cs.CL}
} | oflazer1995a |
arxiv-668691 | cmp-lg/9507009 | Specifying Logic Programs in Controlled Natural Language | <|reference_start|>Specifying Logic Programs in Controlled Natural Language: Writing specifications for computer programs is not easy since one has to take into account the disparate conceptual worlds of the application domain and of software development. To bridge this conceptual gap we propose controlled natural language as a declarative and application-specific specification language. Controlled natural language is a subset of natural language that can be accurately and efficiently processed by a computer, but is expressive enough to allow natural usage by non-specialists. Specifications in controlled natural language are automatically translated into Prolog clauses, hence become formal and executable. The translation uses a definite clause grammar (DCG) enhanced by feature structures. Inter-text references of the specification, e.g. anaphora, are resolved with the help of discourse representation theory (DRT). The generated Prolog clauses are added to a knowledge base. We have implemented a prototypical specification system that successfully processes the specification of a simple automated teller machine.<|reference_end|> | arxiv | @article{fuchs1995specifying,
title={Specifying Logic Programs in Controlled Natural Language},
author={Norbert E. Fuchs and Rolf Schwitter (Department of Computer Science,
University of Zurich)},
journal={Proceedings CLNLP 95, COMPULOGNET/ELSNET/EAGLES},
year={1995},
number={IFI 95.17},
archivePrefix={arXiv},
eprint={cmp-lg/9507009},
primaryClass={cmp-lg cs.CL}
} | fuchs1995specifying |
arxiv-668692 | cmp-lg/9507010 | On-line Learning of Binary Lexical Relations Using Two-dimensional Weighted Majority Algorithms | <|reference_start|>On-line Learning of Binary Lexical Relations Using Two-dimensional Weighted Majority Algorithms: We consider the problem of learning a certain type of lexical semantic knowledge that can be expressed as a binary relation between words, such as the so-called sub-categorization of verbs (a verb-noun relation) and the compound noun phrase relation (a noun-noun relation). Specifically, we view this problem as an on-line learning problem in the sense of Littlestone's learning model, in which the learner's goal is to minimize the total number of prediction mistakes. In the computational learning theory literature, Goldman, Rivest and Schapire, and subsequently Goldman and Warmuth, have considered the on-line learning problem for binary relations R : X × Y -> {0, 1} in which one of the domain sets X can be partitioned into a relatively small number of types, namely clusters consisting of behaviorally indistinguishable members of X. In this paper, we extend this model and suppose that both of the sets X, Y can be partitioned into a small number of types, and propose a host of prediction algorithms which are two-dimensional extensions of Goldman and Warmuth's weighted majority type algorithm proposed for the original model. We apply these algorithms to the learning problem for the `compound noun phrase' relation, in which a noun is related to another just in case they can form a noun phrase together. Our experimental results show that all of our algorithms outperform Goldman and Warmuth's algorithm. We also theoretically analyze the performance of one of our algorithms, in the form of an upper bound on the worst-case number of prediction mistakes it makes.<|reference_end|> | arxiv | @article{abe1995on-line,
title={On-line Learning of Binary Lexical Relations Using Two-dimensional
Weighted Majority Algorithms},
author={Naoki Abe and Hang Li and Atsuyoshi Nakamura (Theory NEC Lab., RWCP)},
journal={Proc. of The 12th Int. Conf. on Machine Learning, 1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507010},
primaryClass={cmp-lg cs.CL}
} | abe1995on-line |
arxiv-668693 | cmp-lg/9507011 | Generalizing Case Frames Using a Thesaurus and the MDL Principle | <|reference_start|>Generalizing Case Frames Using a Thesaurus and the MDL Principle: We address the problem of automatically acquiring case-frame patterns from large corpus data. In particular, we view this problem as the problem of estimating a (conditional) distribution over a partition of words, and propose a new generalization method based on the MDL (Minimum Description Length) principle. To improve efficiency, our method makes use of an existing thesaurus and restricts its attention to those partitions that are present as `cuts' in the thesaurus tree, thus reducing the generalization problem to that of estimating the `tree cut models' of the thesaurus. We then give an efficient algorithm which provably obtains the optimal tree cut model for the given frequency data, in the sense of MDL. We have used the case-frame patterns obtained using our method to resolve pp-attachment ambiguity. Our experimental results indicate that our method improves upon, or is at least as effective as, existing methods.<|reference_end|> | arxiv | @article{li1995generalizing,
title={Generalizing Case Frames Using a Thesaurus and the MDL Principle},
author={Hang Li and Naoki Abe (C&C Res. Labs., NEC)},
journal={Proc. of Recent Advances in Natural Language Processing, 239-248,
1995},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507011},
primaryClass={cmp-lg cs.CL}
} | li1995generalizing |
arxiv-668694 | cmp-lg/9507012 | A Grammar Formalism and Cross-Serial Dependencies | <|reference_start|>A Grammar Formalism and Cross-Serial Dependencies: First we define a unification grammar formalism called the Tree Homomorphic Feature Structure Grammar. It is based on Lexical Functional Grammar (LFG), but has a strong restriction on the syntax of the equations. We then show that this grammar formalism defines a full abstract family of languages, and that it is capable of describing cross-serial dependencies of the type found in Swiss German.<|reference_end|> | arxiv | @article{burheim1995a,
title={A Grammar Formalism and Cross-Serial Dependencies},
author={Tore Burheim (University of Bergen)},
journal={arXiv preprint arXiv:cmp-lg/9507012},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507012},
primaryClass={cmp-lg cs.CL}
} | burheim1995a |
arxiv-668695 | cmp-lg/9507013 | Indexed Languages and Unification Grammars | <|reference_start|>Indexed Languages and Unification Grammars: Indexed languages are interesting in computational linguistics because they are the least class of languages in the Chomsky hierarchy that has not been shown not to be adequate to describe the string set of natural language sentences. We here define a class of unification grammars that exactly describe the class of indexed languages.<|reference_end|> | arxiv | @article{burheim1995indexed,
title={Indexed Languages and Unification Grammars},
author={Tore Burheim (University of Bergen)},
journal={arXiv preprint arXiv:cmp-lg/9507013},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507013},
primaryClass={cmp-lg cs.CL}
} | burheim1995indexed |
arxiv-668696 | cmp-lg/9507014 | Co-Indexing Labelled DRSs to Represent and Reason with Ambiguities | <|reference_start|>Co-Indexing Labelled DRSs to Represent and Reason with Ambiguities: The paper addresses the problem of representing ambiguities in a way that allows for monotonic disambiguation and for direct deductive computation. The paper focuses on an extension of the formalism of underspecified DRSs to ambiguities introduced by plural NPs. It deals with the collective/distributive distinction, and also with generic and cumulative readings. In addition it provides a systematic account for an underspecified treatment of plural pronoun resolution.<|reference_end|> | arxiv | @article{reyle1995co-indexing,
title={Co-Indexing Labelled DRSs to Represent and Reason with Ambiguities},
author={Uwe Reyle (Institute for Computational Linguistics, University of
Stuttgart)},
journal={arXiv preprint arXiv:cmp-lg/9507014},
year={1995},
archivePrefix={arXiv},
eprint={cmp-lg/9507014},
primaryClass={cmp-lg cs.CL}
} | reyle1995co-indexing |
arxiv-668697 | cmp-lg/9508001 | Bridging as Coercive Accommodation | <|reference_start|>Bridging as Coercive Accommodation: In this paper we discuss the notion of "bridging" in Discourse Representation Theory as a tool to account for discourse referents that have only been established implicitly, through the lexical semantics of other referents. In doing so, we use ideas from Generative Lexicon theory to introduce antecedents for anaphoric expressions that cannot be "linked" to a proper antecedent, but that do not need to be "accommodated" because they have some connection to the network of discourse referents that is already established.<|reference_end|> | arxiv | @article{bos1995bridging,
title={Bridging as Coercive Accommodation},
author={Johan Bos (University of the Saarland, Germany) and Paul Buitelaar
(Brandeis University, USA) and Anne-Marie Mineur (University of the Saarland,
Germany)},
journal={arXiv preprint arXiv:cmp-lg/9508001},
year={1995},
number={CLAUS 52 Technical Report},
archivePrefix={arXiv},
eprint={cmp-lg/9508001},
primaryClass={cmp-lg cs.CL}
} | bos1995bridging |
arxiv-668698 | cmp-lg/9508002 | A Compositional Treatment of Polysemous Arguments in Categorial Grammar | <|reference_start|>A Compositional Treatment of Polysemous Arguments in Categorial Grammar: We discuss an extension of the standard logical rules (functional application and abstraction) in Categorial Grammar (CG), in order to deal with some specific cases of polysemy. We borrow from Generative Lexicon theory, which proposes the mechanism of coercion, next to a rich nominal lexical semantic structure called qualia structure. In a previous paper we introduced coercion into the framework of sign-based Categorial Grammar and investigated its impact on traditional Fregean compositionality. In this paper we elaborate on this idea, mostly working towards the introduction of a new semantic dimension. Whereas in current versions of sign-based Categorial Grammar only two representations are derived, a prosodic one (form) and a logical one (modelling), here we also introduce a more detailed representation of the lexical semantics. This extra knowledge serves to account for linguistic phenomena like metonymy.<|reference_end|> | arxiv | @article{mineur1995a,
title={A Compositional Treatment of Polysemous Arguments in Categorial Grammar},
author={Anne-Marie Mineur (University of the Saarland, Germany) and Paul
Buitelaar (Brandeis University, USA)},
journal={arXiv preprint arXiv:cmp-lg/9508002},
year={1995},
number={CLAUS 49 Technical Report},
archivePrefix={arXiv},
eprint={cmp-lg/9508002},
primaryClass={cmp-lg cs.CL}
} | mineur1995a |
arxiv-668699 | cmp-lg/9508003 | A Robust Parsing Algorithm For Link Grammars | <|reference_start|>A Robust Parsing Algorithm For Link Grammars: In this paper we present a robust parsing algorithm based on the link grammar formalism for parsing natural languages. Our algorithm is a natural extension of the original dynamic programming recognition algorithm which recursively counts the number of linkages between two words in the input sentence. The modified algorithm uses the notion of a null link in order to allow a connection between any pair of adjacent words, regardless of their dictionary definitions. The algorithm proceeds by making three dynamic programming passes. In the first pass, the input is parsed using the original algorithm which enforces the constraints on links to ensure grammaticality. In the second pass, the total cost of each substring of words is computed, where cost is determined by the number of null links necessary to parse the substring. The final pass counts the total number of parses with minimal cost. All of the original pruning techniques have natural counterparts in the robust algorithm. When used together with memoization, these techniques enable the algorithm to run efficiently with cubic worst-case complexity. We have implemented these ideas and tested them by parsing the Switchboard corpus of conversational English. This corpus comprises approximately three million words of text, corresponding to more than 150 hours of transcribed speech collected from telephone conversations restricted to 70 different topics. Although only a small fraction of the sentences in this corpus are "grammatical" by standard criteria, the robust link grammar parser is able to extract relevant structure for a large portion of the sentences. We present the results of our experiments using this system, including the analyses of selected and random sentences from the corpus.<|reference_end|> | arxiv | @article{grinberg1995a,
title={A Robust Parsing Algorithm For Link Grammars},
author={Dennis Grinberg and John Lafferty and Daniel Sleator (Carnegie Mellon)},
journal={arXiv preprint arXiv:cmp-lg/9508003},
year={1995},
number={CMU-CS-TR-95-125},
archivePrefix={arXiv},
eprint={cmp-lg/9508003},
primaryClass={cmp-lg cs.CL}
} | grinberg1995a |
arxiv-668700 | cmp-lg/9508004 | Parsing English with a Link Grammar | <|reference_start|>Parsing English with a Link Grammar: We develop a formal grammatical system called a link grammar, show how English grammar can be encoded in such a system, and give algorithms for efficiently parsing with a link grammar. Although the expressive power of link grammars is equivalent to that of context free grammars, encoding natural language grammars appears to be much easier with the new system. We have written a program for general link parsing and written a link grammar for the English language. The performance of this preliminary system -- both in the breadth of English phenomena that it captures and in the computational resources used -- indicates that the approach may have practical uses as well as linguistic significance. Our program is written in C and may be obtained through the internet.<|reference_end|> | arxiv | @article{sleator1995parsing,
title={Parsing English with a Link Grammar},
author={Daniel D. K. Sleator (Carnegie Mellon) and Davy Temperley (Columbia
University)},
journal={arXiv preprint arXiv:cmp-lg/9508004},
year={1995},
number={CMU-CS-TR-91-126},
archivePrefix={arXiv},
eprint={cmp-lg/9508004},
primaryClass={cmp-lg cs.CL}
} | sleator1995parsing |