corpus_id (stringlengths 7-12) | paper_id (stringlengths 9-16) | title (stringlengths 1-261) | abstract (stringlengths 70-4.02k) | source (stringclasses, 1 value) | bibtex (stringlengths 208-20.9k) | citation_key (stringlengths 6-100)
---|---|---|---|---|---|---
arxiv-669101 | cmp-lg/9801002 | Identifying Discourse Markers in Spoken Dialog | <|reference_start|>Identifying Discourse Markers in Spoken Dialog: In this paper, we present a method for identifying discourse marker usage in spontaneous speech based on machine learning. Discourse markers are denoted by special POS tags, and thus the process of POS tagging can be used to identify discourse markers. By incorporating POS tagging into language modeling, discourse markers can be identified during speech recognition, in which the timeliness of the information can be used to help predict the following words. We contrast this approach with an alternative machine learning approach proposed by Litman (1996). This paper also argues that discourse markers can be used to help the hearer predict the role that the upcoming utterance plays in the dialog. Thus discourse markers should provide valuable evidence for automatic dialog act prediction.<|reference_end|> | arxiv | @article{heeman1998identifying,
title={Identifying Discourse Markers in Spoken Dialog},
author={Peter A. Heeman (Oregon Graduate Institute), Donna Byron (U. of
Rochester), James F. Allen (U. of Rochester)},
journal={AAAI 1998 Spring Symposium on Applying Machine Learning to
Discourse Processing},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9801002},
primaryClass={cmp-lg cs.CL}
} | heeman1998identifying |
arxiv-669102 | cmp-lg/9801003 | Do not forget: Full memory in memory-based learning of word pronunciation | <|reference_start|>Do not forget: Full memory in memory-based learning of word pronunciation: Memory-based learning, keeping full memory of learning material, appears a viable approach to learning NLP tasks, and is often superior in generalisation accuracy to eager learning approaches that abstract from learning material. Here we investigate three partial memory-based learning approaches which remove from memory specific task instance types estimated to be exceptional. The three approaches each implement one heuristic function for estimating exceptionality of instance types: (i) typicality, (ii) class prediction strength, and (iii) friendly-neighbourhood size. Experiments are performed with the memory-based learning algorithm IB1-IG trained on English word pronunciation. We find that removing instance types with low prediction strength (ii) is the only tested method which does not seriously harm generalisation accuracy. We conclude that keeping full memory of types rather than tokens, and excluding minority ambiguities appear to be the only performance-preserving optimisations of memory-based learning.<|reference_end|> | arxiv | @article{bosch1998do,
title={Do not forget: Full memory in memory-based learning of word
pronunciation},
author={Antal van den Bosch (ILK / Computational Linguistics, Tilburg
University), and Walter Daelemans (ILK / Computational Linguistics, Tilburg
University)},
journal={Proceedings of NeMLaP3/CoNLL98, 195-204},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9801003},
primaryClass={cmp-lg cs.CL}
} | bosch1998do |
arxiv-669103 | cmp-lg/9801004 | Modularity in inductively-learned word pronunciation systems | <|reference_start|>Modularity in inductively-learned word pronunciation systems: In leading morpho-phonological theories and state-of-the-art text-to-speech systems it is assumed that word pronunciation cannot be learned or performed without in-between analyses at several abstraction levels (e.g., morphological, graphemic, phonemic, syllabic, and stress levels). We challenge this assumption for the case of English word pronunciation. Using IGTree, an inductive-learning decision-tree algorithm, we train and test three word-pronunciation systems in which the number of abstraction levels (implemented as sequenced modules) is reduced from five, via three, to one. The latter system, classifying letter strings directly as mapping to phonemes with stress markers, yields significantly better generalisation accuracies than the two multi-module systems. Analyses of empirical results indicate that positive utility effects of sequencing modules are outweighed by cascading errors passed on between modules.<|reference_end|> | arxiv | @article{bosch1998modularity,
title={Modularity in inductively-learned word pronunciation systems},
author={Antal van den Bosch (ILK / Computational Linguistics, Tilburg
University), Ton Weijters (Dept. of Information Technology, Eindhoven
University of Technology), and Walter Daelemans (ILK / Computational
Linguistics, Tilburg University)},
journal={Proceedings of NeMLaP3/CoNLL98, 185-194},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9801004},
primaryClass={cmp-lg cs.CL}
} | bosch1998modularity |
arxiv-669104 | cmp-lg/9801005 | A General, Sound and Efficient Natural Language Parsing Algorithm based on Syntactic Constraints Propagation | <|reference_start|>A General, Sound and Efficient Natural Language Parsing Algorithm based on Syntactic Constraints Propagation: This paper presents a new context-free parsing algorithm based on a bidirectional strictly horizontal strategy which incorporates strong top-down predictions (derivations and adjacencies). From a functional point of view, the parser is able to propagate syntactic constraints reducing parsing ambiguity. From a computational perspective, the algorithm includes different techniques aimed at the improvement of the manipulation and representation of the structures used.<|reference_end|> | arxiv | @article{quesada1998a,
title={A General, Sound and Efficient Natural Language Parsing Algorithm based
on Syntactic Constraints Propagation},
author={Jose F. Quesada},
journal={Proceedings CAEPIA'97, Malaga, Spain. pp. 775-786},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9801005},
primaryClass={cmp-lg cs.CL}
} | quesada1998a |
arxiv-669105 | cmp-lg/9802001 | Look-Back and Look-Ahead in the Conversion of Hidden Markov Models into Finite State Transducers | <|reference_start|>Look-Back and Look-Ahead in the Conversion of Hidden Markov Models into Finite State Transducers: This paper describes the conversion of a Hidden Markov Model into a finite state transducer that closely approximates the behavior of the stochastic model. In some cases the transducer is equivalent to the HMM. This conversion is especially advantageous for part-of-speech tagging because the resulting transducer can be composed with other transducers that encode correction rules for the most frequent tagging errors. The speed of tagging is also improved. The described methods have been implemented and successfully tested.<|reference_end|> | arxiv | @article{kempe1998look-back,
title={Look-Back and Look-Ahead in the Conversion of Hidden Markov Models into
Finite State Transducers},
author={Andre Kempe (Xerox Research Centre Europe, Grenoble Laboratory,
France)},
journal={NeMLaP3/CoNLL'98, pp.29-37, Sydney, Australia. January 15-17, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9802001},
primaryClass={cmp-lg cs.CL}
} | kempe1998look-back |
arxiv-669106 | cmp-lg/9802002 | A Hybrid Environment for Syntax-Semantic Tagging | <|reference_start|>A Hybrid Environment for Syntax-Semantic Tagging: The thesis describes the application of the relaxation labelling algorithm to NLP disambiguation. Language is modelled through context constraints inspired by Constraint Grammars. The constraints enable the use of a real value stating "compatibility". The technique is applied to POS tagging, Shallow Parsing and Word Sense Disambiguation. Experiments and results are reported. The proposed approach enables the use of multi-feature constraint models, the simultaneous resolution of several NL disambiguation tasks, and the collaboration of linguistic and statistical models.<|reference_end|> | arxiv | @article{padro1998a,
title={A Hybrid Environment for Syntax-Semantic Tagging},
author={Lluis Padro},
journal={arXiv preprint arXiv:cmp-lg/9802002},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9802002},
primaryClass={cmp-lg cs.CL}
} | padro1998a |
arxiv-669107 | cmp-lg/9803001 | Automating Coreference: The Role of Annotated Training Data | <|reference_start|>Automating Coreference: The Role of Annotated Training Data: We report here on a study of interannotator agreement in the coreference task as defined by the Message Understanding Conference (MUC-6 and MUC-7). Based on feedback from annotators, we clarified and simplified the annotation specification. We then performed an analysis of disagreement among several annotators, concluding that only 16% of the disagreements represented genuine disagreement about coreference; the remainder of the cases were mostly typographical errors or omissions, easily reconciled. Initially, we measured interannotator agreement in the low 80s for precision and recall. To try to improve upon this, we ran several experiments. In our final experiment, we separated the tagging of candidate noun phrases from the linking of actual coreferring expressions. This method shows promise - interannotator agreement climbed to the low 90s - but it needs more extensive validation. These results position the research community to broaden the coreference task to multiple languages, and possibly to different kinds of coreference.<|reference_end|> | arxiv | @article{hirschman1998automating,
title={Automating Coreference: The Role of Annotated Training Data},
author={Lynette Hirschman, Patricia Robinson, John Burger, Marc Vilain},
journal={arXiv preprint arXiv:cmp-lg/9803001},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9803001},
primaryClass={cmp-lg cs.CL}
} | hirschman1998automating |
arxiv-669108 | cmp-lg/9803002 | Time, Tense and Aspect in Natural Language Database Interfaces | <|reference_start|>Time, Tense and Aspect in Natural Language Database Interfaces: Most existing natural language database interfaces (NLDBs) were designed to be used with database systems that provide very limited facilities for manipulating time-dependent data, and they do not support adequately temporal linguistic mechanisms (verb tenses, temporal adverbials, temporal subordinate clauses, etc.). The database community is becoming increasingly interested in temporal database systems, that are intended to store and manipulate in a principled manner information not only about the present, but also about the past and future. When interfacing to temporal databases, supporting temporal linguistic mechanisms becomes crucial. We present a framework for constructing natural language interfaces for temporal databases (NLTDBs), that draws on research in tense and aspect theories, temporal logics, and temporal databases. The framework consists of a temporal intermediate representation language, called TOP, an HPSG grammar that maps a wide range of questions involving temporal mechanisms to appropriate TOP expressions, and a provably correct method for translating from TOP to TSQL2, TSQL2 being a recently proposed temporal extension of the SQL database language. This framework was employed to implement a prototype NLTDB using ALE and Prolog.<|reference_end|> | arxiv | @article{androutsopoulos1998time,,
title={Time, Tense and Aspect in Natural Language Database Interfaces},
author={I. Androutsopoulos (Microsoft Research Institute, Macquarie
University, Sydney), G.D. Ritchie (Dept. of Artificial Intelligence, Univ. of
Edinburgh), P. Thanisch (Dept. of Computer Science, Univ. of Edinburgh)},
journal={Natural Language Engineering, 4(3), pp. 229-276, Sept. 1998,
Cambridge Univ. Press.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9803002},
primaryClass={cmp-lg cs.CL}
} | androutsopoulos1998time, |
arxiv-669109 | cmp-lg/9803003 | Nymble: a High-Performance Learning Name-finder | <|reference_start|>Nymble: a High-Performance Learning Name-finder: This paper presents a statistical, learned approach to finding names and other non-recursive entities in text (as per the MUC-6 definition of the NE task), using a variant of the standard hidden Markov model. We present our justification for the problem and our approach, a detailed discussion of the model itself and finally the successful results of this new approach.<|reference_end|> | arxiv | @article{bikel1998nymble:,
title={Nymble: a High-Performance Learning Name-finder},
author={Daniel M. Bikel (BBN), Scott Miller (BBN), Richard Schwartz (BBN),
Ralph Weischedel (BBN)},
journal={Proceedings of the Fifth Conference on Applied Natural Language
Processing, 1997, pp. 194-201},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9803003},
primaryClass={cmp-lg cs.CL}
} | bikel1998nymble: |
arxiv-669110 | cmp-lg/9804001 | Graph Interpolation Grammars: a Rule-based Approach to the Incremental Parsing of Natural Languages | <|reference_start|>Graph Interpolation Grammars: a Rule-based Approach to the Incremental Parsing of Natural Languages: Graph Interpolation Grammars are a declarative formalism with an operational semantics. Their goal is to emulate salient features of the human parser, and notably incrementality. The parsing process defined by GIGs incrementally builds a syntactic representation of a sentence as each successive lexeme is read. A GIG rule specifies a set of parse configurations that trigger its application and an operation to perform on a matching configuration. Rules are partly context-sensitive; furthermore, they are reversible, meaning that their operations can be undone, which allows the parsing process to be nondeterministic. These two factors confer enough expressive power to the formalism for parsing natural languages.<|reference_end|> | arxiv | @article{larcheveque1998graph,
title={Graph Interpolation Grammars: a Rule-based Approach to the Incremental
Parsing of Natural Languages},
author={John Larcheveque (INRIA, Rocquencourt, France)},
journal={arXiv preprint arXiv:cmp-lg/9804001},
year={1998},
number={RR-3390},
archivePrefix={arXiv},
eprint={cmp-lg/9804001},
primaryClass={cmp-lg cs.CL}
} | larcheveque1998graph |
arxiv-669111 | cmp-lg/9804002 | The Proper Treatment of Optimality in Computational Phonology | <|reference_start|>The Proper Treatment of Optimality in Computational Phonology: This paper presents a novel formalization of optimality theory. Unlike previous treatments of optimality in computational linguistics, starting with Ellison (1994), the new approach does not require any explicit marking and counting of constraint violations. It is based on the notion of "lenient composition," defined as the combination of ordinary composition and priority union. If an underlying form has outputs that can meet a given constraint, lenient composition enforces the constraint; if none of the output candidates meet the constraint, lenient composition allows all of them. For the sake of greater efficiency, we may "leniently compose" the GEN relation and all the constraints into a single finite-state transducer that maps each underlying form directly into its optimal surface realizations, and vice versa, without ever producing any failing candidates. Seen from this perspective, optimality theory is surprisingly similar to the two older strains of finite-state phonology: classical rewrite systems and two-level models. In particular, the ranking of optimality constraints corresponds to the ordering of rewrite rules.<|reference_end|> | arxiv | @article{karttunen1998the,
title={The Proper Treatment of Optimality in Computational Phonology},
author={Lauri Karttunen (Xerox Research Centre Europe)},
journal={Proceedings of FSMNLP'98. International Workshop on Finite-State
Methods in Natural Language Processing, pages 1-12, June 29 - July 1, 1998.
Bilkent University. Ankara, Turkey},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9804002},
primaryClass={cmp-lg cs.CL}
} | karttunen1998the |
arxiv-669112 | cmp-lg/9804003 | Treatment of Epsilon-Moves in Subset Construction | <|reference_start|>Treatment of Epsilon-Moves in Subset Construction: The paper discusses the problem of determinising finite-state automata containing large numbers of epsilon-moves. Experiments with finite-state approximations of natural language grammars often give rise to very large automata with a very large number of epsilon-moves. The paper identifies three subset construction algorithms which treat epsilon-moves. A number of experiments has been performed which indicate that the algorithms differ considerably in practice. Furthermore, the experiments suggest that the average number of epsilon-moves per state can be used to predict which algorithm is likely to perform best for a given input automaton.<|reference_end|> | arxiv | @article{van noord1998treatment,
title={Treatment of Epsilon-Moves in Subset Construction},
author={Gertjan van Noord (Groningen University)},
journal={Proceedings of FSMNLP'98. International Workshop on Finite-State
Methods in Natural Language Processing, pages 1-12, June 29 - July 1, 1998.
Bilkent University. Ankara, Turkey.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9804003},
primaryClass={cmp-lg cs.CL}
} | van noord1998treatment |
arxiv-669113 | cmp-lg/9804004 | Corpus-Based Word Sense Disambiguation | <|reference_start|>Corpus-Based Word Sense Disambiguation: Resolution of lexical ambiguity, commonly termed ``word sense disambiguation'', is expected to improve the analytical accuracy for tasks which are sensitive to lexical semantics. Such tasks include machine translation, information retrieval, parsing, natural language understanding and lexicography. Reflecting the growth in utilization of machine readable texts, word sense disambiguation techniques have been explored variously in the context of corpus-based approaches. Within one corpus-based framework, that is the similarity-based method, systems use a database, in which example sentences are manually annotated with correct word senses. Given an input, systems search the database for the most similar example to the input. The lexical ambiguity of a word contained in the input is resolved by selecting the sense annotation of the retrieved example. In this research, we apply this method to the resolution of verbal polysemy, in which the similarity between two examples is computed as the weighted average of the similarity between complements governed by a target polysemous verb. We explore similarity-based verb sense disambiguation focusing on the following three methods. First, we propose a weighting schema for each verb complement in the similarity computation. Second, in similarity-based techniques, the overhead for manual supervision and searching the large-sized database can be prohibitive. To resolve this problem, we propose a method to select a small number of effective examples for system usage. Finally, the efficiency of our system is highly dependent on the similarity computation used. To maximize efficiency, we propose a method which integrates the advantages of previous methods for similarity computation.<|reference_end|> | arxiv | @article{fujii1998corpus-based,
title={Corpus-Based Word Sense Disambiguation},
author={Atsushi Fujii (University of Library and Information Science)},
journal={arXiv preprint arXiv:cmp-lg/9804004},
year={1998},
number={TR98-0003, Tokyo Institute of Technology},
archivePrefix={arXiv},
eprint={cmp-lg/9804004},
primaryClass={cmp-lg cs.CL}
} | fujii1998corpus-based |
arxiv-669114 | cmp-lg/9804005 | On the existence of certain total recursive functions in nontrivial axiom systems, I | <|reference_start|>On the existence of certain total recursive functions in nontrivial axiom systems, I: We investigate the existence of a class of ZFC-provably total recursive unary functions, given certain constraints, and apply some of those results to show that, for $\Sigma_1$-sound set theory, ZFC$\not\vdash P<NP$.<|reference_end|> | arxiv | @article{da costa1998on,
title={On the existence of certain total recursive functions in nontrivial
axiom systems, I},
author={N. C. A. da Costa (University of Sao Paulo) and F. A. Doria (Federal
University at Rio de Janeiro)},
journal={arXiv preprint arXiv:cmp-lg/9804005},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9804005},
primaryClass={cmp-lg cs.CL}
} | da costa1998on |
arxiv-669115 | cmp-lg/9805001 | Valence Induction with a Head-Lexicalized PCFG | <|reference_start|>Valence Induction with a Head-Lexicalized PCFG: This paper presents an experiment in learning valences (subcategorization frames) from a 50 million word text corpus, based on a lexicalized probabilistic context free grammar. Distributions are estimated using a modified EM algorithm. We evaluate the acquired lexicon both by comparison with a dictionary and by entropy measures. Results show that our model produces highly accurate frame distributions.<|reference_end|> | arxiv | @article{carroll1998valence,
title={Valence Induction with a Head-Lexicalized PCFG},
author={Glenn Carroll and Mats Rooth (IMS, Universit\"at Stuttgart)},
journal={arXiv preprint arXiv:cmp-lg/9805001},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805001},
primaryClass={cmp-lg cs.CL}
} | carroll1998valence |
arxiv-669116 | cmp-lg/9805002 | Group Theory and Grammatical Description | <|reference_start|>Group Theory and Grammatical Description: This paper presents a model for linguistic description based on group theory. A grammar in this model, or "G-grammar", is a collection of lexical expressions which are products of logical forms, phonological forms, and their inverses. Phrasal descriptions are obtained by forming products of lexical expressions and by cancelling contiguous elements which are inverses of each other. We show applications of this model to parsing and generation, long-distance movement, and quantifier scoping. We believe that by moving from the free monoid over a vocabulary V --- standard in formal language studies --- to the free group over V, deep affinities between linguistic phenomena and classical algebra come to the surface, and that the consequences of tapping the mathematical connections thus established could be considerable.<|reference_end|> | arxiv | @article{dymetman1998group,
title={Group Theory and Grammatical Description},
author={Marc Dymetman (Xerox Research Centre Europe, Grenoble)},
journal={arXiv preprint arXiv:cmp-lg/9805002},
year={1998},
number={MLTT-033},
archivePrefix={arXiv},
eprint={cmp-lg/9805002},
primaryClass={cmp-lg cs.CL}
} | dymetman1998group |
arxiv-669117 | cmp-lg/9805003 | Models of Co-occurrence | <|reference_start|>Models of Co-occurrence: A model of co-occurrence in bitext is a boolean predicate that indicates whether a given pair of word tokens co-occur in corresponding regions of the bitext space. Co-occurrence is a precondition for the possibility that two tokens might be mutual translations. Models of co-occurrence are the glue that binds methods for mapping bitext correspondence with methods for estimating translation models into an integrated system for exploiting parallel texts. Different models of co-occurrence are possible, depending on the kind of bitext map that is available, the language-specific information that is available, and the assumptions made about the nature of translational equivalence. Although most statistical translation models are based on models of co-occurrence, modeling co-occurrence correctly is more difficult than it may at first appear.<|reference_end|> | arxiv | @article{melamed1998models,
title={Models of Co-occurrence},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9805003},
year={1998},
number={IRCS TR #98-05},
archivePrefix={arXiv},
eprint={cmp-lg/9805003},
primaryClass={cmp-lg cs.CL}
} | melamed1998models |
arxiv-669118 | cmp-lg/9805004 | Annotation Style Guide for the Blinker Project | <|reference_start|>Annotation Style Guide for the Blinker Project: This annotation style guide was created by and for the Blinker project at the University of Pennsylvania. The Blinker project was so named after the ``bilingual linker'' GUI, which was created to enable bilingual annotators to ``link'' word tokens that are mutual translations in parallel texts. The parallel text chosen for this project was the Bible, because it is probably the easiest text to obtain in electronic form in multiple languages. The languages involved were English and French, because, of the languages with which the project co-ordinator was familiar, these were the two for which a sufficient number of annotators was likely to be found.<|reference_end|> | arxiv | @article{melamed1998annotation,
title={Annotation Style Guide for the Blinker Project},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9805004},
year={1998},
number={IRCS TR #98-06},
archivePrefix={arXiv},
eprint={cmp-lg/9805004},
primaryClass={cmp-lg cs.CL}
} | melamed1998annotation |
arxiv-669119 | cmp-lg/9805005 | Manual Annotation of Translational Equivalence: The Blinker Project | <|reference_start|>Manual Annotation of Translational Equivalence: The Blinker Project: Bilingual annotators were paid to link roughly sixteen thousand corresponding words between on-line versions of the Bible in modern French and modern English. These annotations are freely available to the research community from http://www.cis.upenn.edu/~melamed . The annotations can be used for several purposes. First, they can be used as a standard data set for developing and testing translation lexicons and statistical translation models. Second, researchers in lexical semantics will be able to mine the annotations for insights about cross-linguistic lexicalization patterns. Third, the annotations can be used in research into certain recently proposed methods for monolingual word-sense disambiguation. This paper describes the annotated texts, the specially-designed annotation tool, and the strategies employed to increase the consistency of the annotations. The annotation process was repeated five times by different annotators. Inter-annotator agreement rates indicate that the annotations are reasonably reliable and that the method is easy to replicate.<|reference_end|> | arxiv | @article{melamed1998manual,
title={Manual Annotation of Translational Equivalence: The Blinker Project},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9805005},
year={1998},
number={IRCS TR #98-07},
archivePrefix={arXiv},
eprint={cmp-lg/9805005},
primaryClass={cmp-lg cs.CL}
} | melamed1998manual |
arxiv-669120 | cmp-lg/9805006 | Word-to-Word Models of Translational Equivalence | <|reference_start|>Word-to-Word Models of Translational Equivalence: Parallel texts (bitexts) have properties that distinguish them from other kinds of parallel data. First, most words translate to only one other word. Second, bitext correspondence is noisy. This article presents methods for biasing statistical translation models to reflect these properties. Analysis of the expected behavior of these biases in the presence of sparse data predicts that they will result in more accurate models. The prediction is confirmed by evaluation with respect to a gold standard -- translation models that are biased in this fashion are significantly more accurate than a baseline knowledge-poor model. This article also shows how a statistical translation model can take advantage of various kinds of pre-existing knowledge that might be available about particular language pairs. Even the simplest kinds of language-specific knowledge, such as the distinction between content words and function words, is shown to reliably boost translation model performance on some tasks. Statistical models that are informed by pre-existing knowledge about the model domain combine the best of both the rationalist and empiricist traditions.<|reference_end|> | arxiv | @article{melamed1998word-to-word,
title={Word-to-Word Models of Translational Equivalence},
author={I. Dan Melamed (University of Pennsylvania)},
journal={arXiv preprint arXiv:cmp-lg/9805006},
year={1998},
number={IRCS TR #98-08},
archivePrefix={arXiv},
eprint={cmp-lg/9805006},
primaryClass={cmp-lg cs.CL}
} | melamed1998word-to-word |
arxiv-669121 | cmp-lg/9805007 | Parsing Inside-Out | <|reference_start|>Parsing Inside-Out: The inside-outside probabilities are typically used for reestimating Probabilistic Context Free Grammars (PCFGs), just as the forward-backward probabilities are typically used for reestimating HMMs. I show several novel uses, including improving parser accuracy by matching parsing algorithms to evaluation criteria; speeding up DOP parsing by 500 times; and 30 times faster PCFG thresholding at a given accuracy level. I also give an elegant, state-of-the-art grammar formalism, which can be used to compute inside-outside probabilities; and a parser description formalism, which makes it easy to derive inside-outside formulas and many others.<|reference_end|> | arxiv | @article{goodman1998parsing,
title={Parsing Inside-Out},
author={Joshua Goodman (Harvard University)},
journal={arXiv preprint arXiv:cmp-lg/9805007},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805007},
primaryClass={cmp-lg cs.CL}
} | goodman1998parsing |
arxiv-669122 | cmp-lg/9805008 | A Descriptive Characterization of Tree-Adjoining Languages (Full Version) | <|reference_start|>A Descriptive Characterization of Tree-Adjoining Languages (Full Version): Since the early Sixties and Seventies it has been known that the regular and context-free languages are characterized by definability in the monadic second-order theory of certain structures. More recently, these descriptive characterizations have been used to obtain complexity results for constraint- and principle-based theories of syntax and to provide a uniform model-theoretic framework for exploring the relationship between theories expressed in disparate formal terms. These results have been limited, to an extent, by the lack of descriptive characterizations of language classes beyond the context-free. Recently, we have shown that tree-adjoining languages (in a mildly generalized form) can be characterized by recognition by automata operating on three-dimensional tree manifolds, a three-dimensional analog of trees. In this paper, we exploit these automata-theoretic results to obtain a characterization of the tree-adjoining languages by definability in the monadic second-order theory of these three-dimensional tree manifolds. This not only opens the way to extending the tools of model-theoretic syntax to the level of TALs, but provides a highly flexible mechanism for defining TAGs in terms of logical constraints. This is the full version of a paper to appear in the proceedings of COLING-ACL'98 as a project note.<|reference_end|> | arxiv | @article{rogers1998a,
title={A Descriptive Characterization of Tree-Adjoining Languages (Full
Version)},
author={James Rogers (University of Central Florida)},
journal={arXiv preprint arXiv:cmp-lg/9805008},
year={1998},
number={CS-TR-98-01},
archivePrefix={arXiv},
eprint={cmp-lg/9805008},
primaryClass={cmp-lg cs.CL}
} | rogers1998a |
arxiv-669123 | cmp-lg/9805009 | Discovery of Linguistic Relations Using Lexical Attraction | <|reference_start|>Discovery of Linguistic Relations Using Lexical Attraction: This work has been motivated by two long term goals: to understand how humans learn language and to build programs that can understand language. Using a representation that makes the relevant features explicit is a prerequisite for successful learning and understanding. Therefore, I chose to represent relations between individual words explicitly in my model. Lexical attraction is defined as the likelihood of such relations. I introduce a new class of probabilistic language models named lexical attraction models which can represent long distance relations between words and I formalize this new class of models using information theory. Within the framework of lexical attraction, I developed an unsupervised language acquisition program that learns to identify linguistic relations in a given sentence. The only explicitly represented linguistic knowledge in the program is lexical attraction. There is no initial grammar or lexicon built in and the only input is raw text. Learning and processing are interdigitated. The processor uses the regularities detected by the learner to impose structure on the input. This structure enables the learner to detect higher level regularities. Using this bootstrapping procedure, the program was trained on 100 million words of Associated Press material and was able to achieve 60% precision and 50% recall in finding relations between content-words. Using knowledge of lexical attraction, the program can identify the correct relations in syntactically ambiguous sentences such as ``I saw the Statue of Liberty flying over New York.''<|reference_end|> | arxiv | @article{yuret1998discovery,
title={Discovery of Linguistic Relations Using Lexical Attraction},
author={Deniz Yuret (MIT Artificial Intelligence Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9805009},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805009},
primaryClass={cmp-lg cs.CL}
} | yuret1998discovery |
arxiv-669124 | cmp-lg/9805010 | Integrating Text Plans for Conciseness and Coherence | <|reference_start|>Integrating Text Plans for Conciseness and Coherence: Our experience with a critiquing system shows that when the system detects problems with the user's performance, multiple critiques are often produced. Analysis of a corpus of actual critiques revealed that even though each individual critique is concise and coherent, the set of critiques as a whole may exhibit several problems that detract from conciseness and coherence, and consequently assimilation. Thus a text planner was needed that could integrate the text plans for individual communicative goals to produce an overall text plan representing a concise, coherent message. This paper presents our general rule-based system for accomplishing this task. The system takes as input a \emph{set} of individual text plans represented as RST-style trees, and produces a smaller set of more complex trees representing integrated messages that still achieve the multiple communicative goals of the individual text plans. Domain-independent rules are used to capture strategies across domains, while the facility for addition of domain-dependent rules enables the system to be tuned to the requirements of a particular domain. The system has been tested on a corpus of critiques in the domain of trauma care.<|reference_end|> | arxiv | @article{harvey1998integrating,
title={Integrating Text Plans for Conciseness and Coherence},
author={Terrence Harvey and Sandra Carberry (Department of Computer Science,
University of Delaware)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL '98)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805010},
primaryClass={cmp-lg cs.CL}
} | harvey1998integrating |
arxiv-669125 | cmp-lg/9805011 | Automatic summarising: factors and directions | <|reference_start|>Automatic summarising: factors and directions: This position paper suggests that progress with automatic summarising demands a better research methodology and a carefully focussed research strategy. In order to develop effective procedures it is necessary to identify and respond to the context factors, i.e. input, purpose, and output factors, that bear on summarising and its evaluation. The paper analyses and illustrates these factors and their implications for evaluation. It then argues that this analysis, together with the state of the art and the intrinsic difficulty of summarising, imply a nearer-term strategy concentrating on shallow, but not surface, text analysis and on indicative summarising. This is illustrated with current work, from which a potentially productive research programme can be developed.<|reference_end|> | arxiv | @article{jones1998automatic,
title={Automatic summarising: factors and directions},
author={Karen Sparck Jones (Computer Laboratory, University of Cambridge)},
journal={arXiv preprint arXiv:cmp-lg/9805011},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805011},
primaryClass={cmp-lg cs.CL}
} | jones1998automatic |
arxiv-669126 | cmp-lg/9805012 | Recognizing Syntactic Errors in the Writing of Second Language Learners | <|reference_start|>Recognizing Syntactic Errors in the Writing of Second Language Learners: This paper reports on the recognition component of an intelligent tutoring system that is designed to help foreign language speakers learn standard English. The system models the grammar of the learner, with this instantiation of the system tailored to signers of American Sign Language (ASL). We discuss the theoretical motivations for the system, various difficulties that have been encountered in the implementation, as well as the methods we have used to overcome these problems. Our method of capturing ungrammaticalities involves using mal-rules (also called 'error productions'). However, the straightforward addition of some mal-rules causes significant performance problems with the parser. For instance, the ASL population has a strong tendency to drop pronouns and the auxiliary verb `to be'. Being able to account for these as sentences results in an explosion in the number of possible parses for each sentence. This explosion, left unchecked, greatly hampers the performance of the system. We discuss how this is handled by taking into account expectations from the specific population (some of which are captured in our unique user model). The different representations of lexical items at various points in the acquisition process are modeled by using mal-rules, which obviates the need for multiple lexicons. The grammar is evaluated on its ability to correctly diagnose agreement problems in actual sentences produced by ASL native speakers.<|reference_end|> | arxiv | @article{schneider1998recognizing,
title={Recognizing Syntactic Errors in the Writing of Second Language Learners},
author={David A. Schneider and Kathleen F. McCoy (University of Delaware)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL '98)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9805012},
primaryClass={cmp-lg cs.CL}
} | schneider1998recognizing |
arxiv-669127 | cmp-lg/9806001 | Learning Correlations between Linguistic Indicators and Semantic Constraints: Reuse of Context-Dependent Descriptions of Entities | <|reference_start|>Learning Correlations between Linguistic Indicators and Semantic Constraints: Reuse of Context-Dependent Descriptions of Entities: This paper presents the results of a study on the semantic constraints imposed on lexical choice by certain contextual indicators. We show how such indicators are computed and how correlations between them and the choice of a noun phrase description of a named entity can be automatically established using supervised learning. Based on this correlation, we have developed a technique for automatic lexical choice of descriptions of entities in text generation. We discuss the underlying relationship between the pragmatics of choosing an appropriate description that serves a specific purpose in the automatically generated text and the semantics of the description itself. We present our work in the framework of the more general concept of reuse of linguistic structures that are automatically extracted from large corpora. We present a formal evaluation of our approach and we conclude with some thoughts on potential applications of our method.<|reference_end|> | arxiv | @article{radev1998learning,
title={Learning Correlations between Linguistic Indicators and Semantic
Constraints: Reuse of Context-Dependent Descriptions of Entities},
author={Dragomir R. Radev (Columbia University)},
journal={arXiv preprint arXiv:cmp-lg/9806001},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806001},
primaryClass={cmp-lg cs.CL}
} | radev1998learning |
arxiv-669128 | cmp-lg/9806002 | Computing Dialogue Acts from Features with Transformation-Based Learning | <|reference_start|>Computing Dialogue Acts from Features with Transformation-Based Learning: To interpret natural language at the discourse level, it is very useful to accurately recognize dialogue acts, such as SUGGEST, in identifying speaker intentions. Our research explores the utility of a machine learning method called Transformation-Based Learning (TBL) in computing dialogue acts, because TBL has a number of advantages over alternative approaches for this application. We have identified some extensions to TBL that are necessary in order to address the limitations of the original algorithm and the particular demands of discourse processing. We use a Monte Carlo strategy to increase the applicability of the TBL method, and we select features of utterances that can be used as input to improve the performance of TBL. Our system is currently being tested on the VerbMobil corpora of spoken dialogues, producing promising preliminary results.<|reference_end|> | arxiv | @article{samuel1998computing,
title={Computing Dialogue Acts from Features with Transformation-Based Learning},
author={Ken Samuel, Sandra Carberry, and K. Vijay-Shanker (Department of
Computer Science, University of Delaware)},
journal={Applying Machine Learning to Discourse Processing: Papers from the
1998 AAAI Spring Symposium},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806002},
primaryClass={cmp-lg cs.CL}
} | samuel1998computing |
arxiv-669129 | cmp-lg/9806003 | Lazy Transformation-Based Learning | <|reference_start|>Lazy Transformation-Based Learning: We introduce a significant improvement for a relatively new machine learning method called Transformation-Based Learning. By applying a Monte Carlo strategy to randomly sample from the space of rules, rather than exhaustively analyzing all possible rules, we drastically reduce the memory and time costs of the algorithm, without compromising accuracy on unseen data. This enables Transformation- Based Learning to apply to a wider range of domains, as it can effectively consider a larger number of different features and feature interactions in the data. In addition, the Monte Carlo improvement decreases the labor demands on the human developer, who no longer needs to develop a minimal set of rule templates to maintain tractability.<|reference_end|> | arxiv | @article{samuel1998lazy,
title={Lazy Transformation-Based Learning},
author={Ken Samuel (Department of Computer Science, University of Delaware)},
journal={Proceedings of the 11th International Florida Artificial
Intelligence Research Symposium Conference},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806003},
primaryClass={cmp-lg cs.CL}
} | samuel1998lazy |
arxiv-669130 | cmp-lg/9806004 | Rationality, Cooperation and Conversational Implicature | <|reference_start|>Rationality, Cooperation and Conversational Implicature: Conversational implicatures are usually described as being licensed by the disobeying or flouting of a Principle of Cooperation. However, the specification of this principle has proved computationally elusive. In this paper we suggest that a more useful concept is rationality. Such a concept can be specified explicitly in planning terms and we argue that speakers perform utterances as part of the optimal plan for their particular communicative goals. Such an assumption can be used by the hearer to infer conversational implicatures implicit in the speaker's utterance.<|reference_end|> | arxiv | @article{lee1998rationality,,
title={Rationality, Cooperation and Conversational Implicature},
author={Mark Lee (University of Sheffield)},
journal={arXiv preprint arXiv:cmp-lg/9806004},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806004},
primaryClass={cmp-lg cs.CL}
} | lee1998rationality, |
arxiv-669131 | cmp-lg/9806005 | Eliminating deceptions and mistaken belief to infer conversational implicature | <|reference_start|>Eliminating deceptions and mistaken belief to infer conversational implicature: Conversational implicatures are usually described as being licensed by the disobeying or flouting of some principle by the speaker in cooperative dialogue. However, such work has failed to distinguish cases of the speaker flouting such a principle from cases where the speaker is either deceptive or holds a mistaken belief. In this paper, we demonstrate how the three different cases can be distinguished in terms of the beliefs ascribed to the speaker of the utterance. We argue that in the act of distinguishing the speaker's intention and ascribing such beliefs, the intended inference can be made by the hearer. This theory is implemented in ViewGen, a pre-existing belief modelling system used in a medical counselling domain.<|reference_end|> | arxiv | @article{lee1998eliminating,
title={Eliminating deceptions and mistaken belief to infer conversational
implicature},
author={Mark Lee and Yorick Wilks (University of Sheffield)},
journal={arXiv preprint arXiv:cmp-lg/9806005},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806005},
primaryClass={cmp-lg cs.CL}
} | lee1998eliminating |
arxiv-669132 | cmp-lg/9806006 | Dialogue Act Tagging with Transformation-Based Learning | <|reference_start|>Dialogue Act Tagging with Transformation-Based Learning: For the task of recognizing dialogue acts, we are applying the Transformation-Based Learning (TBL) machine learning algorithm. To circumvent a sparse data problem, we extract values of well-motivated features of utterances, such as speaker direction, punctuation marks, and a new feature, called dialogue act cues, which we find to be more effective than cue phrases and word n-grams in practice. We present strategies for constructing a set of dialogue act cues automatically by minimizing the entropy of the distribution of dialogue acts in a training corpus, filtering out irrelevant dialogue act cues, and clustering semantically-related words. In addition, to address limitations of TBL, we introduce a Monte Carlo strategy for training efficiently and a committee method for computing confidence measures. These ideas are combined in our working implementation, which labels held-out data as accurately as any other reported system for the dialogue act tagging task.<|reference_end|> | arxiv | @article{samuel1998dialogue,
title={Dialogue Act Tagging with Transformation-Based Learning},
author={Ken Samuel, Sandra Carberry, and K. Vijay-Shanker (Department of
Computer and Information Sciences, University of Delaware)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL '98)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806006},
primaryClass={cmp-lg cs.CL}
} | samuel1998dialogue |
arxiv-669133 | cmp-lg/9806007 | An Investigation of Transformation-Based Learning in Discourse | <|reference_start|>An Investigation of Transformation-Based Learning in Discourse: This paper presents results from the first attempt to apply Transformation-Based Learning to a discourse-level Natural Language Processing task. To address two limitations of the standard algorithm, we developed a Monte Carlo version of Transformation-Based Learning to make the method tractable for a wider range of problems without degradation in accuracy, and we devised a committee method for assigning confidence measures to tags produced by Transformation-Based Learning. The paper describes these advances, presents experimental evidence that Transformation-Based Learning is as effective as alternative approaches (such as Decision Trees and N-Grams) for a discourse task called Dialogue Act Tagging, and argues that Transformation-Based Learning has desirable features that make it particularly appealing for the Dialogue Act Tagging task.<|reference_end|> | arxiv | @article{samuel1998an,
title={An Investigation of Transformation-Based Learning in Discourse},
author={Ken Samuel, Sandra Carberry, and K. Vijay-Shanker (Department of
Computer and Information Sciences, University of Delaware)},
journal={Machine Learning: Proceedings of the 15th International Conference},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806007},
primaryClass={cmp-lg cs.CL}
} | samuel1998an |
arxiv-669134 | cmp-lg/9806008 | Unlimited Vocabulary Grapheme to Phoneme Conversion for Korean TTS | <|reference_start|>Unlimited Vocabulary Grapheme to Phoneme Conversion for Korean TTS: This paper describes a grapheme-to-phoneme conversion method using phoneme connectivity and CCV conversion rules. The method consists mainly of four modules including morpheme normalization, phrase-break detection, morpheme to phoneme conversion and phoneme connectivity check. The morpheme normalization module replaces non-Korean symbols with standard Korean graphemes. The phrase-break detector assigns phrase breaks using part-of-speech (POS) information. In the morpheme-to-phoneme conversion module, each morpheme in the phrase is converted into phonetic patterns by looking up the morpheme phonetic pattern dictionary which contains candidate phonological changes in boundaries of the morphemes. Graphemes within a morpheme are grouped into CCV patterns and converted into phonemes by the CCV conversion rules. The phoneme connectivity table supports grammaticality checking of two adjacent phonetic morphemes. In the experiments with a corpus of 4,973 sentences, we achieved 99.9% grapheme-to-phoneme conversion performance and 97.5% sentence conversion performance. The full Korean TTS system is now being implemented using this conversion method.<|reference_end|> | arxiv | @article{kim1998unlimited,
title={Unlimited Vocabulary Grapheme to Phoneme Conversion for Korean TTS},
author={Byeongchang Kim, WonIl Lee, Geunbae Lee and Jong-Hyeok Lee (POSTECH,
Korea)},
journal={arXiv preprint arXiv:cmp-lg/9806008},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806008},
primaryClass={cmp-lg cs.CL}
} | kim1998unlimited |
arxiv-669135 | cmp-lg/9806009 | Methods and Tools for Building the Catalan WordNet | <|reference_start|>Methods and Tools for Building the Catalan WordNet: In this paper we introduce the methodology used and the basic phases we followed to develop the Catalan WordNet, and which lexical resources have been employed in its building. This methodology, as well as the tools we made use of, have been designed in a general way so that they could be applied to any other language.<|reference_end|> | arxiv | @article{benitez1998methods,
title={Methods and Tools for Building the Catalan WordNet},
author={Laura Benitez, Sergi Cervell, Gerard Escudero, Monica Lopez, German
Rigau & Mariona Taule (Natural Language Processing Group of Universitat
Politecnica de Catalunya and Universitat de Barcelona)},
journal={arXiv preprint arXiv:cmp-lg/9806009},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806009},
primaryClass={cmp-lg cs.CL}
} | benitez1998methods |
arxiv-669136 | cmp-lg/9806010 | Towards a single proposal in spelling correction | <|reference_start|>Towards a single proposal in spelling correction: The study presented here relies on the integrated use of different kinds of knowledge in order to improve first-guess accuracy in non-word context-sensitive correction for general unrestricted texts. State of the art spelling correction systems, e.g. ispell, apart from detecting spelling errors, also assist the user by offering a set of candidate corrections that are close to the misspelled word. Based on the correction proposals of ispell, we built several guessers, which were combined in different ways. Firstly, we evaluated all possibilities and selected the best ones in a corpus with artificially generated typing errors. Secondly, the best combinations were tested on texts with genuine spelling errors. The results for the latter suggest that we can expect automatic non-word correction for all the errors in a free running text with 80% precision and a single proposal 98% of the times (1.02 proposals on average).<|reference_end|> | arxiv | @article{agirre1998towards,
title={Towards a single proposal in spelling correction},
author={E. Agirre, K. Gojenola, and K. Sarasola},
journal={arXiv preprint arXiv:cmp-lg/9806010},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806010},
primaryClass={cmp-lg cs.CL}
} | agirre1998towards |
arxiv-669137 | cmp-lg/9806011 | A Memory-Based Approach to Learning Shallow Natural Language Patterns | <|reference_start|>A Memory-Based Approach to Learning Shallow Natural Language Patterns: Recognizing shallow linguistic patterns, such as basic syntactic relationships between words, is a common task in applied natural language and text processing. The common practice for approaching this task is by tedious manual definition of possible pattern structures, often in the form of regular expressions or finite automata. This paper presents a novel memory-based learning method that recognizes shallow patterns in new text based on a bracketed training corpus. The training data are stored as-is, in efficient suffix-tree data structures. Generalization is performed on-line at recognition time by comparing subsequences of the new text to positive and negative evidence in the corpus. This way, no information in the training is lost, as can happen in other learning systems that construct a single generalized model at the time of training. The paper presents experimental results for recognizing noun phrase, subject-verb and verb-object patterns in English. Since the learning approach enables easy porting to new domains, we plan to apply it to syntactic patterns in other languages and to sub-language patterns for information extraction.<|reference_end|> | arxiv | @article{argamon1998a,
title={A Memory-Based Approach to Learning Shallow Natural Language Patterns},
author={Shlomo Argamon, Ido Dagan, Yuval Krymolowski},
journal={arXiv preprint arXiv:cmp-lg/9806011},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806011},
primaryClass={cmp-lg cs.CL}
} | argamon1998a |
arxiv-669138 | cmp-lg/9806012 | Bayesian Stratified Sampling to Assess Corpus Utility | <|reference_start|>Bayesian Stratified Sampling to Assess Corpus Utility: This paper describes a method for asking statistical questions about a large text corpus. We exemplify the method by addressing the question, "What percentage of Federal Register documents are real documents, of possible interest to a text researcher or analyst?" We estimate an answer to this question by evaluating 200 documents selected from a corpus of 45,820 Federal Register documents. Stratified sampling is used to reduce the sampling uncertainty of the estimate from over 3100 documents to fewer than 1000. The stratification is based on observed characteristics of real documents, while the sampling procedure incorporates a Bayesian version of Neyman allocation. A possible application of the method is to establish baseline statistics used to estimate recall rates for information retrieval systems.<|reference_end|> | arxiv | @article{hochberg1998bayesian,
title={Bayesian Stratified Sampling to Assess Corpus Utility},
author={Judith Hochberg, Clint Scovel, Timothy Thomas, Sam Hall},
journal={arXiv preprint arXiv:cmp-lg/9806012},
year={1998},
number={98-1922},
archivePrefix={arXiv},
eprint={cmp-lg/9806012},
primaryClass={cmp-lg cs.CL}
} | hochberg1998bayesian |
arxiv-669139 | cmp-lg/9806013 | Can Subcategorisation Probabilities Help a Statistical Parser? | <|reference_start|>Can Subcategorisation Probabilities Help a Statistical Parser?: Research into the automatic acquisition of lexical information from corpora is starting to produce large-scale computational lexicons containing data on the relative frequencies of subcategorisation alternatives for individual verbal predicates. However, the empirical question of whether this type of frequency information can in practice improve the accuracy of a statistical parser has not yet been answered. In this paper we describe an experiment with a wide-coverage statistical grammar and parser for English and subcategorisation frequencies acquired from ten million words of text which shows that this information can significantly improve parse accuracy.<|reference_end|> | arxiv | @article{carroll1998can,
title={Can Subcategorisation Probabilities Help a Statistical Parser?},
author={John Carroll, Guido Minnen (University of Sussex), Ted Briscoe
(Cambridge University)},
journal={6th Workshop on Very Large Corpora, Montreal, Canada, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806013},
primaryClass={cmp-lg cs.CL}
} | carroll1998can |
arxiv-669140 | cmp-lg/9806014 | Word Sense Disambiguation using Optimised Combinations of Knowledge Sources | <|reference_start|>Word Sense Disambiguation using Optimised Combinations of Knowledge Sources: Word sense disambiguation algorithms, with few exceptions, have made use of only one lexical knowledge source. We describe a system which performs unrestricted word sense disambiguation (on all content words in free text) by combining different knowledge sources: semantic preferences, dictionary definitions and subject/domain codes along with part-of-speech tags. The usefulness of these sources is optimised by means of a learning algorithm. We also describe the creation of a new sense tagged corpus by combining existing resources. Tested accuracy of our approach on this corpus exceeds 92%, demonstrating the viability of all-word disambiguation rather than restricting oneself to a small sample.<|reference_end|> | arxiv | @article{wilks1998word,
title={Word Sense Disambiguation using Optimised Combinations of Knowledge
Sources},
author={Yorick Wilks and Mark Stevenson},
journal={arXiv preprint arXiv:cmp-lg/9806014},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806014},
primaryClass={cmp-lg cs.CL}
} | wilks1998word |
arxiv-669141 | cmp-lg/9806015 | Building Accurate Semantic Taxonomies from Monolingual MRDs | <|reference_start|>Building Accurate Semantic Taxonomies from Monolingual MRDs: This paper presents a method that combines a set of unsupervised algorithms in order to accurately build large taxonomies from any machine-readable dictionary (MRD). Our aim is to profit from conventional MRDs, with no explicit semantic coding. We propose a system that 1) performs fully automatic extraction of taxonomic links from MRD entries and 2) ranks the extracted relations in a way that allows selective manual refinement. Tested accuracy can reach around 100% depending on the degree of coverage selected, showing that taxonomy building is not limited to structured dictionaries such as LDOCE.<|reference_end|> | arxiv | @article{rigau1998building,
title={Building Accurate Semantic Taxonomies from Monolingual MRDs},
author={German Rigau and Horacio Rodriguez (Departament de LSI, Universitat
Politecnica de Catalunya) and Eneko Agirre (Lengoia eta Informatikoak saila,
Euskal Erriko Universitatea)},
journal={arXiv preprint arXiv:cmp-lg/9806015},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806015},
primaryClass={cmp-lg cs.CL}
} | rigau1998building |
arxiv-669142 | cmp-lg/9806016 | Using WordNet for Building WordNets | <|reference_start|>Using WordNet for Building WordNets: This paper summarises a set of methodologies and techniques for the fast construction of multilingual WordNets. The English WordNet is used in this approach as a backbone for Catalan and Spanish WordNets and as a lexical knowledge resource for several subtasks.<|reference_end|> | arxiv | @article{farreres1998using,
title={Using WordNet for Building WordNets},
author={Xavier Farreres, German Rigau and Horacio Rodriguez (Departament de
Llenguatges i Sistemes Informatics of the Universitat Politecnica de
Catalunya)},
journal={arXiv preprint arXiv:cmp-lg/9806016},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806016},
primaryClass={cmp-lg cs.CL}
} | farreres1998using |
arxiv-669143 | cmp-lg/9806017 | Anchoring a Lexicalized Tree-Adjoining Grammar for Discourse | <|reference_start|>Anchoring a Lexicalized Tree-Adjoining Grammar for Discourse: We here explore a ``fully'' lexicalized Tree-Adjoining Grammar for discourse that takes the basic elements of a (monologic) discourse to be not simply clauses, but larger structures that are anchored on variously realized discourse cues. This link with intra-sentential grammar suggests an account for different patterns of discourse cues, while the different structures and operations suggest three separate sources for elements of discourse meaning: (1) a compositional semantics tied to the basic trees and operations; (2) a presuppositional semantics carried by cue phrases that freely adjoin to trees; and (3) general inference, that draws additional, defeasible conclusions that flesh out what is conveyed compositionally.<|reference_end|> | arxiv | @article{webber1998anchoring,
title={Anchoring a Lexicalized Tree-Adjoining Grammar for Discourse},
author={Bonnie Lynn Webber, Aravind K. Joshi (University of Pennsylvania)},
journal={Proceedings of COLING-ACL'98 Workshop on Discourse Relations and
Discourse Markers. (Reproduced with permission of the Universite de Montreal)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806017},
primaryClass={cmp-lg cs.CL}
} | webber1998anchoring |
arxiv-669144 | cmp-lg/9806018 | Never Look Back: An Alternative to Centering | <|reference_start|>Never Look Back: An Alternative to Centering: I propose a model for determining the hearer's attentional state which depends solely on a list of salient discourse entities (S-list). The ordering among the elements of the S-list covers also the function of the backward-looking center in the centering model. The ranking criteria for the S-list are based on the distinction between hearer-old and hearer-new discourse entities and incorporate preferences for inter- and intra-sentential anaphora. The model is the basis for an algorithm which operates incrementally, word by word.<|reference_end|> | arxiv | @article{strube1998never,
title={Never Look Back: An Alternative to Centering},
author={Michael Strube (IRCS, University of Pennsylvania)},
journal={Proceedings of COLING-ACL '98},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806018},
primaryClass={cmp-lg cs.CL}
} | strube1998never |
arxiv-669145 | cmp-lg/9806019 | An Empirical Investigation of Proposals in Collaborative Dialogues | <|reference_start|>An Empirical Investigation of Proposals in Collaborative Dialogues: We describe a corpus-based investigation of proposals in dialogue. First, we describe our DRI compliant coding scheme and report our inter-coder reliability results. Next, we test several hypotheses about what constitutes a well-formed proposal.<|reference_end|> | arxiv | @article{di eugenio1998an,
title={An Empirical Investigation of Proposals in Collaborative Dialogues},
author={Barbara Di Eugenio, Pamela W. Jordan, Johanna D. Moore and Richmond H.
Thomason (University of Pittsburgh)},
journal={Proceedings of COLING-ACL 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806019},
primaryClass={cmp-lg cs.CL}
} | di eugenio1998an |
arxiv-669146 | cmp-lg/9806020 | Textual Economy through Close Coupling of Syntax and Semantics | <|reference_start|>Textual Economy through Close Coupling of Syntax and Semantics: We focus on the production of efficient descriptions of objects, actions and events. We define a type of efficiency, textual economy, that exploits the hearer's recognition of inferential links to material elsewhere within a sentence. Textual economy leads to efficient descriptions because the material that supports such inferences has been included to satisfy independent communicative goals, and is therefore overloaded in Pollack's sense. We argue that achieving textual economy imposes strong requirements on the representation and reasoning used in generating sentences. The representation must support the generator's simultaneous consideration of syntax and semantics. Reasoning must enable the generator to assess quickly and reliably at any stage how the hearer will interpret the current sentence, with its (incomplete) syntax and semantics. We show that these representational and reasoning requirements are met in the SPUD system for sentence planning and realization.<|reference_end|> | arxiv | @article{stone1998textual,
title={Textual Economy through Close Coupling of Syntax and Semantics},
author={Matthew Stone, Bonnie Webber (University of Pennsylvania)},
journal={Proceedings 1998 Int'l Workshop on Natural Language Generation,
Niagara-on-the-Lake, Canada, August 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9806020},
primaryClass={cmp-lg cs.CL}
} | stone1998textual |
arxiv-669147 | cmp-lg/9807001 | Evaluating a Focus-Based Approach to Anaphora Resolution | <|reference_start|>Evaluating a Focus-Based Approach to Anaphora Resolution: We present an approach to anaphora resolution based on a focusing algorithm, and implemented within an existing MUC (Message Understanding Conference) Information Extraction system, allowing quantitative evaluation against a substantial corpus of annotated real-world texts. Extensions to the basic focusing mechanism can be easily tested, resulting in refinements to the mechanism and resolution rules. Results are compared with the results of a simpler heuristic-based approach.<|reference_end|> | arxiv | @article{azzam1998evaluating,
title={Evaluating a Focus-Based Approach to Anaphora Resolution},
author={Saliha Azzam, Kevin Humphreys and Robert Gaizauskas (University of
Sheffield)},
journal={Proceedings of COLING-ACL '98},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807001},
primaryClass={cmp-lg cs.CL}
} | azzam1998evaluating |
arxiv-669148 | cmp-lg/9807002 | The Role of Verbs in Document Analysis | <|reference_start|>The Role of Verbs in Document Analysis: We present results of two methods for assessing the event profile of news articles as a function of verb type. The unique contribution of this research is the focus on the role of verbs, rather than nouns. Two algorithms are presented and evaluated, one of which is shown to accurately discriminate documents by type and semantic properties, i.e. the event profile. The initial method, using WordNet (Miller et al. 1990), produced multiple cross-classification of articles, primarily due to the bushy nature of the verb tree coupled with the sense disambiguation problem. Our second approach using English Verb Classes and Alternations (EVCA) Levin (1993) showed that monosemous categorization of the frequent verbs in WSJ made it possible to usefully discriminate documents. For example, our results show that articles in which communication verbs predominate tend to be opinion pieces, whereas articles with a high percentage of agreement verbs tend to be about mergers or legal cases. An evaluation is performed on the results using Kendall's Tau. We present convincing evidence for using verb semantic classes as a discriminant in document classification.<|reference_end|> | arxiv | @article{klavans1998the,
title={The Role of Verbs in Document Analysis},
author={Judith L. Klavans (Center for Research on Information Access, Columbia
University), Min-Yen Kan (Computer Science, Columbia University)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL '98), Montreal, Canada: Aug. 1998. pp. 680-686.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807002},
primaryClass={cmp-lg cs.CL}
} | klavans1998the |
arxiv-669149 | cmp-lg/9807003 | Centering in Dynamic Semantics | <|reference_start|>Centering in Dynamic Semantics: Centering theory posits a discourse center, a distinguished discourse entity that is the topic of a discourse. A simplified version of this theory is developed in a Dynamic Semantics framework. In the resulting system, the mechanism of center shift allows a simple, elegant analysis of a variety of phenomena involving sloppy identity in ellipsis and ``paycheck pronouns''.<|reference_end|> | arxiv | @article{hardt1998centering,
title={Centering in Dynamic Semantics},
author={Daniel Hardt},
journal={Proceedings of COLING 96, Copenhagen, Denmark},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807003},
primaryClass={cmp-lg cs.CL}
} | hardt1998centering |
arxiv-669150 | cmp-lg/9807004 | Word Clustering and Disambiguation Based on Co-occurrence Data | <|reference_start|>Word Clustering and Disambiguation Based on Co-occurrence Data: We address the problem of clustering words (or constructing a thesaurus) based on co-occurrence data, and using the acquired word classes to improve the accuracy of syntactic disambiguation. We view this problem as that of estimating a joint probability distribution specifying the joint probabilities of word pairs, such as noun verb pairs. We propose an efficient algorithm based on the Minimum Description Length (MDL) principle for estimating such a probability distribution. Our method is a natural extension of those proposed in (Brown et al 92) and (Li & Abe 96), and overcomes their drawbacks while retaining their advantages. We then combined this clustering method with the disambiguation method of (Li & Abe 95) to derive a disambiguation method that makes use of both automatically constructed thesauruses and a hand-made thesaurus. The overall disambiguation accuracy achieved by our method is 85.2%, which compares favorably against the accuracy (82.4%) obtained by the state-of-the-art disambiguation method of (Brill & Resnik 94).<|reference_end|> | arxiv | @article{li1998word,
title={Word Clustering and Disambiguation Based on Co-occurrence Data},
author={Hang Li, Naoki Abe (NEC Corporation)},
journal={arXiv preprint arXiv:cmp-lg/9807004},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807004},
primaryClass={cmp-lg cs.CL}
} | li1998word |
arxiv-669151 | cmp-lg/9807005 | Graph Interpolation Grammars as Context-Free Automata | <|reference_start|>Graph Interpolation Grammars as Context-Free Automata: A derivation step in a Graph Interpolation Grammar has the effect of scanning an input token. This feature, which aims at emulating the incrementality of the natural parser, restricts the formal power of GIGs. This contrasts with the fact that the derivation mechanism involves a context-sensitive device similar to tree adjunction in TAGs. The combined effect of input-driven derivation and restricted context-sensitiveness would be conceivably unfortunate if it turned out that Graph Interpolation Languages did not subsume Context Free Languages while being partially context-sensitive. This report sets about examining relations between CFGs and GIGs, and shows that GILs are a proper superclass of CFLs. It also brings out a strong equivalence between CFGs and GIGs for the class of CFLs. Thus, it lays the basis for meaningfully investigating the amount of context-sensitiveness supported by GIGs, but leaves this investigation for further research.<|reference_end|> | arxiv | @article{larcheveque1998graph,
title={Graph Interpolation Grammars as Context-Free Automata},
author={John Larcheveque (INRIA)},
journal={arXiv preprint arXiv:cmp-lg/9807005},
year={1998},
number={RR-3456},
archivePrefix={arXiv},
eprint={cmp-lg/9807005},
primaryClass={cmp-lg cs.CL}
} | larcheveque1998graph |
arxiv-669152 | cmp-lg/9807006 | A Maximum-Entropy Partial Parser for Unrestricted Text | <|reference_start|>A Maximum-Entropy Partial Parser for Unrestricted Text: This paper describes a partial parser that assigns syntactic structures to sequences of part-of-speech tags. The program uses the maximum entropy parameter estimation method, which allows a flexible combination of different knowledge sources: the hierarchical structure, parts of speech and phrasal categories. In effect, the parser goes beyond simple bracketing and recognises even fairly complex structures. We give accuracy figures for different applications of the parser.<|reference_end|> | arxiv | @article{skut1998a,
title={A Maximum-Entropy Partial Parser for Unrestricted Text},
author={Wojciech Skut and Thorsten Brants (Computational Linguistics,
University of the Saarland, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9807006},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807006},
primaryClass={cmp-lg cs.CL}
} | skut1998a |
arxiv-669153 | cmp-lg/9807007 | Chunk Tagger - Statistical Recognition of Noun Phrases | <|reference_start|>Chunk Tagger - Statistical Recognition of Noun Phrases: We describe a stochastic approach to partial parsing, i.e., the recognition of syntactic structures of limited depth. The technique utilises Markov Models, but goes beyond usual bracketing approaches, since it is capable of recognising not only the boundaries, but also the internal structure and syntactic category of simple as well as complex NP's, PP's, AP's and adverbials. We compare tagging accuracy for different applications and encoding schemes.<|reference_end|> | arxiv | @article{skut1998chunk,
title={Chunk Tagger - Statistical Recognition of Noun Phrases},
author={Wojciech Skut and Thorsten Brants (Computational Linguistics,
University of the Saarland, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9807007},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807007},
primaryClass={cmp-lg cs.CL}
} | skut1998chunk |
arxiv-669154 | cmp-lg/9807008 | A Linguistically Interpreted Corpus of German Newspaper Text | <|reference_start|>A Linguistically Interpreted Corpus of German Newspaper Text: In this paper, we report on the development of an annotation scheme and annotation tools for unrestricted German text. Our representation format is based on argument structure, but also permits the extraction of other kinds of representations. We discuss several methodological issues and the analysis of some phenomena. Additional focus is on the tools developed in our project and their applications.<|reference_end|> | arxiv | @article{skut1998a,
title={A Linguistically Interpreted Corpus of German Newspaper Text},
author={Wojciech Skut, Thorsten Brants, Brigitte Krenn, Hans Uszkoreit
(Computational Linguistics, University of the Saarland, Germany)},
journal={arXiv preprint arXiv:cmp-lg/9807008},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807008},
primaryClass={cmp-lg cs.CL}
} | skut1998a |
arxiv-669155 | cmp-lg/9807009 | A Projection Architecture for Dependency Grammar and How it Compares to LFG | <|reference_start|>A Projection Architecture for Dependency Grammar and How it Compares to LFG: This paper explores commonalities and differences between \dachs, a variant of Dependency Grammar, and Lexical-Functional Grammar. \dachs\ is based on traditional linguistic insights, but on modern mathematical tools, aiming to integrate different knowledge systems (from syntax and semantics) via their coupling to an abstract syntactic primitive, the dependency relation. These knowledge systems correspond rather closely to projections in LFG. We will investigate commonalities arising from the usage of the projection approach in both theories, and point out differences due to the incompatible linguistic premises. The main difference to LFG lies in the motivation and status of the dimensions, and the information coded there. We will argue that LFG confounds different information in one projection, preventing it from achieving a good separation of alternatives and calling the motivation of the projection into question.<|reference_end|> | arxiv | @article{broeker1998a,
title={A Projection Architecture for Dependency Grammar and How it Compares to
LFG},
author={Norbert Broeker (IMS, Stuttgart University)},
journal={arXiv preprint arXiv:cmp-lg/9807009},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807009},
primaryClass={cmp-lg cs.CL}
} | broeker1998a |
arxiv-669156 | cmp-lg/9807010 | Automatically Creating Bilingual Lexicons for Machine Translation from Bilingual Text | <|reference_start|>Automatically Creating Bilingual Lexicons for Machine Translation from Bilingual Text: A method is presented for automatically augmenting the bilingual lexicon of an existing Machine Translation system, by extracting bilingual entries from aligned bilingual text. The proposed method only relies on the resources already available in the MT system itself. It is based on the use of bilingual lexical templates to match the terminal symbols in the parses of the aligned sentences.<|reference_end|> | arxiv | @article{turcato1998automatically,
title={Automatically Creating Bilingual Lexicons for Machine Translation from
Bilingual Text},
author={Davide Turcato (Simon Fraser University and TCC Communications,
Canada)},
journal={Proceedings of COLING-ACL'98},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807010},
primaryClass={cmp-lg cs.CL}
} | turcato1998automatically |
arxiv-669157 | cmp-lg/9807011 | Statistical Models for Unsupervised Prepositional Phrase Attachment | <|reference_start|>Statistical Models for Unsupervised Prepositional Phrase Attachment: We present several unsupervised statistical models for the prepositional phrase attachment task that approach the accuracy of the best supervised methods for this task. Our unsupervised approach uses a heuristic based on attachment proximity and trains from raw text that is annotated with only part-of-speech tags and morphological base forms, as opposed to attachment information. It is therefore less resource-intensive and more portable than previous corpus-based algorithms proposed for this task. We present results for prepositional phrase attachment in both English and Spanish.<|reference_end|> | arxiv | @article{ratnaparkhi1998statistical,
title={Statistical Models for Unsupervised Prepositional Phrase Attachment},
author={Adwait Ratnaparkhi (University of Pennsylvania)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL '98)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807011},
primaryClass={cmp-lg cs.CL}
} | ratnaparkhi1998statistical |
arxiv-669158 | cmp-lg/9807012 | Character design for soccer commentary | <|reference_start|>Character design for soccer commentary: In this paper we present early work on an animated talking head commentary system called {\bf Byrne}\footnote{David Byrne is the lead singer of the Talking Heads.}. The goal of this project is to develop a system which can take the output from the RoboCup soccer simulator, and generate appropriate affective speech and facial expressions, based on the character's personality, emotional state, and the state of play. Here we describe a system which takes pre-analysed simulator output as input, and which generates text marked-up for use by a speech generator and a face animation system. We make heavy use of inter-system standards, so that future versions of Byrne will be able to take advantage of advances in the technologies that it incorporates.<|reference_end|> | arxiv | @article{binsted1998character,
title={Character design for soccer commentary},
author={Kim Binsted (Sony Computer Science Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9807012},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807012},
primaryClass={cmp-lg cs.CL}
} | binsted1998character |
arxiv-669159 | cmp-lg/9807013 | Improving Data Driven Wordclass Tagging by System Combination | <|reference_start|>Improving Data Driven Wordclass Tagging by System Combination: In this paper we examine how the differences in modelling between different data driven systems performing the same NLP task can be exploited to yield a higher accuracy than the best individual system. We do this by means of an experiment involving the task of morpho-syntactic wordclass tagging. Four well-known tagger generators (Hidden Markov Model, Memory-Based, Transformation Rules and Maximum Entropy) are trained on the same corpus data. After comparison, their outputs are combined using several voting strategies and second stage classifiers. All combination taggers outperform their best component, with the best combination showing a 19.1% lower error rate than the best individual tagger.<|reference_end|> | arxiv | @article{van halteren1998improving,
title={Improving Data Driven Wordclass Tagging by System Combination},
author={Hans van Halteren (University of Nijmegen), Jakub Zavrel, Walter
Daelemans (Tilburg University)},
journal={Proceedings of the 17th International Conference on Computational
Linguistics (COLING-ACL'98)},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9807013},
primaryClass={cmp-lg cs.CL}
} | van halteren1998improving |
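A minimal sketch of the simplest of the combination strategies mentioned above, plain per-token majority voting over the component taggers' outputs. The four tagger names, their (made-up) predictions, and the tie-breaking by a fixed tagger ranking are all illustrative choices, not the paper's exact setup.

```python
from collections import Counter

def combine_by_voting(predictions, ranking):
    """Majority vote per token over several taggers' output sequences.

    predictions: dict tagger_name -> list of tags (all the same length)
    ranking:     list of tagger names, best first, used to break ties
    """
    length = len(next(iter(predictions.values())))
    assert all(len(tags) == length for tags in predictions.values())
    combined = []
    for i in range(length):
        votes = Counter(predictions[name][i] for name in predictions)
        best_count = max(votes.values())
        tied = [tag for tag, c in votes.items() if c == best_count]
        if len(tied) == 1:
            combined.append(tied[0])
        else:
            # Tie: fall back to the highest-ranked tagger whose tag is tied.
            for name in ranking:
                if predictions[name][i] in tied:
                    combined.append(predictions[name][i])
                    break
    return combined

# Invented example: four taggers labelling "Flies like honey".
preds = {
    "hmm":    ["NNS", "IN", "NN"],
    "memory": ["VBZ", "IN", "NN"],
    "rules":  ["NNS", "VB", "NN"],
    "maxent": ["NNS", "IN", "NN"],
}
print(combine_by_voting(preds, ["maxent", "hmm", "memory", "rules"]))
# -> ['NNS', 'IN', 'NN']
```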
arxiv-669160 | cmp-lg/9808001 | An Empirical Evaluation of Probabilistic Lexicalized Tree Insertion Grammars | <|reference_start|>An Empirical Evaluation of Probabilistic Lexicalized Tree Insertion Grammars: We present an empirical study of the applicability of Probabilistic Lexicalized Tree Insertion Grammars (PLTIG), a lexicalized counterpart to Probabilistic Context-Free Grammars (PCFG), to problems in stochastic natural-language processing. Comparing the performance of PLTIGs with non-hierarchical N-gram models and PCFGs, we show that PLTIG combines the best aspects of both, with language modeling capability comparable to N-grams, and improved parsing performance over its non-lexicalized counterpart. Furthermore, training of PLTIGs displays faster convergence than PCFGs.<|reference_end|> | arxiv | @article{hwa1998an,
title={An Empirical Evaluation of Probabilistic Lexicalized Tree Insertion
Grammars},
author={Rebecca Hwa (Harvard University)},
journal={Proceedings of COLING-ACL'98},
year={1998},
number={TR-06-98},
archivePrefix={arXiv},
eprint={cmp-lg/9808001},
primaryClass={cmp-lg cs.CL}
} | hwa1998an |
arxiv-669161 | cmp-lg/9808002 | Indexing with WordNet synsets can improve Text Retrieval | <|reference_start|>Indexing with WordNet synsets can improve Text Retrieval: The classical, vector space model for text retrieval is shown to give better results (up to 29% better in our experiments) if WordNet synsets are chosen as the indexing space, instead of word forms. This result is obtained for a manually disambiguated test collection (of queries and documents) derived from the Semcor semantic concordance. The sensitivity of retrieval performance to (automatic) disambiguation errors when indexing documents is also measured. Finally, it is observed that if queries are not disambiguated, indexing by synsets performs (at best) only as good as standard word indexing.<|reference_end|> | arxiv | @article{gonzalo1998indexing,
title={Indexing with WordNet synsets can improve Text Retrieval},
author={Julio Gonzalo, Felisa Verdejo, Irina Chugur and Juan Cigarran (UNED,
Spain)},
journal={Proceedings of the COLING/ACL'98 Workshop on Usage of WordNet for
NLP, Montreal, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808002},
primaryClass={cmp-lg cs.CL}
} | gonzalo1998indexing |
arxiv-669162 | cmp-lg/9808003 | Parallel Strands: A Preliminary Investigation into Mining the Web for Bilingual Text | <|reference_start|>Parallel Strands: A Preliminary Investigation into Mining the Web for Bilingual Text: Parallel corpora are a valuable resource for machine translation, but at present their availability and utility is limited by genre- and domain-specificity, licensing restrictions, and the basic difficulty of locating parallel texts in all but the most dominant of the world's languages. A parallel corpus resource not yet explored is the World Wide Web, which hosts an abundance of pages in parallel translation, offering a potential solution to some of these problems and unique opportunities of its own. This paper presents the necessary first step in that exploration: a method for automatically finding parallel translated documents on the Web. The technique is conceptually simple, fully language independent, and scalable, and preliminary evaluation results indicate that the method may be accurate enough to apply without human intervention.<|reference_end|> | arxiv | @article{resnik1998parallel,
title={Parallel Strands: A Preliminary Investigation into Mining the Web for
Bilingual Text},
author={Philip Resnik (University of Maryland)},
journal={Proceedings of AMTA-98},
year={1998},
number={UMIACS TR 98-41},
archivePrefix={arXiv},
eprint={cmp-lg/9808003},
primaryClass={cmp-lg cs.CL}
} | resnik1998parallel |
arxiv-669163 | cmp-lg/9808004 | Word Length Frequency and Distribution in English: Observations, Theory, and Implications for the Construction of Verse Lines | <|reference_start|>Word Length Frequency and Distribution in English: Observations, Theory, and Implications for the Construction of Verse Lines: Recent observations in the theory of verse and empirical metrics have suggested that constructing a verse line involves a pattern-matching search through a source text, and that the number of found elements (complete words totaling a specified number of syllables) is given by dividing the total number of words by the mean number of syllables per word in the source text. This paper makes this latter point explicit mathematically, and in the course of this demonstration shows that the word length frequency totals in English output are distributed geometrically (previous researchers reported an adjusted Poisson distribution), and that the sequential distribution is random at the global level, with significant non-randomness in the fine structure. Data from a corpus of just under two million words, and a syllable-count lexicon of 71,000 word-forms is reported. The pattern-matching theory is shown to be internally coherent, and it is observed that some of the analytic techniques described here form a satisfactory test for regular (isometric) lineation in a text.<|reference_end|> | arxiv | @article{aoyama1998word,
title={Word Length Frequency and Distribution in English: Observations, Theory,
and Implications for the Construction of Verse Lines},
author={Hideaki Aoyama and John Constable},
journal={arXiv preprint arXiv:cmp-lg/9808004},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808004},
primaryClass={cmp-lg cs.CL}
} | aoyama1998word |
arxiv-669164 | cmp-lg/9808005 | Combining Expression and Content in Domains for Dialog Managers | <|reference_start|>Combining Expression and Content in Domains for Dialog Managers: We present work in progress on abstracting dialog managers from their domain in order to implement a dialog manager development tool which takes (among other data) a domain description as input and delivers a new dialog manager for the described domain as output. In doing so, we will focus on two topics: firstly, the construction of domain descriptions with description logics, and secondly, the interpretation of utterances in a given domain.<|reference_end|> | arxiv | @article{ludwig1998combining,
title={Combining Expression and Content in Domains for Dialog Managers},
author={Bernd Ludwig, Guenther Goerz, Heinrich Niemann (FORWISS and Institute
of Computer Science, University of Erlangen)},
journal={Proceedings of DL '98, pp. 126-130, Trento, Italy},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808005},
primaryClass={cmp-lg cs.CL}
} | ludwig1998combining |
arxiv-669165 | cmp-lg/9808006 | Isometric Lineation in English Texts: An Empirical and Mathematical Examination of its Character and Consequences | <|reference_start|>Isometric Lineation in English Texts: An Empirical and Mathematical Examination of its Character and Consequences: In this paper we build on earlier observations and theory regarding word length frequency and sequential distribution to develop a mathematical characterization of some of the language features distinguishing isometrically lineated text from unlineated text, in other words the features distinguishing isometrical verse from prose. It is shown that the frequency of syllables making complete words produces a flat distribution for prose, while that for verse exhibits peaks at the line length position and subsequent multiples of that position. Data from several verse authors is presented, including a detailed mathematical analysis of the dynamics underlying peak creation, and comments are offered on the processes by which authors construct lines. We note that the word-length sequence of prose is random, whereas lineation necessitates non-random word-length sequencing, and that this has the probable consequence of introducing a degree of randomness into the otherwise highly ordered grammatical sequence. In addition we observe that this effect can be ameliorated by a reduction in the mean word length of the text (confirming earlier observations that verse tends to use shorter words) and the use of lines varying from the core isometrical set. The frequency of variant lines is shown to be coincident with the frequency of polysyllables, suggesting that the use of variant lines is motivated by polysyllabic word placement. The restrictive effects of different line lengths, the relationship between metrical restriction and poetic effect, and the general character of metrical rules are also discussed.<|reference_end|> | arxiv | @article{aoyama1998isometric,
title={Isometric Lineation in English Texts: An Empirical and Mathematical
Examination of its Character and Consequences},
author={Hideaki Aoyama and John Constable},
journal={arXiv preprint arXiv:cmp-lg/9808006},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808006},
primaryClass={cmp-lg cs.CL}
} | aoyama1998isometric |
arxiv-669166 | cmp-lg/9808007 | Some Properties of Preposition and Subordinate Conjunction Attachments | <|reference_start|>Some Properties of Preposition and Subordinate Conjunction Attachments: Determining the attachments of prepositions and subordinate conjunctions is a key problem in parsing natural language. This paper presents a trainable approach to making these attachments through transformation sequences and error-driven learning. Our approach is broad coverage, and accounts for roughly three times the attachment cases that have previously been handled by corpus-based techniques. In addition, our approach is based on a simplified model of syntax that is more consistent with the practice in current state-of-the-art language processing systems. This paper sketches syntactic and algorithmic details, and presents experimental results on data sets derived from the Penn Treebank. We obtain an attachment accuracy of 75.4% for the general case, the first such corpus-based result to be reported. For the restricted cases previously studied with corpus-based methods, our approach yields an accuracy comparable to current work (83.1%).<|reference_end|> | arxiv | @article{yeh1998some,
title={Some Properties of Preposition and Subordinate Conjunction Attachments},
author={Alexander S. Yeh and Marc B. Vilain (Mitre Corporation)},
journal={Proceedings of COLING-ACL '98: 36th Annual Meeting of the
Association for Computational Linguistics and 17th International Conference
on Computational Linguistics, Montreal, Canada, 1998. Pages 1436-1442.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808007},
primaryClass={cmp-lg cs.CL}
} | yeh1998some |
arxiv-669167 | cmp-lg/9808008 | Deriving the Predicate-Argument Structure for a Free Word Order Language | <|reference_start|>Deriving the Predicate-Argument Structure for a Free Word Order Language: In relatively free word order languages, grammatical functions are intricately related to case marking. Assuming an ordered representation of the predicate-argument structure, this work proposes a Combinatory Categorial Grammar formulation of relating surface case cues to categories and types for correctly placing the arguments in the predicate-argument structure. This is achieved by assigning case markers GF-encoding categories. Unlike other CG formulations, type shifting does not proliferate or cause spurious ambiguity. Categories of all argument-encoding grammatical functions follow from the same principle of category assignment. Normal order evaluation of the combinatory form reveals the predicate-argument structure. Application of the method to Turkish is shown.<|reference_end|> | arxiv | @article{bozsahin1998deriving,
title={Deriving the Predicate-Argument Structure for a Free Word Order Language},
author={Cem Bozsahin (Middle East Technical University, Ankara)},
journal={Proceedings of COLING-ACL '98, 167-173},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808008},
primaryClass={cmp-lg cs.CL}
} | bozsahin1998deriving |
arxiv-669168 | cmp-lg/9808009 | How to define a context-free backbone for DGs: Implementing a DG in the LFG formalism | <|reference_start|>How to define a context-free backbone for DGs: Implementing a DG in the LFG formalism: This paper presents a multidimensional Dependency Grammar (DG), which decouples the dependency tree from word order, such that surface ordering is not determined by traversing the dependency tree. We develop the notion of a \emph{word order domain structure}, which is linked but structurally dissimilar to the syntactic dependency tree. We then discuss the implementation of such a DG using constructs from a unification-based phrase-structure approach, namely Lexical-Functional Grammar (LFG). Particular attention is given to the analysis of discontinuities in DG in terms of LFG's functional uncertainty.<|reference_end|> | arxiv | @article{broeker1998how,
title={How to define a context-free backbone for DGs: Implementing a DG in the
LFG formalism},
author={Norbert Broeker (IMS, Stuttgart University)},
journal={Proc. COLING-ACL'98 Workshop ``Processing of Dependency-Based
Grammars'', pp 29-38},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808009},
primaryClass={cmp-lg cs.CL}
} | broeker1998how |
arxiv-669169 | cmp-lg/9808010 | Letter to Sound Rules for Accented Lexicon Compression | <|reference_start|>Letter to Sound Rules for Accented Lexicon Compression: This paper presents trainable methods for generating letter to sound rules from a given lexicon for use in pronouncing out-of-vocabulary words and as a method for lexicon compression. As the relationship between a string of letters and a string of phonemes representing its pronunciation for many languages is not trivial, we discuss two alignment procedures, one fully automatic and one hand-seeded which produce reasonable alignments of letters to phones. Top Down Induction Tree models are trained on the aligned entries. We show how combined phoneme/stress prediction is better than separate prediction processes, and still better when including in the model the last phonemes transcribed and part of speech information. For the lexicons we have tested, our models have a word accuracy (including stress) of 78% for OALD, 62% for CMU and 94% for BRULEX. The extremely high scores on the training sets allow substantial size reductions (more than 1/20). WWW site: http://tcts.fpms.ac.be/synthesis/mbrdico<|reference_end|> | arxiv | @article{pagel1998letter,
title={Letter to Sound Rules for Accented Lexicon Compression},
author={V. Pagel, K. Lenzo, A. Black},
journal={arXiv preprint arXiv:cmp-lg/9808010},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808010},
primaryClass={cmp-lg cs.CL}
} | pagel1998letter |
arxiv-669170 | cmp-lg/9808011 | Primitive Part-of-Speech Tagging using Word Length and Sentential Structure | <|reference_start|>Primitive Part-of-Speech Tagging using Word Length and Sentential Structure: It has been argued that, when learning a first language, babies use a series of small clues to aid recognition and comprehension, and that one of these clues is word length. In this paper we present a statistical part of speech tagger which trains itself solely on the number of letters in each word in a sentence.<|reference_end|> | arxiv | @article{cozens1998primitive,
title={Primitive Part-of-Speech Tagging using Word Length and Sentential
Structure},
author={Simon Cozens},
journal={arXiv preprint arXiv:cmp-lg/9808011},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808011},
primaryClass={cmp-lg cs.CL}
} | cozens1998primitive |
arxiv-669171 | cmp-lg/9808012 | Separating Surface Order and Syntactic Relations in a Dependency Grammar | <|reference_start|>Separating Surface Order and Syntactic Relations in a Dependency Grammar: This paper proposes decoupling the dependency tree from word order, such that surface ordering is not determined by traversing the dependency tree. We develop the notion of a \emph{word order domain structure}, which is linked but structurally dissimilar to the syntactic dependency tree. The proposal results in a lexicalized, declarative, and formally precise description of word order, features which previous proposals for dependency grammars lack. Contrary to other lexicalized approaches to word order, our proposal does not require lexical ambiguities for ordering alternatives.<|reference_end|> | arxiv | @article{broeker1998separating,
title={Separating Surface Order and Syntactic Relations in a Dependency Grammar},
author={Norbert Broeker (IMS, Stuttgart University)},
journal={Proc. COLING-ACL'98, pp 174-180},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808012},
primaryClass={cmp-lg cs.CL}
} | broeker1998separating |
arxiv-669172 | cmp-lg/9808013 | Partial Evaluation for Efficient Access to Inheritance Lexicons | <|reference_start|>Partial Evaluation for Efficient Access to Inheritance Lexicons: Multiple default inheritance formalisms for lexicons have attracted much interest in recent years. I propose a new efficient method to access such lexicons. After showing two basic strategies for lookup in inheritance lexicons, a compromise is developed which combines to a large degree (from a practical point of view) the advantages of both strategies and avoids their disadvantages. The method is a kind of (off-line) partial evaluation that makes a subset of inherited information explicit before using the lexicon. I identify the parts of a lexicon which should be evaluated, and show how partial evaluation works for inheritance lexicons. Finally, the theoretical results are confirmed by a complete implementation. Speedups by a factor of 10-100 are reached.<|reference_end|> | arxiv | @article{hartrumpf1998partial,
title={Partial Evaluation for Efficient Access to Inheritance Lexicons},
author={Sven Hartrumpf (University of Hagen, Germany)},
journal={Proceedings of the 2nd International Conference on Recent Advances
in Natural Language Processing (RANLP-97), pp. 43-50, Tzigov Chark, Bulgaria,
September 1997.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808013},
primaryClass={cmp-lg cs.CL}
} | hartrumpf1998partial |
arxiv-669173 | cmp-lg/9808014 | Spotting Prosodic Boundaries in Continuous Speech in French | <|reference_start|>Spotting Prosodic Boundaries in Continuous Speech in French: A radio speech corpus of 9 minutes has been prosodically marked by an expert phonetician and by non-expert listeners. This corpus is large enough to train and test an automatic boundary spotting system, namely a time-delay neural network fed with F0 values, vowels and pseudo-syllable durations. Results validate both prosodic marking and automatic spotting of prosodic events.<|reference_end|> | arxiv | @article{pagel1998spotting,
title={Spotting Prosodic Boundaries in Continuous Speech in French},
author={V. Pagel, N. Carbonell, Y. Laprie, J. Vaissiere},
journal={arXiv preprint arXiv:cmp-lg/9808014},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808014},
primaryClass={cmp-lg cs.CL}
} | pagel1998spotting |
arxiv-669174 | cmp-lg/9808015 | Error-Driven Pruning of Treebank Grammars for Base Noun Phrase Identification | <|reference_start|>Error-Driven Pruning of Treebank Grammars for Base Noun Phrase Identification: Finding simple, non-recursive, base noun phrases is an important subtask for many natural language processing applications. While previous empirical methods for base NP identification have been rather complex, this paper instead proposes a very simple algorithm that is tailored to the relative simplicity of the task. In particular, we present a corpus-based approach for finding base NPs by matching part-of-speech tag sequences. The training phase of the algorithm is based on two successful techniques: first the base NP grammar is read from a ``treebank'' corpus; then the grammar is improved by selecting rules with high ``benefit'' scores. Using this simple algorithm with a naive heuristic for matching rules, we achieve surprising accuracy in an evaluation on the Penn Treebank Wall Street Journal.<|reference_end|> | arxiv | @article{cardie1998error-driven,
title={Error-Driven Pruning of Treebank Grammars for Base Noun Phrase
Identification},
author={Claire Cardie, David Pierce (Cornell University)},
journal={Proceedings of COLING-ACL'98, pages 218-224.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808015},
primaryClass={cmp-lg cs.CL}
} | cardie1998error-driven |
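The matching step described above can be illustrated with a toy longest-match bracketer. The handful of tag-sequence rules and the example sentence are invented; in the paper the rules are read off the Penn Treebank and then pruned by their benefit scores, and the matching heuristic is deliberately naive.

```python
# Toy base-NP bracketer: greedy longest match of known POS tag sequences.
# The rule set here is invented; in the paper the rules are extracted from a
# treebank and then pruned according to their "benefit" on held-out data.
BASE_NP_RULES = {
    ("DT", "JJ", "NN"),
    ("DT", "NN"),
    ("JJ", "NNS"),
    ("NN",),
    ("NNS",),
    ("PRP",),
}
MAX_LEN = max(len(r) for r in BASE_NP_RULES)

def bracket_base_nps(tagged_sentence):
    """tagged_sentence: list of (word, tag) pairs. Returns the tokens with
    base NPs wrapped in brackets, using greedy longest-match from the left."""
    out, i = [], 0
    while i < len(tagged_sentence):
        match = None
        for length in range(min(MAX_LEN, len(tagged_sentence) - i), 0, -1):
            tags = tuple(t for _, t in tagged_sentence[i:i + length])
            if tags in BASE_NP_RULES:
                match = length
                break
        if match:
            words = [w for w, _ in tagged_sentence[i:i + match]]
            out.append("[ " + " ".join(words) + " ]")
            i += match
        else:
            out.append(tagged_sentence[i][0])
            i += 1
    return out

sent = [("the", "DT"), ("quick", "JJ"), ("fox", "NN"),
        ("jumps", "VBZ"), ("over", "IN"), ("lazy", "JJ"), ("dogs", "NNS")]
print(" ".join(bracket_base_nps(sent)))
# -> [ the quick fox ] jumps over [ lazy dogs ]
```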
arxiv-669175 | cmp-lg/9808016 | Segregatory Coordination and Ellipsis in Text Generation | <|reference_start|>Segregatory Coordination and Ellipsis in Text Generation: In this paper, we provide an account of how to generate sentences with coordination constructions from clause-sized semantic representations. An algorithm is developed to generate sentences with ellipsis, gapping, right-node-raising, and non-constituent coordination constructions. Various examples from linguistic literature will be used to demonstrate that the algorithm does its job well.<|reference_end|> | arxiv | @article{shaw1998segregatory,
title={Segregatory Coordination and Ellipsis in Text Generation},
author={James Shaw (Columbia University)},
journal={COLING-ACL'98, pp. 1220-1226},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808016},
primaryClass={cmp-lg cs.CL}
} | shaw1998segregatory |
arxiv-669176 | cmp-lg/9808017 | A Variant of Earley Parsing | <|reference_start|>A Variant of Earley Parsing: The Earley algorithm is a widely used parsing method in natural language processing applications. We introduce a variant of Earley parsing that is based on a ``delayed'' recognition of constituents. This allows us to start the recognition of a constituent only in cases in which all of its subconstituents have been found within the input string. This is particularly advantageous in several cases in which partial analysis of a constituent cannot be completed and in general in all cases of productions sharing some suffix of their right-hand sides (even for different left-hand side nonterminals). Although the two algorithms result in the same asymptotic time and space complexity, from a practical perspective our algorithm improves the time and space requirements of the original method, as shown by reported experimental results.<|reference_end|> | arxiv | @article{nederhof1998a,
title={A Variant of Earley Parsing},
author={Mark-Jan Nederhof (University of Groningen) and Giorgio Satta
(Universita di Padova)},
journal={AI*IA 97: Advances in Artificial Intelligence, 5th Congress of the
Italian Association for Artificial Intelligence, LNAI 1321, Springer Verlag,
pages 84-95, 1997.},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9808017},
primaryClass={cmp-lg cs.CL}
} | nederhof1998a |
arxiv-669177 | cmp-lg/9809001 | Towards an implementable dependency grammar | <|reference_start|>Towards an implementable dependency grammar: The aim of this paper is to define a dependency grammar framework which is both linguistically motivated and computationally parsable. See the demo at http://www.conexor.fi/analysers.html#testing<|reference_end|> | arxiv | @article{jarvinen1998towards,
title={Towards an implementable dependency grammar},
author={Timo Jarvinen and Pasi Tapanainen},
journal={in CoLing-ACL'98 workshop 'Processing of Dependency-Based
Grammars', Kahane and Polguere (eds), p. 1-10, Montreal, Canada, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9809001},
primaryClass={cmp-lg cs.CL}
} | jarvinen1998towards |
arxiv-669178 | cmp-lg/9809002 | Some Ontological Principles for Designing Upper Level Lexical Resources | <|reference_start|>Some Ontological Principles for Designing Upper Level Lexical Resources: The purpose of this paper is to explore some semantic problems related to the use of linguistic ontologies in information systems, and to suggest some organizing principles aimed at solving such problems. The taxonomic structure of current ontologies is unfortunately quite complicated and hard to understand, especially as far as the upper levels are concerned. I will focus here on the problem of ISA overloading, which I believe is chiefly responsible for these difficulties. To this end, I will carefully analyze the ontological nature of the categories used in current upper-level structures, considering the necessity of splitting them according to more subtle distinctions or the opportunity of excluding them because of their limited organizational role.<|reference_end|> | arxiv | @article{guarino1998some,
title={Some Ontological Principles for Designing Upper Level Lexical Resources},
author={Nicola Guarino (LADSEB-CNR, Padova, Italy)},
journal={in Proc. of First International Conference on Language Resources
and Evaluation, Rubio, Gallardo, Castro, and Tejada (eds.), p. 527-534,
Granada, Spain, 1998},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9809002},
primaryClass={cmp-lg cs.CL}
} | guarino1998some |
arxiv-669179 | cmp-lg/9809003 | A Comparison of WordNet and Roget's Taxonomy for Measuring Semantic Similarity | <|reference_start|>A Comparison of WordNet and Roget's Taxonomy for Measuring Semantic Similarity: This paper presents the results of using Roget's International Thesaurus as the taxonomy in a semantic similarity measurement task. Four similarity metrics were taken from the literature and applied to Roget's. The experimental evaluation suggests that the traditional edge counting approach does surprisingly well (a correlation of r=0.88 with a benchmark set of human similarity judgements, with an upper bound of r=0.90 for human subjects performing the same task).<|reference_end|> | arxiv | @article{hale1998a,
title={A Comparison of WordNet and Roget's Taxonomy for Measuring Semantic
Similarity},
author={Michael Mc Hale (Air Force Research Laboratory)},
journal={arXiv preprint arXiv:cmp-lg/9809003},
year={1998},
archivePrefix={arXiv},
eprint={cmp-lg/9809003},
primaryClass={cmp-lg cs.CL}
} | hale1998a |
arxiv-669180 | cond-mat/0001137 | The number of guards needed by a museum: A phase transition in vertex covering of random graphs | <|reference_start|>The number of guards needed by a museum: A phase transition in vertex covering of random graphs: In this letter we study the NP-complete vertex cover problem on finite connectivity random graphs. When the allowed size of the cover set is decreased, a discontinuous transition in solvability and typical-case complexity occurs. This transition is characterized by means of exact numerical simulations as well as by analytical replica calculations. The replica symmetric phase diagram is in excellent agreement with numerical findings up to average connectivity $e$, where replica symmetry becomes locally unstable.<|reference_end|> | arxiv | @article{weigt2000the,
title={The number of guards needed by a museum: A phase transition in vertex
covering of random graphs},
author={Martin Weigt and Alexander K. Hartmann},
journal={Phys. Rev. Lett. 84, 6118 (2000)},
year={2000},
doi={10.1103/PhysRevLett.84.6118},
archivePrefix={arXiv},
eprint={cond-mat/0001137},
primaryClass={cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | weigt2000the |
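A small sketch of the decision problem behind the transition described above: draw a random graph with N vertices and cN edges and ask whether a vertex cover with xN vertices exists. The search below is the textbook branch-on-an-edge procedure (O(2^k) in the allowed cover size k), not the authors' branch-and-bound code, and the instance is kept tiny so it runs instantly.

```python
import random

def random_graph(n, m, seed=0):
    """Random graph with n vertices and m distinct edges (no self-loops)."""
    rng = random.Random(seed)
    edges = set()
    while len(edges) < m:
        u, v = rng.sample(range(n), 2)
        edges.add((min(u, v), max(u, v)))
    return list(edges)

def has_cover(edges, k):
    """True iff some vertex set of size <= k touches every edge.
    Classic bounded search: pick an uncovered edge and branch on which
    of its two endpoints joins the cover."""
    if not edges:
        return True
    if k == 0:
        return False
    u, v = edges[0]
    rest_u = [e for e in edges if u not in e]   # edges still uncovered if u is taken
    rest_v = [e for e in edges if v not in e]   # edges still uncovered if v is taken
    return has_cover(rest_u, k - 1) or has_cover(rest_v, k - 1)

n, c = 20, 1.0                       # tiny instance; the paper works with large N
edges = random_graph(n, int(c * n))
for x in (0.2, 0.3, 0.4, 0.5):
    print("x = %.1f: cover of size %2d exists? %s"
          % (x, int(x * n), has_cover(edges, int(x * n))))
```

Sweeping x over many random instances at a given c is what traces out the coverable/uncoverable threshold x_c(c) discussed in the abstract.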
arxiv-669181 | cond-mat/0002331 | From naive to sophisticated behavior in multiagents based financial market models | <|reference_start|>From naive to sophisticated behavior in multiagents based financial market models: We discuss the behavior of two quantities, the physical complexity and the mutual information function, of the outcome of a model of heterogeneous, inductive rational agents inspired by the El Farol Bar problem and the Minority Game. The first is a measure rooted in Kolmogorov-Chaitin theory and the second a measure related to Shannon's information entropy. We perform extensive computer simulations, as a result of which we propose an ansatz for the physical complexity and establish the dependence of the exponent of that ansatz on the parameters of the model. We discuss the accuracy of our results and the relationship with the behavior of the mutual information function as a measure of time correlations of the agents' choices.<|reference_end|> | arxiv | @article{mansilla2000from,
title={From naive to sophisticated behavior in multiagents based financial
market models},
author={Ricardo Mansilla},
journal={arXiv preprint arXiv:cond-mat/0002331},
year={2000},
doi={10.1016/S0378-4371(00)00227-2},
archivePrefix={arXiv},
eprint={cond-mat/0002331},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.CE nlin.AO physics.data-an q-fin.TR}
} | mansilla2000from |
arxiv-669182 | cond-mat/0002469 | Non-equilibrium Surface Growth and Scalability of Parallel Algorithms for Large Asynchronous Systems | <|reference_start|>Non-equilibrium Surface Growth and Scalability of Parallel Algorithms for Large Asynchronous Systems: The scalability of massively parallel algorithms is a fundamental question in computer science. We study the scalability and the efficiency of a conservative massively parallel algorithm for discrete-event simulations where the discrete events are Poisson arrivals. The parallel algorithm is applicable to a wide range of problems, including dynamic Monte Carlo simulations for large asynchronous systems with short-range interactions. The evolution of the simulated time horizon is analogous to a growing and fluctuating surface, and the efficiency of the algorithm corresponds to the density of local minima of this surface. In one dimension we find that the steady state of the macroscopic landscape is governed by the Edwards-Wilkinson Hamiltonian, which implies that the algorithm is scalable. Preliminary results for higher-dimensional logical topologies are discussed.<|reference_end|> | arxiv | @article{korniss2000non-equilibrium,
title={Non-equilibrium Surface Growth and Scalability of Parallel Algorithms
for Large Asynchronous Systems},
author={G. Korniss, M.A. Novotny, Z. Toroczkai, and P.A. Rikvold},
journal={Springer Proceedings in Physics, Vol. 86, Computer Simulation
Studies in Condensed-Matter Physics XIII, edited by D.P. Landau, S.P. Lewis,
and H.-B. Schuttler (Springer, Berlin, 2001) p. 183.},
year={2000},
archivePrefix={arXiv},
eprint={cond-mat/0002469},
primaryClass={cond-mat.stat-mech cs.PF physics.comp-ph}
} | korniss2000non-equilibrium |
arxiv-669183 | cond-mat/0006316 | Statistical mechanics perspective on the phase transition in vertex covering finite-connectivity random graphs | <|reference_start|>Statistical mechanics perspective on the phase transition in vertex covering finite-connectivity random graphs: The vertex-cover problem is studied for random graphs $G_{N,cN}$ having $N$ vertices and $cN$ edges. Exact numerical results are obtained by a branch-and-bound algorithm. It is found that a transition in the coverability at a $c$-dependent threshold $x=x_c(c)$ appears, where $xN$ is the cardinality of the vertex cover. This transition coincides with a sharp peak of the typical numerical effort, which is needed to decide whether there exists a cover with $xN$ vertices or not. For small edge concentrations $c\ll 0.5$, a cluster expansion is performed, giving very accurate results in this regime. These results are extended using methods developed in statistical physics. The so called annealed approximation reproduces a rigorous bound on $x_c(c)$ which was known previously. The main part of the paper contains an application of the replica method. Within the replica symmetric ansatz the threshold $x_c(c)$ and the critical backbone size $b_c(c)$ can be calculated. For $c<e/2$ the results show an excellent agreement with the numerical findings. At average vertex degree $2c=e$, an instability of the simple replica symmetric solution occurs.<|reference_end|> | arxiv | @article{hartmann2000statistical,
title={Statistical mechanics perspective on the phase transition in vertex
covering finite-connectivity random graphs},
author={Alexander K. Hartmann and Martin Weigt},
journal={Theoretical Computer Science 265, 199 (2001)},
year={2000},
archivePrefix={arXiv},
eprint={cond-mat/0006316},
primaryClass={cond-mat.stat-mech cs.CC}
} | hartmann2000statistical |
arxiv-669184 | cond-mat/0009165 | Occam factors and model-independent Bayesian learning of continuous distributions | <|reference_start|>Occam factors and model-independent Bayesian learning of continuous distributions: Learning of a smooth but nonparametric probability density can be regularized using methods of Quantum Field Theory. We implement a field theoretic prior numerically, test its efficacy, and show that the data and the phase space factors arising from the integration over the model space determine the free parameter of the theory ("smoothness scale") self-consistently. This persists even for distributions that are atypical in the prior and is a step towards a model-independent theory for learning continuous distributions. Finally, we point out that a wrong parameterization of a model family may sometimes be advantageous for small data sets.<|reference_end|> | arxiv | @article{nemenman2000occam,
title={Occam factors and model-independent Bayesian learning of continuous
distributions},
author={Ilya Nemenman and William Bialek},
journal={Phys. Rev. E, 65 (2), 2002},
year={2000},
doi={10.1103/PhysRevE.65.026137},
archivePrefix={arXiv},
eprint={cond-mat/0009165},
primaryClass={cond-mat cs.LG nlin.AO physics.data-an}
} | nemenman2000occam |
arxiv-669185 | cond-mat/0009417 | Typical solution time for a vertex-covering algorithm on finite-connectivity random graphs | <|reference_start|>Typical solution time for a vertex-covering algorithm on finite-connectivity random graphs: In this letter, we analytically describe the typical solution time needed by a backtracking algorithm to solve the vertex-cover problem on finite-connectivity random graphs. We find two different transitions: The first one is algorithm-dependent and marks the dynamical transition from linear to exponential solution times. The second one gives the maximum computational complexity, and is found exactly at the threshold where the system undergoes an algorithm-independent phase transition in its solvability. Analytical results are corroborated by numerical simulations.<|reference_end|> | arxiv | @article{weigt2000typical,
title={Typical solution time for a vertex-covering algorithm on
finite-connectivity random graphs},
author={Martin Weigt and Alexander K. Hartmann},
journal={Phys. Rev. Lett. 86, 1658 (2001)},
year={2000},
doi={10.1103/PhysRevLett.86.1658},
archivePrefix={arXiv},
eprint={cond-mat/0009417},
primaryClass={cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | weigt2000typical |
arxiv-669186 | cond-mat/0010337 | Optimization with Extremal Dynamics | <|reference_start|>Optimization with Extremal Dynamics: We explore a new general-purpose heuristic for finding high-quality solutions to hard optimization problems. The method, called extremal optimization, is inspired by self-organized criticality, a concept introduced to describe emergent complexity in physical systems. Extremal optimization successively replaces extremely undesirable variables of a single sub-optimal solution with new, random ones. Large fluctuations ensue, that efficiently explore many local optima. With only one adjustable parameter, the heuristic's performance has proven competitive with more elaborate methods, especially near phase transitions which are believed to coincide with the hardest instances. We use extremal optimization to elucidate the phase transition in the 3-coloring problem, and we provide independent confirmation of previously reported extrapolations for the ground-state energy of ±J spin glasses in d=3 and 4.<|reference_end|> | arxiv | @article{boettcher2000optimization,
title={Optimization with Extremal Dynamics},
author={S. Boettcher (Emory U.) and A. G. Percus (Los Alamos)},
journal={Phys. Rev. Lett, 86 (2001) 5211},
year={2000},
doi={10.1103/PhysRevLett.86.5211},
archivePrefix={arXiv},
eprint={cond-mat/0010337},
primaryClass={cond-mat.stat-mech cs.NE math.OC}
} | boettcher2000optimization |
arxiv-669187 | cond-mat/0011181 | Simplest random K-satisfiability problem | <|reference_start|>Simplest random K-satisfiability problem: We study a simple and exactly solvable model for the generation of random satisfiability problems. These consist of $\gamma N$ random boolean constraints which are to be satisfied simultaneously by $N$ logical variables. In statistical-mechanics language, the considered model can be seen as a diluted p-spin model at zero temperature. While such problems become extraordinarily hard to solve by local search methods in a large region of the parameter space, still at least one solution may be superimposed by construction. The statistical properties of the model can be studied exactly by the replica method and each single instance can be analyzed in polynomial time by a simple global solution method. The geometrical/topological structures responsible for dynamic and static phase transitions as well as for the onset of computational complexity in local search method are thoroughly analyzed. Numerical analysis on very large samples allows for a precise characterization of the critical scaling behaviour.<|reference_end|> | arxiv | @article{ricci-tersenghi2000simplest,
title={Simplest random K-satisfiability problem},
author={F. Ricci-Tersenghi, M. Weigt and R. Zecchina},
journal={Phys. Rev. E 63, 026702 (2001)},
year={2000},
doi={10.1103/PhysRevE.63.026702},
archivePrefix={arXiv},
eprint={cond-mat/0011181},
primaryClass={cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | ricci-tersenghi2000simplest |
arxiv-669188 | cond-mat/0103328 | Exact solutions for diluted spin glasses and optimization problems | <|reference_start|>Exact solutions for diluted spin glasses and optimization problems: We study the low temperature properties of p-spin glass models with finite connectivity and of some optimization problems. Using a one-step functional replica symmetry breaking Ansatz we can solve exactly the saddle-point equations for graphs with uniform connectivity. The resulting ground state energy is in perfect agreement with numerical simulations. For fluctuating connectivity graphs, the same Ansatz can be used in a variational way: For p-spin models (known as p-XOR-SAT in computer science) it provides the exact configurational entropy together with the dynamical and static critical connectivities (for p=3, \gamma_d=0.818 and \gamma_s=0.918 resp.), whereas for hard optimization problems like 3-SAT or Bicoloring it provides new upper bounds for their critical thresholds (\gamma_c^{var}=4.396 and \gamma_c^{var}=2.149 resp.).<|reference_end|> | arxiv | @article{franz2001exact,
title={Exact solutions for diluted spin glasses and optimization problems},
author={S. Franz, M. Leone, F. Ricci-Tersenghi and R. Zecchina},
journal={Phys. Rev. Lett. 87 (2001) 127209},
year={2001},
doi={10.1103/PhysRevLett.87.127209},
archivePrefix={arXiv},
eprint={cond-mat/0103328},
primaryClass={cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | franz2001exact |
arxiv-669189 | cond-mat/0104066 | Beyond the Zipf-Mandelbrot law in quantitative linguistics | <|reference_start|>Beyond the Zipf-Mandelbrot law in quantitative linguistics: In this paper the Zipf-Mandelbrot law is revisited in the context of linguistics. Despite its widespread popularity the Zipf-Mandelbrot law can only describe the statistical behaviour of a rather restricted fraction of the total number of words contained in some given corpus. In particular, we focus our attention on the important deviations that become statistically relevant as larger corpora are considered and that ultimately could be understood as salient features of the underlying complex process of language generation. Finally, it is shown that all the different observed regimes can be accurately encompassed within a single mathematical framework recently introduced by C. Tsallis.<|reference_end|> | arxiv | @article{montemurro2001beyond,
title={Beyond the Zipf-Mandelbrot law in quantitative linguistics},
author={Marcelo A. Montemurro},
journal={arXiv preprint arXiv:cond-mat/0104066},
year={2001},
doi={10.1016/S0378-4371(01)00355-7},
archivePrefix={arXiv},
eprint={cond-mat/0104066},
primaryClass={cond-mat.stat-mech cs.CL nlin.AO}
} | montemurro2001beyond |
arxiv-669190 | cond-mat/0104214 | Extremal Optimization for Graph Partitioning | <|reference_start|>Extremal Optimization for Graph Partitioning: Extremal optimization is a new general-purpose method for approximating solutions to hard optimization problems. We study the method in detail by way of the NP-hard graph partitioning problem. We discuss the scaling behavior of extremal optimization, focusing on the convergence of the average run as a function of runtime and system size. The method has a single free parameter, which we determine numerically and justify using a simple argument. Our numerical results demonstrate that on random graphs, extremal optimization maintains consistent accuracy for increasing system sizes, with an approximation error decreasing over runtime roughly as a power law t^(-0.4). On geometrically structured graphs, the scaling of results from the average run suggests that these are far from optimal, with large fluctuations between individual trials. But when only the best runs are considered, results consistent with theoretical arguments are recovered.<|reference_end|> | arxiv | @article{boettcher2001extremal,
title={Extremal Optimization for Graph Partitioning},
author={S. Boettcher (Emory U.) and A. G. Percus (Los Alamos)},
journal={Phys. Rev. E, 64 (2001) 026114},
year={2001},
doi={10.1103/PhysRevE.64.026114},
archivePrefix={arXiv},
eprint={cond-mat/0104214},
primaryClass={cond-mat.stat-mech cs.NE math.OC}
} | boettcher2001extremal |
arxiv-669191 | cond-mat/0106096 | Statistical mechanics of complex networks | <|reference_start|>Statistical mechanics of complex networks: Complex networks describe a wide range of systems in nature and society, much quoted examples including the cell, a network of chemicals linked by chemical reactions, or the Internet, a network of routers and computers connected by physical links. While traditionally these systems were modeled as random graphs, it is increasingly recognized that the topology and evolution of real networks is governed by robust organizing principles. Here we review the recent advances in the field of complex networks, focusing on the statistical mechanics of network topology and dynamics. After reviewing the empirical data that motivated the recent interest in networks, we discuss the main models and analytical tools, covering random graphs, small-world and scale-free networks, as well as the interplay between topology and the network's robustness against failures and attacks.<|reference_end|> | arxiv | @article{albert2001statistical,
title={Statistical mechanics of complex networks},
author={Reka Albert, Albert-Laszlo Barabasi},
journal={Reviews of Modern Physics 74, 47 (2002)},
year={2001},
doi={10.1103/RevModPhys.74.47},
archivePrefix={arXiv},
eprint={cond-mat/0106096},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.NI math-ph math.MP nlin.AO physics.data-an}
} | albert2001statistical |
arxiv-669192 | cond-mat/0107212 | Intentional Walks on Scale Free Small Worlds | <|reference_start|>Intentional Walks on Scale Free Small Worlds: We present a novel algorithm that generates scale free small world graphs such as those found in the World Wide Web, social and metabolic networks. We use the generated graphs to study the dynamics of a realistic search strategy on the graphs, and find that they can be navigated in a very short number of steps.<|reference_end|> | arxiv | @article{puniyani2001intentional,
title={Intentional Walks on Scale Free Small Worlds},
author={Amit R Puniyani, Rajan M Lukose and Bernardo A Huberman},
journal={arXiv preprint arXiv:cond-mat/0107212},
year={2001},
archivePrefix={arXiv},
eprint={cond-mat/0107212},
primaryClass={cond-mat.soft cond-mat.stat-mech cs.NI physics.data-an}
} | puniyani2001intentional |
arxiv-669193 | cond-mat/0109121 | Coordination of Decisions in a Spatial Agent Model | <|reference_start|>Coordination of Decisions in a Spatial Agent Model: For a binary choice problem, the spatial coordination of decisions in an agent community is investigated both analytically and by means of stochastic computer simulations. The individual decisions are based on different local information generated by the agents with a finite lifetime and disseminated in the system with a finite velocity. We derive critical parameters for the emergence of minorities and majorities of agents making opposite decisions and investigate their spatial organization. We find that dependent on two essential parameters describing the local impact and the spatial dissemination of information, either a definite stable minority/majority relation (single-attractor regime) or a broad range of possible values (multi-attractor regime) occurs. In the latter case, the outcome of the decision process becomes rather diverse and hard to predict, both with respect to the share of the majority and their spatial distribution. We further investigate how a dissemination of information on different time scales affects the outcome of the decision process. We find that a more ``efficient'' information exchange within a subpopulation provides a suitable way to stabilize their majority status and to reduce ``diversity'' and uncertainty in the decision process.<|reference_end|> | arxiv | @article{schweitzer2001coordination,
title={Coordination of Decisions in a Spatial Agent Model},
author={Frank Schweitzer, Joerg Zimmermann, Heinz Muehlenbein},
journal={Physica A, vol. 303, no. 1-2 (2002) pp. 189-216},
year={2001},
doi={10.1016/S0378-4371(01)00486-1},
archivePrefix={arXiv},
eprint={cond-mat/0109121},
primaryClass={cond-mat.stat-mech cs.MA}
} | schweitzer2001coordination |
arxiv-669194 | cond-mat/0109218 | Entropic analysis of the role of words in literary texts | <|reference_start|>Entropic analysis of the role of words in literary texts: Beyond the local constraints imposed by grammar, words concatenated in long sequences carrying a complex message show statistical regularities that may reflect their linguistic role in the message. In this paper, we perform a systematic statistical analysis of the use of words in literary English corpora. We show that there is a quantitative relation between the role of content words in literary English and the Shannon information entropy defined over an appropriate probability distribution. Without assuming any previous knowledge about the syntactic structure of language, we are able to cluster certain groups of words according to their specific role in the text.<|reference_end|> | arxiv | @article{montemurro2001entropic,
title={Entropic analysis of the role of words in literary texts},
author={Marcelo A. Montemurro, Damian H. Zanette},
journal={arXiv preprint arXiv:cond-mat/0109218},
year={2001},
archivePrefix={arXiv},
eprint={cond-mat/0109218},
primaryClass={cond-mat.stat-mech cs.CL}
} | montemurro2001entropic |
arxiv-669195 | cond-mat/0109313 | Extreme Value Statistics and Traveling Fronts: An Application to Computer Science | <|reference_start|>Extreme Value Statistics and Traveling Fronts: An Application to Computer Science: We study the statistics of height and balanced height in the binary search tree problem in computer science. The search tree problem is first mapped to a fragmentation problem which is then further mapped to a modified directed polymer problem on a Cayley tree. We employ the techniques of traveling fronts to solve the polymer problem and translate back to derive exact asymptotic properties in the original search tree problem. The second mapping allows us not only to re-derive the already known results for random binary trees but to obtain new exact results for search trees where the entries arrive according to an arbitrary distribution, not necessarily randomly. Besides it allows us to derive the asymptotic shape of the full probability distribution of height and not just its moments. Our results are then generalized to $m$-ary search trees with arbitrary distribution. An attempt has been made to make the article accessible to both physicists and computer scientists.<|reference_end|> | arxiv | @article{majumdar2001extreme,
title={Extreme Value Statistics and Traveling Fronts: An Application to
Computer Science},
author={Satya N. Majumdar and P.L. Krapivsky},
journal={Phys. Rev. E 65, 036127 (2002)},
year={2001},
doi={10.1103/PhysRevE.65.036127},
archivePrefix={arXiv},
eprint={cond-mat/0109313},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.DS}
} | majumdar2001extreme |
arxiv-669196 | cond-mat/0110165 | Jamming Model for the Extremal Optimization Heuristic | <|reference_start|>Jamming Model for the Extremal Optimization Heuristic: Extremal Optimization, a recently introduced meta-heuristic for hard optimization problems, is analyzed on a simple model of jamming. The model is motivated first by the problem of finding lowest energy configurations for a disordered spin system on a fixed-valence graph. The numerical results for the spin system exhibit the same phenomena found in all earlier studies of extremal optimization, and our analytical results for the model reproduce many of these features.<|reference_end|> | arxiv | @article{boettcher2001jamming,
title={Jamming Model for the Extremal Optimization Heuristic},
author={S. Boettcher (Emory U.) and M. Grigni (Emory U.)},
journal={Journal of Physics A: Math. Gen., 35 (2002) 1109},
year={2001},
doi={10.1088/0305-4470/35/5/301},
archivePrefix={arXiv},
eprint={cond-mat/0110165},
primaryClass={cond-mat.stat-mech cs.NE physics.comp-ph}
} | boettcher2001jamming |
arxiv-669197 | cond-mat/0111153 | Hiding solutions in random satisfiability problems: A statistical mechanics approach | <|reference_start|>Hiding solutions in random satisfiability problems: A statistical mechanics approach: A major problem in evaluating stochastic local search algorithms for NP-complete problems is the need for a systematic generation of hard test instances having previously known properties of the optimal solutions. On the basis of statistical mechanics results, we propose random generators of hard and satisfiable instances for the 3-satisfiability problem (3SAT). The design of the hardest problem instances is based on the existence of a first order ferromagnetic phase transition and the glassy nature of excited states. The analytical predictions are corroborated by numerical results obtained from complete as well as stochastic local algorithms.<|reference_end|> | arxiv | @article{barthel2001hiding,
title={Hiding solutions in random satisfiability problems: A statistical
mechanics approach},
author={W. Barthel, A.K. Hartmann, M. Leone, F. Ricci-Tersenghi, M. Weigt, and
R. Zecchina},
journal={Phys. Rev. Lett. 88, 188701 (2002)},
year={2001},
doi={10.1103/PhysRevLett.88.188701},
archivePrefix={arXiv},
eprint={cond-mat/0111153},
primaryClass={cond-mat.dis-nn cond-mat.stat-mech cs.CC}
} | barthel2001hiding |
arxiv-669198 | cond-mat/0112103 | Going through Rough Times: from Non-Equilibrium Surface Growth to Algorithmic Scalability | <|reference_start|>Going through Rough Times: from Non-Equilibrium Surface Growth to Algorithmic Scalability: Efficient and faithful parallel simulation of large asynchronous systems is a challenging computational problem. It requires using the concept of local simulated times and a synchronization scheme. We study the scalability of massively parallel algorithms for discrete-event simulations which employ conservative synchronization to enforce causality. We do this by looking at the simulated time horizon as a complex evolving system, and we identify its universal characteristics. We find that the time horizon for the conservative parallel discrete-event simulation scheme exhibits Kardar-Parisi-Zhang-like kinetic roughening. This implies that the algorithm is asymptotically scalable in the sense that the average progress rate of the simulation approaches a non-zero constant. It also implies, however, that there are diverging memory requirements associated with such schemes.<|reference_end|> | arxiv | @article{korniss2001going,
title={Going through Rough Times: from Non-Equilibrium Surface Growth to
Algorithmic Scalability},
author={G. Korniss, M.A. Novotny, P.A. Rikvold, H. Guclu, and Z. Toroczkai},
journal={Materials Research Society Symposium Proceedings Series Vol. 700,
pp. 297-308, 2002},
year={2001},
archivePrefix={arXiv},
eprint={cond-mat/0112103},
primaryClass={cond-mat.stat-mech cond-mat.mtrl-sci cs.DC cs.PF physics.comp-ph}
} | korniss2001going |
arxiv-669199 | cond-mat/0112142 | Boosting search by rare events | <|reference_start|>Boosting search by rare events: Randomized search algorithms for hard combinatorial problems exhibit a large variability of performances. We study the different types of rare events which occur in such out-of-equilibrium stochastic processes and we show how they cooperate in determining the final distribution of running times. As a byproduct of our analysis we show how search algorithms are optimized by random restarts.<|reference_end|> | arxiv | @article{montanari2001boosting,
title={Boosting search by rare events},
author={Andrea Montanari and Riccardo Zecchina},
journal={Phys. Rev. Lett. 88, 178701 (2002)},
year={2001},
doi={10.1103/PhysRevLett.88.178701},
archivePrefix={arXiv},
eprint={cond-mat/0112142},
primaryClass={cond-mat.stat-mech cond-mat.dis-nn cs.CC}
} | montanari2001boosting |
arxiv-669200 | cond-mat/0201139 | Long-range fractal correlations in literary corpora | <|reference_start|>Long-range fractal correlations in literary corpora: In this paper we analyse the fractal structure of long human-language records by mapping large samples of texts onto time series. The particular mapping set up in this work is inspired on linguistic basis in the sense that it retains {\em the word} as the fundamental unit of communication. The results confirm that beyond the short-range correlations resulting from syntactic rules acting at sentence level, long-range structures emerge in large written language samples that give rise to long-range correlations in the use of words.<|reference_end|> | arxiv | @article{montemurro2002long-range,
title={Long-range fractal correlations in literary corpora},
author={Marcelo A. Montemurro and Pedro A. Pury},
journal={Fractals 10(4), 451-461 (2002)},
year={2002},
archivePrefix={arXiv},
eprint={cond-mat/0201139},
primaryClass={cond-mat.stat-mech cs.CL nlin.AO}
} | montemurro2002long-range |