corpus_id | paper_id | title | abstract | source | bibtex | citation_key |
---|---|---|---|---|---|---|
arxiv-676201 | cs/9811030 | Generating Segment Durations in a Text-To-Speech System: A Hybrid Rule-Based/Neural Network Approach | <|reference_start|>Generating Segment Durations in a Text-To-Speech System: A Hybrid Rule-Based/Neural Network Approach: A combination of a neural network with rule firing information from a rule-based system is used to generate segment durations for a text-to-speech system. The system shows a slight improvement in performance over a neural network system without the rule firing information. Synthesized speech using these segment durations was accepted by listeners as having about the same quality as speech generated using segment durations extracted from natural speech.<|reference_end|> | arxiv | @article{corrigan1998generating,
title={Generating Segment Durations in a Text-To-Speech System: A Hybrid
Rule-Based/Neural Network Approach},
author={Gerald Corrigan, Noel Massey and Orhan Karaali},
journal={Proceedings of Eurospeech (1997) 2675-2678. Rhodes, Greece},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811030},
primaryClass={cs.NE cs.HC}
} | corrigan1998generating |
arxiv-676202 | cs/9811031 | Speech Synthesis with Neural Networks | <|reference_start|>Speech Synthesis with Neural Networks: Text-to-speech conversion has traditionally been performed either by concatenating short samples of speech or by using rule-based systems to convert a phonetic representation of speech into an acoustic representation, which is then converted into speech. This paper describes a system that uses a time-delay neural network (TDNN) to perform this phonetic-to-acoustic mapping, with another neural network to control the timing of the generated speech. The neural network system requires less memory than a concatenation system, and performed well in tests comparing it to commercial systems using other technologies.<|reference_end|> | arxiv | @article{karaali1998speech,
title={Speech Synthesis with Neural Networks},
author={Orhan Karaali, Gerald Corrigan and Ira Gerson},
journal={World Congress on Neural Networks (1996) 45-50. San Diego},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811031},
primaryClass={cs.NE cs.HC}
} | karaali1998speech |
arxiv-676203 | cs/9811032 | Text-To-Speech Conversion with Neural Networks: A Recurrent TDNN Approach | <|reference_start|>Text-To-Speech Conversion with Neural Networks: A Recurrent TDNN Approach: This paper describes the design of a neural network that performs the phonetic-to-acoustic mapping in a speech synthesis system. The use of a time-domain neural network architecture limits discontinuities that occur at phone boundaries. Recurrent data input also helps smooth the output parameter tracks. Independent testing has demonstrated that the voice quality produced by this system compares favorably with speech from existing commercial text-to-speech systems.<|reference_end|> | arxiv | @article{karaali1998text-to-speech,
title={Text-To-Speech Conversion with Neural Networks: A Recurrent TDNN
Approach},
author={Orhan Karaali, Gerald Corrigan, Ira Gerson and Noel Massey},
journal={Proceedings of Eurospeech (1997) 561-564. Rhodes, Greece},
year={1998},
archivePrefix={arXiv},
eprint={cs/9811032},
primaryClass={cs.NE cs.HC}
} | karaali1998text-to-speech |
arxiv-676204 | cs/9812001 | A Probabilistic Approach to Lexical Semantic Knowledge Acquisition and Structural Disambiguation | <|reference_start|>A Probabilistic Approach to Lexical Semantic Knowledge Acquisition and Structural Disambiguation: In this thesis, I address the problem of automatically acquiring lexical semantic knowledge, especially that of case frame patterns, from large corpus data and using the acquired knowledge in structural disambiguation. The approach I adopt has the following characteristics: (1) dividing the problem into three subproblems: case slot generalization, case dependency learning, and word clustering (thesaurus construction), (2) viewing each subproblem as that of statistical estimation and defining probability models for each subproblem, (3) adopting the Minimum Description Length (MDL) principle as the learning strategy, (4) employing efficient learning algorithms, and (5) viewing the disambiguation problem as that of statistical prediction. Major contributions of this thesis include: (1) formalization of the lexical knowledge acquisition problem, (2) development of a number of learning methods for lexical knowledge acquisition, and (3) development of a high-performance disambiguation method.<|reference_end|> | arxiv | @article{li1998a,
title={A Probabilistic Approach to Lexical Semantic Knowledge Acquisition and
Structural Disambiguation},
author={Hang LI (NEC Corporation)},
journal={arXiv preprint arXiv:cs/9812001},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812001},
primaryClass={cs.CL}
} | li1998a |
arxiv-676205 | cs/9812002 | Training Reinforcement Neurocontrollers Using the Polytope Algorithm | <|reference_start|>Training Reinforcement Neurocontrollers Using the Polytope Algorithm: A new training algorithm is presented for delayed reinforcement learning problems that does not assume the existence of a critic model and employs the polytope optimization algorithm to adjust the weights of the action network so that a simple direct measure of the training performance is maximized. Experimental results from the application of the method to the pole balancing problem indicate improved training performance compared with critic-based and genetic reinforcement approaches.<|reference_end|> | arxiv | @article{likas1998training,
title={Training Reinforcement Neurocontrollers Using the Polytope Algorithm},
author={A. Likas and I. E. Lagaris},
journal={arXiv preprint arXiv:cs/9812002},
year={1998},
number={Preprint, Dept. of Computer Science, Univ. of Ioannina, 1996},
archivePrefix={arXiv},
eprint={cs/9812002},
primaryClass={cs.NE}
} | likas1998training |
arxiv-676206 | cs/9812003 | Neural Network Methods for Boundary Value Problems Defined in Arbitrarily Shaped Domains | <|reference_start|>Neural Network Methods for Boundary Value Problems Defined in Arbitrarily Shaped Domains: Partial differential equations (PDEs) with Dirichlet boundary conditions defined on boundaries with simple geometry have been successfully treated using sigmoidal multilayer perceptrons in previous works. This article deals with the case of complex boundary geometry, where the boundary is determined by a number of points that belong to it and are closely located, so as to offer a reasonable representation. Two networks are employed: a multilayer perceptron and a radial basis function network. The latter is used to account for the satisfaction of the boundary conditions. The method has been successfully tested on two-dimensional and three-dimensional PDEs and has yielded accurate solutions.<|reference_end|> | arxiv | @article{lagaris1998neural,
title={Neural Network Methods for Boundary Value Problems Defined in
Arbitrarily Shaped Domains},
author={I. E. Lagaris, A. Likas and D. G. Papageorgiou},
journal={arXiv preprint arXiv:cs/9812003},
year={1998},
number={Preprint no. 7-98, Dept. of Computer Science, Univ. of Ioannina,
Greece, 1998},
archivePrefix={arXiv},
eprint={cs/9812003},
primaryClass={cs.NE cond-mat.dis-nn cs.NA math-ph math.MP math.NA physics.comp-ph}
} | lagaris1998neural |
arxiv-676207 | cs/9812004 | Name Strategy: Its Existence and Implications | <|reference_start|>Name Strategy: Its Existence and Implications: It is argued that colour name strategy, object name strategy, and chunking strategy in memory are all aspects of the same general phenomena, called stereotyping. It is pointed out that the Berlin-Kay universal partial ordering of colours and the frequency of traffic accidents classified by colour are surprisingly similar. Some consequences of the existence of a name strategy for the philosophy of language and mathematics are discussed. It is argued that real valued quantities occur {\it ab initio}. The implication of real valued truth quantities is that the {\bf Continuum Hypothesis} of pure mathematics is side-stepped. The existence of name strategy shows that thought/sememes and talk/phonemes can be separate, and this vindicates the assumption of thought occurring before talk used in psycholinguistic speech production models.<|reference_end|> | arxiv | @article{roberts1998name,
title={Name Strategy: Its Existence and Implications},
author={Mark D. Roberts},
journal={Int.J.Computational Cognition Volume 3 Pages 1-14 (2005).},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812004},
primaryClass={cs.CL cs.AI math.HO}
} | roberts1998name |
arxiv-676208 | cs/9812005 | Optimal Multi-Paragraph Text Segmentation by Dynamic Programming | <|reference_start|>Optimal Multi-Paragraph Text Segmentation by Dynamic Programming: There exist several methods of calculating a similarity curve, or a sequence of similarity values, representing the lexical cohesion of successive text constituents, e.g., paragraphs. Methods for deciding the locations of fragment boundaries are, however, scarce. We propose a fragmentation method based on dynamic programming. The method is theoretically sound and guaranteed to provide an optimal splitting on the basis of a similarity curve, a preferred fragment length, and a defined cost function. The method is especially useful when control on fragment size is of importance.<|reference_end|> | arxiv | @article{heinonen1998optimal,
title={Optimal Multi-Paragraph Text Segmentation by Dynamic Programming},
author={Oskari Heinonen (University of Helsinki)},
journal={Proceedings of COLING-ACL '98, pp. 1484-1486, Montreal, Canada},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812005},
primaryClass={cs.CL}
} | heinonen1998optimal |
arxiv-676209 | cs/9812006 | A High Quality Text-To-Speech System Composed of Multiple Neural Networks | <|reference_start|>A High Quality Text-To-Speech System Composed of Multiple Neural Networks: While neural networks have been employed to handle several different text-to-speech tasks, ours is the first system to use neural networks throughout, for both linguistic and acoustic processing. We divide the text-to-speech task into three subtasks, a linguistic module mapping from text to a linguistic representation, an acoustic module mapping from the linguistic representation to speech, and a video module mapping from the linguistic representation to animated images. The linguistic module employs a letter-to-sound neural network and a postlexical neural network. The acoustic module employs a duration neural network and a phonetic neural network. The visual neural network is employed in parallel to the acoustic module to drive a talking head. The use of neural networks that can be retrained on the characteristics of different voices and languages affords our system a degree of adaptability and naturalness heretofore unavailable.<|reference_end|> | arxiv | @article{karaali1998a,
title={A High Quality Text-To-Speech System Composed of Multiple Neural
Networks},
author={Orhan Karaali, Gerald Corrigan, Noel Massey, Corey Miller, Otto
Schnurr and Andrew Mackie},
journal={Proceedings of the IEEE International Conference on Acoustics,
Speech and Signal Processing (1998) 2:1237-1240. Seattle, Washington},
year={1998},
doi={10.1109/ICASSP.1998.675495},
archivePrefix={arXiv},
eprint={cs/9812006},
primaryClass={cs.NE cs.HC}
} | karaali1998a |
arxiv-676210 | cs/9812007 | Minimum Cuts in Near-Linear Time | <|reference_start|>Minimum Cuts in Near-Linear Time: We significantly improve known time bounds for solving the minimum cut problem on undirected graphs. We use a ``semi-duality'' between minimum cuts and maximum spanning tree packings combined with our previously developed random sampling techniques. We give a randomized algorithm that finds a minimum cut in an m-edge, n-vertex graph with high probability in O(m log^3 n) time. We also give a simpler randomized algorithm that finds all minimum cuts with high probability in O(n^2 log n) time. This variant has an optimal RNC parallelization. Both variants improve on the previous best time bound of O(n^2 log^3 n). Other applications of the tree-packing approach are new, nearly tight bounds on the number of near minimum cuts a graph may have and a new data structure for representing them in a space-efficient manner.<|reference_end|> | arxiv | @article{karger1998minimum,
title={Minimum Cuts in Near-Linear Time},
author={David R. Karger},
journal={arXiv preprint arXiv:cs/9812007},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812007},
primaryClass={cs.DS}
} | karger1998minimum |
arxiv-676211 | cs/9812008 | Approximate Graph Coloring by Semidefinite Programming | <|reference_start|>Approximate Graph Coloring by Semidefinite Programming: We consider the problem of coloring k-colorable graphs with the fewest possible colors. We present a randomized polynomial time algorithm that colors a 3-colorable graph on $n$ vertices with min{O(Delta^{1/3} log^{1/2} Delta log n), O(n^{1/4} log^{1/2} n)} colors where Delta is the maximum degree of any vertex. Besides giving the best known approximation ratio in terms of n, this marks the first non-trivial approximation result as a function of the maximum degree Delta. This result can be generalized to k-colorable graphs to obtain a coloring using min{O(Delta^{1-2/k} log^{1/2} Delta log n), O(n^{1-3/(k+1)} log^{1/2} n)} colors. Our results are inspired by the recent work of Goemans and Williamson who used an algorithm for semidefinite optimization problems, which generalize linear programs, to obtain improved approximations for the MAX CUT and MAX 2-SAT problems. An intriguing outcome of our work is a duality relationship established between the value of the optimum solution to our semidefinite program and the Lovasz theta-function. We show lower bounds on the gap between the optimum solution of our semidefinite program and the actual chromatic number; by duality this also demonstrates interesting new facts about the theta-function.<|reference_end|> | arxiv | @article{karger1998approximate,
title={Approximate Graph Coloring by Semidefinite Programming},
author={David Karger, Rajeev Motwani, and Madhu Sudan},
journal={JACM 45(2), mar. 1998, pp.246--265},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812008},
primaryClass={cs.DS}
} | karger1998approximate |
arxiv-676212 | cs/9812009 | Vocal Access to a Newspaper Archive: Design Issues and Preliminary Investigation | <|reference_start|>Vocal Access to a Newspaper Archive: Design Issues and Preliminary Investigation: This paper presents the design and the current prototype implementation of an interactive vocal Information Retrieval system that can be used to access articles of a large newspaper archive using a telephone. The results of preliminary investigation into the feasibility of such a system are also presented.<|reference_end|> | arxiv | @article{crestani1998vocal,
title={Vocal Access to a Newspaper Archive: Design Issues and Preliminary
Investigation},
author={Fabio Crestani},
journal={arXiv preprint arXiv:cs/9812009},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812009},
primaryClass={cs.DL}
} | crestani1998vocal |
arxiv-676213 | cs/9812010 | Towards a computational theory of human daydreaming | <|reference_start|>Towards a computational theory of human daydreaming: This paper examines the phenomenon of daydreaming: spontaneously recalling or imagining personal or vicarious experiences in the past or future. The following important roles of daydreaming in human cognition are postulated: plan preparation and rehearsal, learning from failures and successes, support for processes of creativity, emotion regulation, and motivation. A computational theory of daydreaming and its implementation as the program DAYDREAMER are presented. DAYDREAMER consists of 1) a scenario generator based on relaxed planning, 2) a dynamic episodic memory of experiences used by the scenario generator, 3) a collection of personal goals and control goals which guide the scenario generator, 4) an emotion component in which daydreams initiate, and are initiated by, emotional states arising from goal outcomes, and 5) domain knowledge of interpersonal relations and common everyday occurrences. The role of emotions and control goals in daydreaming is discussed. Four control goals commonly used in guiding daydreaming are presented: rationalization, failure/success reversal, revenge, and preparation. The role of episodic memory in daydreaming is considered, including how daydreamed information is incorporated into memory and later used. An initial version of DAYDREAMER which produces several daydreams (in English) is currently running.<|reference_end|> | arxiv | @article{mueller1998towards,
title={Towards a computational theory of human daydreaming},
author={Erik T. Mueller, Michael G. Dyer},
journal={arXiv preprint arXiv:cs/9812010},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812010},
primaryClass={cs.AI}
} | mueller1998towards |
arxiv-676214 | cs/9812011 | A nested transaction mechanism for LOCUS | <|reference_start|>A nested transaction mechanism for LOCUS: A working implementation of nested transactions has been produced for LOCUS, an integrated distributed operating system which provides a high degree of network transparency. Several aspects of our mechanism are novel. First, the mechanism allows a transaction to access objects directly without regard to the location of the object. Second, processes running on behalf of a single transaction may be located at many sites. Thus there is no need to invoke a new transaction to perform processing or access objects at a remote site. Third, unlike other environments, LOCUS allows replication of data objects at more than one site in the network, and this capability is incorporated into the transaction mechanism. If the copy of an object that is currently being accessed becomes unavailable, it is possible to continue work by using another one of the replicated copies. Finally, an efficient orphan removal algorithm is presented, and the problem of providing continued operation during network partitions is addressed in detail.<|reference_end|> | arxiv | @article{mueller1998a,
title={A nested transaction mechanism for LOCUS},
author={Erik T. Mueller, Johanna D. Moore, and Gerald J. Popek},
journal={arXiv preprint arXiv:cs/9812011},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812011},
primaryClass={cs.OS cs.DC}
} | mueller1998a |
arxiv-676215 | cs/9812012 | Quantum simulations of classical random walks and undirected graph connectivity | <|reference_start|>Quantum simulations of classical random walks and undirected graph connectivity: It is not currently known if quantum Turing machines can efficiently simulate probabilistic computations in the space-bounded case. In this paper we show that space-bounded quantum Turing machines can efficiently simulate a limited class of random processes: random walks on undirected graphs. By means of such simulations, it is demonstrated that the undirected graph connectivity problem for regular graphs can be solved by one-sided error quantum Turing machines that run in logspace and halt absolutely. It follows that symmetric logspace is contained in the quantum analogue of randomized logspace.<|reference_end|> | arxiv | @article{watrous1998quantum,
title={Quantum simulations of classical random walks and undirected graph
connectivity},
author={John Watrous (University of Montreal)},
journal={arXiv preprint arXiv:cs/9812012},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812012},
primaryClass={cs.CC quant-ph}
} | watrous1998quantum |
arxiv-676216 | cs/9812013 | The Self-Organizing Symbiotic Agent | <|reference_start|>The Self-Organizing Symbiotic Agent: In [N. A. Baas, Emergence, Hierarchies, and Hyper-structures, in C.G. Langton ed., Artificial Life III, Addison Wesley, 1994.] a general framework for the study of Emergence and hyper-structure was presented. This approach is mostly concerned with the description of such systems. In this paper we will try to bring forth a different aspect of this model we feel will be useful in the engineering of agent-based solutions, namely the symbiotic approach. In this approach, a self-organizing method of dividing the more complex "main-problem" into a hyper-structure of "sub-problems" with the aim of reducing complexity is desired. A description of the general problem will be given along with some instances of related work. This paper is intended to serve as an introductory challenge for general solutions to the described problem.<|reference_end|> | arxiv | @article{hodjat1998the,
title={The Self-Organizing Symbiotic Agent},
author={Babak Hodjat and Makoto Amamiya},
journal={arXiv preprint arXiv:cs/9812013},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812013},
primaryClass={cs.NE cs.CC}
} | hodjat1998the |
arxiv-676217 | cs/9812014 | An Adaptive Agent Oriented Software Architecture | <|reference_start|>An Adaptive Agent Oriented Software Architecture: A new approach to software design based on an agent-oriented architecture is presented. Unlike current research, we consider software to be designed and implemented with this methodology in mind. In this approach agents are considered adaptively communicating concurrent modules which are divided into a white box module responsible for the communications and learning, and a black box which is responsible for the independent specialized processes of the agent. A distributed learning policy is also introduced for adaptability.<|reference_end|> | arxiv | @article{hodjat1998an,
title={An Adaptive Agent Oriented Software Architecture},
author={Babak Hodjat, Christopher J. Savoie, and Makoto Amamiya},
journal={arXiv preprint arXiv:cs/9812014},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812014},
primaryClass={cs.DC cs.MA}
} | hodjat1998an |
arxiv-676218 | cs/9812015 | Adaptive Interaction Using the Adaptive Agent Oriented Software Architecture (AAOSA) | <|reference_start|>Adaptive Interaction Using the Adaptive Agent Oriented Software Architecture (AAOSA): User interfaces that adapt their characteristics to those of the user are referred to as adaptive interfaces. We propose Adaptive Agent Oriented Software Architecture (AAOSA) as a new way of designing adaptive interfaces. AAOSA is a new approach to software design based on an agent-oriented architecture. In this approach agents are considered adaptively communicating concurrent modules which are divided into a white box module responsible for the communications and learning, and a black box which is responsible for the independent specialized processes of the agent. A distributed learning policy that makes use of this architecture is used for purposes of system adaptability.<|reference_end|> | arxiv | @article{hodjat1998adaptive,
title={Adaptive Interaction Using the Adaptive Agent Oriented Software
Architecture (AAOSA)},
author={Babak Hodjat and Makoto Amamiya},
journal={arXiv preprint arXiv:cs/9812015},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812015},
primaryClass={cs.HC cs.DC}
} | hodjat1998adaptive |
arxiv-676219 | cs/9812016 | Making the most of electronic journals | <|reference_start|>Making the most of electronic journals: As most electronic journals available today have been derived from print originals, print journals have become a vital element in the broad development of electronic journals publishing. Further dependence on the print publishing model, however, will be a constraint on the continuing development of e-journals, and a series of conflicts are likely to arise. Making the most of e-journals requires that a distinctive new publishing model is developed. We consider some of the issues that will be fundamental in this new model, starting with user motivations and some reported publisher experiences, both of which suggest a broadening desire for comprehensive linked archives. This leads in turn to questions about the impact of rights assignment by authors, in particular the common practice of giving exclusive rights to publishers for individual works. Some non-prescriptive solutions are suggested, and four steps towards optimum e-journals are proposed.<|reference_end|> | arxiv | @article{hitchcock1998making,
title={Making the most of electronic journals},
author={Steve Hitchcock, Les Carr and Wendy Hall},
journal={arXiv preprint arXiv:cs/9812016},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812016},
primaryClass={cs.DL}
} | hitchcock1998making |
arxiv-676220 | cs/9812017 | A reusable iterative optimization software library to solve combinatorial problems with approximate reasoning | <|reference_start|>A reusable iterative optimization software library to solve combinatorial problems with approximate reasoning: Real world combinatorial optimization problems such as scheduling are typically too complex to solve with exact methods. Additionally, the problems often have to observe vaguely specified constraints of different importance, the available data may be uncertain, and compromises between antagonistic criteria may be necessary. We present a combination of approximate reasoning based constraints and iterative optimization based heuristics that help to model and solve such problems in a framework of C++ software libraries called StarFLIP++. While initially developed to schedule continuous caster units in steel plants, we present in this paper results from reusing the library components in a shift scheduling system for the workforce of an industrial production plant.<|reference_end|> | arxiv | @article{raggl1998a,
title={A reusable iterative optimization software library to solve
combinatorial problems with approximate reasoning},
author={Andreas Raggl, Wolfgang Slany},
journal={International Journal of Approximate Reasoning, 19(1--2):161--191,
July/August 1998},
year={1998},
number={DBAI-TR-98-23},
archivePrefix={arXiv},
eprint={cs/9812017},
primaryClass={cs.AI}
} | raggl1998a |
arxiv-676221 | cs/9812018 | A Flexible Shallow Approach to Text Generation | <|reference_start|>A Flexible Shallow Approach to Text Generation: In order to support the efficient development of NL generation systems, two orthogonal methods are currently pursued with emphasis: (1) reusable, general, and linguistically motivated surface realization components, and (2) simple, task-oriented template-based techniques. In this paper we argue that, from an application-oriented perspective, the benefits of both are still limited. In order to improve this situation, we suggest and evaluate shallow generation methods associated with increased flexibility. We advise a close connection between domain-motivated and linguistic ontologies that supports the quick adaptation to new tasks and domains, rather than the reuse of general resources. Our method is especially designed for generating reports with limited linguistic variations.<|reference_end|> | arxiv | @article{busemann1998a,
title={A Flexible Shallow Approach to Text Generation},
author={Stephan Busemann and Helmut Horacek (DFKI GmbH)},
journal={Proc. 9th International Workshop on Natural Language Generation,
Niagara-on-the-Lake, Canada, August 1998, 238-247},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812018},
primaryClass={cs.CL}
} | busemann1998a |
arxiv-676222 | cs/9812019 | Symmetries and transitions of bounded Turing machines | <|reference_start|>Symmetries and transitions of bounded Turing machines: We consider the structures given by repeatedly generalising the definition of finite state automata by symmetry considerations, and constructing analogues of transition monoids at each step. This approach first gives us non-deterministic automata, then (non-deterministic) two-way automata and bounded Turing machines --- that is, Turing machines where the read / write head is unable to move past the end of the input word. In the case of two-way automata, the transition monoids generalise to endomorphism monoids in compact closed categories. These use Girard's resolution formula (from the Geometry of Interaction representation of linear logic) to construct the images of singleton words. In the case of bounded Turing machines, the transition homomorphism generalises to a monoid homomorphism from the natural numbers to a monoid constructed from the union of endomorphism monoids of a compact closed category, together with an appropriate composition. These use Girard's execution formula (also from the Geometry of Interaction representation of linear logic) to construct images of singletons.<|reference_end|> | arxiv | @article{hines1998symmetries,
title={Symmetries and transitions of bounded Turing machines},
author={Peter M. Hines},
journal={arXiv preprint arXiv:cs/9812019},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812019},
primaryClass={cs.LO math.CT}
} | hines1998symmetries |
arxiv-676223 | cs/9812020 | The Computing Research Repository: Promoting the Rapid Dissemination and Archiving of Computer Science Research | <|reference_start|>The Computing Research Repository: Promoting the Rapid Dissemination and Archiving of Computer Science Research: We describe the Computing Research Repository (CoRR), a new electronic archive for rapid dissemination and archiving of computer science research results. CoRR was initiated in September 1998 through the cooperation of ACM, the LANL (Los Alamos National Laboratory) e-Print archive, and NCSTRL (Networked Computer Science Technical Reference Library). Through its implementation of the Dienst protocol, CoRR combines the open and extensible architecture of NCSTRL with the reliable access and well-established management practices of the LANL XXX e-Print repository. This architecture will allow integration with other e-Print archives and provides a foundation for a future broad-based scholarly digital library. We describe the decisions that were made in creating CoRR, the architecture of the CoRR/NCSTRL interoperation, and issues that have arisen during the operation of CoRR.<|reference_end|> | arxiv | @article{halpern1998the,
title={The Computing Research Repository: Promoting the Rapid Dissemination and
Archiving of Computer Science Research},
author={Joseph Y. Halpern, Carl Lagoze},
journal={arXiv preprint arXiv:cs/9812020},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812020},
primaryClass={cs.DL}
} | halpern1998the |
arxiv-676224 | cs/9812021 | Forgetting Exceptions is Harmful in Language Learning | <|reference_start|>Forgetting Exceptions is Harmful in Language Learning: We show that in language learning, contrary to received wisdom, keeping exceptional training instances in memory can be beneficial for generalization accuracy. We investigate this phenomenon empirically on a selection of benchmark natural language processing tasks: grapheme-to-phoneme conversion, part-of-speech tagging, prepositional-phrase attachment, and base noun phrase chunking. In a first series of experiments we combine memory-based learning with training set editing techniques, in which instances are edited based on their typicality and class prediction strength. Results show that editing exceptional instances (with low typicality or low class prediction strength) tends to harm generalization accuracy. In a second series of experiments we compare memory-based learning and decision-tree learning methods on the same selection of tasks, and find that decision-tree learning often performs worse than memory-based learning. Moreover, the decrease in performance can be linked to the degree of abstraction from exceptions (i.e., pruning or eagerness). We provide explanations for both results in terms of the properties of the natural language processing tasks and the learning algorithms.<|reference_end|> | arxiv | @article{daelemans1998forgetting,
title={Forgetting Exceptions is Harmful in Language Learning},
author={Walter Daelemans, Antal van den Bosch, and Jakub Zavrel},
journal={arXiv preprint arXiv:cs/9812021},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812021},
primaryClass={cs.CL cs.LG}
} | daelemans1998forgetting |
arxiv-676225 | cs/9812022 | Hypertree Decompositions and Tractable Queries | <|reference_start|>Hypertree Decompositions and Tractable Queries: Several important decision problems on conjunctive queries (CQs) are NP-complete in general but become tractable, and actually highly parallelizable, if restricted to acyclic or nearly acyclic queries. Examples are the evaluation of Boolean CQs and query containment. These problems were shown tractable for conjunctive queries of bounded treewidth and of bounded degree of cyclicity. The so far most general concept of nearly acyclic queries was the notion of queries of bounded query-width introduced by Chekuri and Rajaraman (1997). While CQs of bounded query width are tractable, it remained unclear whether such queries are efficiently recognizable. Chekuri and Rajaraman stated as an open problem whether for each constant k it can be determined in polynomial time if a query has query width less than or equal to k. We give a negative answer by proving this problem NP-complete (specifically, for k=4). In order to circumvent this difficulty, we introduce the new concept of hypertree decomposition of a query and the corresponding notion of hypertree width. We prove: (a) for each k, the class of queries with query width bounded by k is properly contained in the class of queries whose hypertree width is bounded by k; (b) unlike query width, constant hypertree-width is efficiently recognizable; (c) Boolean queries of constant hypertree width can be efficiently evaluated.<|reference_end|> | arxiv | @article{gottlob1998hypertree,
title={Hypertree Decompositions and Tractable Queries},
author={G. Gottlob, N. Leone, F. Scarcello},
journal={Journal of Computer and System Sciences, 64(3):579-627, 2002},
year={1998},
doi={10.1006/jcss.2001.1809},
number={DBAI-TR-98/21},
archivePrefix={arXiv},
eprint={cs/9812022},
primaryClass={cs.DB cs.AI}
} | gottlob1998hypertree |
arxiv-676226 | cs/9812023 | Virtual Kathakali : Gesture Driven Metamorphosis | <|reference_start|>Virtual Kathakali : Gesture Driven Metamorphosis: Training in motor skills such as athletics, dance, or gymnastics is not possible today except in the direct presence of the coach/instructor. This paper describes a computer vision based gesture recognition system which is used to metamorphose the user into a virtual person, e.g. a Kathakali dancer, who is graphically recreated at a near or distant location. Thus the user can be seen by an off-site coach using low-bandwidth joint-motion data which permits real time animation. The metamorphosis involves altering the appearance and identity of the user and also creating a specific environment, possibly in interaction with other virtual creatures. A robust vision module is used to identify the user, based on very simple binary image processing in real time, which also manages to resolve self-occlusion and correct for clothing/colour and other variations among users. Gestures are identified by locating key points at the shoulder, elbow and wrist joints, which are then recreated in an articulated humanoid model, which, in this instance, represents a Kathakali dancer in elaborate traditional dress. Unlike glove-based or other hand-movement tracking systems, this application requires the user to wear no hard-wired devices and is aimed at making gesture tracking simpler, cheaper, and more user friendly.<|reference_end|> | arxiv | @article{paul1998virtual,
title={Virtual Kathakali : Gesture Driven Metamorphosis},
author={Soumyadeep Paul, Sudipta N. Sinha and Amitabha Mukerjee},
journal={arXiv preprint arXiv:cs/9812023},
year={1998},
archivePrefix={arXiv},
eprint={cs/9812023},
primaryClass={cs.HC}
} | paul1998virtual |
arxiv-676227 | cs/9901001 | TDLeaf(lambda): Combining Temporal Difference Learning with Game-Tree Search | <|reference_start|>TDLeaf(lambda): Combining Temporal Difference Learning with Game-Tree Search: In this paper we present TDLeaf(lambda), a variation on the TD(lambda) algorithm that enables it to be used in conjunction with minimax search. We present some experiments in both chess and backgammon which demonstrate its utility and provide comparisons with TD(lambda) and another less radical variant, TD-directed(lambda). In particular, our chess program, ``KnightCap,'' used TDLeaf(lambda) to learn its evaluation function while playing on the Free Internet Chess Server (FICS, fics.onenet.net). It improved from a 1650 rating to a 2100 rating in just 308 games. We discuss some of the reasons for this success and the relationship between our results and Tesauro's results in backgammon.<|reference_end|> | arxiv | @article{baxter1999tdleaf(lambda):,
title={TDLeaf(lambda): Combining Temporal Difference Learning with Game-Tree
Search},
author={Jonathan Baxter, Andrew Tridgell, and Lex Weaver},
journal={Australian Journal of Intelligent Information Processing Systems,
ISSN 1321-2133, Vol. 5 No. 1, Autumn 1998, pages 39-43},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901001},
primaryClass={cs.LG cs.AI}
} | baxter1999tdleaf(lambda): |
arxiv-676228 | cs/9901002 | KnightCap: A chess program that learns by combining TD(lambda) with game-tree search | <|reference_start|>KnightCap: A chess program that learns by combining TD(lambda) with game-tree search: In this paper we present TDLeaf(lambda), a variation on the TD(lambda) algorithm that enables it to be used in conjunction with game-tree search. We present some experiments in which our chess program ``KnightCap'' used TDLeaf(lambda) to learn its evaluation function while playing on the Free Internet Chess Server (FICS, fics.onenet.net). The main success we report is that KnightCap improved from a 1650 rating to a 2150 rating in just 308 games and 3 days of play. As a reference, a rating of 1650 corresponds to about level B human play (on a scale from E (1000) to A (1800)), while 2150 is human master level. We discuss some of the reasons for this success, principal among them being the use of on-line play, rather than self-play.<|reference_end|> | arxiv | @article{baxter1999knightcap:,
title={KnightCap: A chess program that learns by combining TD(lambda) with
game-tree search},
author={Jonathan Baxter, Andrew Tridgell, and Lex Weaver},
journal={Machine Learning: Proceedings of the Fifteenth International
Conference (ICML '98), ISBN 1-55860-556-8, ISSN 1049-1910, Madison, Wisconsin,
July 24-27 1998, pages 28-36},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901002},
primaryClass={cs.LG cs.AI}
} | baxter1999knightcap: |
arxiv-676229 | cs/9901003 | Fixpoint 3-valued semantics for autoepistemic logic | <|reference_start|>Fixpoint 3-valued semantics for autoepistemic logic: The paper presents a constructive fixpoint semantics for autoepistemic logic (AEL). We introduce a derivation operator and define the semantics as its least fixpoint. The fixpoint characterizes a unique but possibly three-valued belief set of an autoepistemic theory: it may be three-valued in the sense that, for a subclass of formulas F, the fixpoint does not specify whether F is believed or not. We show that complete fixpoints of the derivation operator correspond to Moore's stable expansions. In the case of modal representations of logic programs our least fixpoint semantics expresses well-founded semantics or 3-valued Fitting-Kunen semantics (depending on the embedding used). We show that, computationally, our semantics is simpler than the semantics proposed by Moore (assuming that the polynomial hierarchy does not collapse).<|reference_end|> | arxiv | @article{denecker1999fixpoint,
title={Fixpoint 3-valued semantics for autoepistemic logic},
author={M. Denecker and V. Marek and M. Truszczynski},
journal={arXiv preprint arXiv:cs/9901003},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901003},
primaryClass={cs.LO cs.AI}
} | denecker1999fixpoint |
arxiv-676230 | cs/9901004 | On the geometry of similarity search: dimensionality curse and concentration of measure | <|reference_start|>On the geometry of similarity search: dimensionality curse and concentration of measure: We suggest that the curse of dimensionality affecting the similarity-based search in large datasets is a manifestation of the phenomenon of concentration of measure on high-dimensional structures. We prove that, under certain geometric assumptions on the query domain $\Omega$ and the dataset $X$, if $\Omega$ satisfies the so-called concentration property, then for most query points $x^\ast$ the ball of radius $(1+\epsilon)d_X(x^\ast)$ centred at $x^\ast$ contains either all points of $X$ or else at least $C_1\exp(-C_2\epsilon^2 n)$ of them. Here $d_X(x^\ast)$ is the distance from $x^\ast$ to the nearest neighbour in $X$ and $n$ is the dimension of $\Omega$.<|reference_end|> | arxiv | @article{pestov1999on,
title={On the geometry of similarity search: dimensionality curse and
concentration of measure},
author={Vladimir Pestov},
journal={Information Processing Letters 73 (2000), 47-51.},
year={1999},
number={RP-99-01, Victoria University of Wellington, NZ},
archivePrefix={arXiv},
eprint={cs/9901004},
primaryClass={cs.IR cs.CG cs.DB cs.DS}
} | pestov1999on |
arxiv-676231 | cs/9901005 | An Empirical Approach to Temporal Reference Resolution (journal version) | <|reference_start|>An Empirical Approach to Temporal Reference Resolution (journal version): Scheduling dialogs, during which people negotiate the times of appointments, are common in everyday life. This paper reports the results of an in-depth empirical investigation of resolving explicit temporal references in scheduling dialogs. There are four phases of this work: data annotation and evaluation, model development, system implementation and evaluation, and model evaluation and analysis. The system and model were developed primarily on one set of data, and then applied later to a much more complex data set, to assess the generalizability of the model for the task being performed. Many different types of empirical methods are applied to pinpoint the strengths and weaknesses of the approach. Detailed annotation instructions were developed and an intercoder reliability study was performed, showing that naive annotators can reliably perform the targeted annotations. A fully automatic system has been developed and evaluated on unseen test data, with good results on both data sets. We adopt a pure realization of a recency-based focus model to identify precisely when it is and is not adequate for the task being addressed. In addition to system results, an in-depth evaluation of the model itself is presented, based on detailed manual annotations. The results are that few errors occur specifically due to the model of focus being used, and the set of anaphoric relations defined in the model are low in ambiguity for both data sets.<|reference_end|> | arxiv | @article{wiebe1999an,
title={An Empirical Approach to Temporal Reference Resolution (journal version)},
author={Janyce Wiebe, Thomas P. O'Hara, Thorsten Ohrstrom-Sandgren, and
Kenneth K. McKeever (New Mexico State University)},
journal={Journal of Artificial Intelligence Research (JAIR), 9:247-293},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901005},
primaryClass={cs.CL}
} | wiebe1999an |
arxiv-676232 | cs/9901006 | Object Oriented and Functional Programming for Symbolic Manipulation | <|reference_start|>Object Oriented and Functional Programming for Symbolic Manipulation: The advantages of a mixed approach, using different kinds of programming techniques for symbolic manipulation, are discussed. The main purpose of the approach offered is to merge the methods of object-oriented programming, which are convenient for presenting data and algorithms to the user, with the advantages of functional languages for data manipulation, internal representation, and portability of software.<|reference_end|> | arxiv | @article{vlasov1999object,
title={Object Oriented and Functional Programming for Symbolic Manipulation},
author={Alexander Yu. Vlasov (FCR/IRH, St.-Petersburg, Russia)},
journal={arXiv preprint arXiv:cs/9901006},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901006},
primaryClass={cs.SC cs.PL}
} | vlasov1999object |
arxiv-676233 | cs/9901007 | Universal Object Oriented Languages and Computer Algebra | <|reference_start|>Universal Object Oriented Languages and Computer Algebra: Universal object-oriented languages have made programming simpler and more efficient. In this article the possibilities of using similar methods in computer algebra are considered. A clear and powerful universal language is useful if a particular problem is not implemented in standard software packages like REDUCE, MATHEMATICA, etc., and if using the internal programming languages of the packages looks inefficient. Functional languages like LISP have some advantages and traditions for algebraic and symbolic manipulation. Functional and object-oriented programming are not incompatible. An extension of the model of an object to the manipulation of pure functions and algebraic expressions is considered.<|reference_end|> | arxiv | @article{vlasov1999universal,
title={Universal Object Oriented Languages and Computer Algebra},
author={Alexander Yu. Vlasov (FCR/IRH, St.-Petersburg, Russia)},
journal={Computer Algebra in Scientific Computing. Extended abstracts of
the International Conference CASC-98, ed by N.N.Vasiliev -- St.-Petersburg,
1998, pages 130 -- 132},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901007},
primaryClass={cs.PL}
} | vlasov1999universal |
arxiv-676234 | cs/9901008 | Fast Computational Algorithms for the Discrete Wavelet Transform and Applications of Localized Orthonormal Bases in Signal Classification | <|reference_start|>Fast Computational Algorithms for the Discrete Wavelet Transform and Applications of Localized Orthonormal Bases in Signal Classification: We construct an algorithm for implementing the discrete wavelet transform by means of matrices in SO_2(R) for orthonormal compactly supported wavelets and matrices in SL_m(R), m >= 2, for compactly supported biorthogonal wavelets. We show that in 1 dimension the total operation count using this algorithm can be reduced to about 50% of the conventional convolution and downsampling-by-2 operation for both orthonormal and biorthogonal filters. In the special case of biorthogonal symmetric odd-odd filters, we show an implementation yielding a total operation count of about 38% of the conventional method. In 2 dimensions we show an implementation of this algorithm yielding a reduction in the total operation count of about 70% when the filters are orthonormal, a reduction of about 62% for general biorthogonal filters, and a reduction of about 70% if the filters are symmetric odd-odd length filters. We further extend these results to 3 dimensions. We also show how the SO_2(R)-method for implementing the discrete wavelet transform may be exploited to compute short FIR filters, and we construct edge mappings where we try to improve upon the degree of preservation of regularity in the conventional methods. We also consider a two-class waveform discrimination problem. A statistical space-frequency analysis is performed on a training data set using the LDB-algorithm of N.Saito and R.Coifman. The success of the algorithm on this particular problem is evaluated on a disjoint test data set.<|reference_end|> | arxiv | @article{fossgaard1999fast,
title={Fast Computational Algorithms for the Discrete Wavelet Transform and
Applications of Localized Orthonormal Bases in Signal Classification},
author={Eirik Fossgaard},
journal={arXiv preprint arXiv:cs/9901008},
year={1999},
number={82-90487-93-2},
archivePrefix={arXiv},
eprint={cs/9901008},
primaryClass={cs.MS cs.CE}
} | fossgaard1999fast |
arxiv-676235 | cs/9901009 | Competition and cooperation: Libraries and publishers in the transition to electronic scholarly journals | <|reference_start|>Competition and cooperation: Libraries and publishers in the transition to electronic scholarly journals: The conversion of scholarly journals to digital format is proceeding rapidly, especially for those from large commercial and learned society publishers. This conversion offers the best hope for survival for such publishers. The infamous "journal crisis" is more of a library cost crisis than a publisher pricing problem, with internal library costs much higher than the amount spent on purchasing books and journals. Therefore publishers may be able to retain or even increase their revenues and profits, while at the same time providing a superior service. To do this, they will have to take over many of the function of libraries, and they can do that only in the digital domain. This paper examines publishers' strategies, how they are likely to evolve, and how they will affect libraries.<|reference_end|> | arxiv | @article{odlyzko1999competition,
title={Competition and cooperation: Libraries and publishers in the transition
to electronic scholarly journals},
author={Andrew Odlyzko},
journal={arXiv preprint arXiv:cs/9901009},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901009},
primaryClass={cs.DL}
} | odlyzko1999competition |
arxiv-676236 | cs/9901010 | Average-Case Complexity of Shellsort | <|reference_start|>Average-Case Complexity of Shellsort: We prove a general lower bound on the average-case complexity of Shellsort: the average number of data-movements (and comparisons) made by a $p$-pass Shellsort for any incremental sequence is $\Omega(pn^{1 + 1/p})$ for all $p \leq \log n$. Using similar arguments, we analyze the average-case complexity of several other sorting algorithms.<|reference_end|> | arxiv | @article{jiang1999average-case,
title={Average-Case Complexity of Shellsort},
author={Tao Jiang (McMaster U.), Ming Li (U of Waterloo), Paul Vitanyi (CWI
and U of Amsterdam)},
journal={arXiv preprint arXiv:cs/9901010},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901010},
primaryClass={cs.DS cs.CC}
} | jiang1999average-case |
arxiv-676237 | cs/9901011 | A Brief History of the Internet | <|reference_start|>A Brief History of the Internet: The Internet has revolutionized the computer and communications world like nothing before. The invention of the telegraph, telephone, radio, and computer set the stage for this unprecedented integration of capabilities. The Internet is at once a world-wide broadcasting capability, a mechanism for information dissemination, and a medium for collaboration and interaction between individuals and their computers without regard for geographic location. In this paper, several of us involved in the development and evolution of the Internet share our views of its origins and history. This is intended to be a brief, necessarily cursory and incomplete history. This history revolves around four distinct aspects. There is the technological evolution that began with early research on packet switching and the ARPANET (and related technologies), and where current research continues to expand the horizons of the infrastructure along several dimensions, such as scale, performance, and higher level functionality. There is the operations and management aspect of a global and complex operational infrastructure. There is the social aspect, which resulted in a broad community of Internauts working together to create and evolve the technology. And there is the commercialization aspect, resulting in an extremely effective transition of research results into a broadly deployed and available information infrastructure.<|reference_end|> | arxiv | @article{leiner1999a,
title={A Brief History of the Internet},
author={Barry M. Leiner, Vinton G. Cerf, David D. Clark, Robert E. Kahn,
Leonard Kleinrock, Daniel C. Lynch, Jon Postel, Larry G. Roberts, and Stephen
Wolff},
journal={arXiv preprint arXiv:cs/9901011},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901011},
primaryClass={cs.NI}
} | leiner1999a |
arxiv-676238 | cs/9901012 | Extremal problems in logic programming and stable model computation | <|reference_start|>Extremal problems in logic programming and stable model computation: We study the following problem: given a class of logic programs C, determine the maximum number of stable models of a program from C. We establish the maximum for the class of all logic programs with at most n clauses, and for the class of all logic programs of size at most n. We also characterize the programs for which the maxima are attained. We obtain similar results for the class of all disjunctive logic programs with at most n clauses, each of length at most m, and for the class of all disjunctive logic programs of size at most n. Our results on logic programs have direct implication for the design of algorithms to compute stable models. Several such algorithms, similar in spirit to the Davis-Putnam procedure, are described in the paper. Our results imply that there is an algorithm that finds all stable models of a program with n clauses after considering the search space of size O(3^{n/3}) in the worst case. Our results also provide some insights into the question of representability of families of sets as families of stable models of logic programs.<|reference_end|> | arxiv | @article{cholewinski1999extremal,
title={Extremal problems in logic programming and stable model computation},
author={Pawel Cholewinski, Miroslaw Truszczynski},
journal={Journal of Logic Programming, 38(1999), pp. 219-242},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901012},
primaryClass={cs.LO cs.AI}
} | cholewinski1999extremal |
arxiv-676239 | cs/9901013 | Analysis of approximate nearest neighbor searching with clustered point sets | <|reference_start|>Analysis of approximate nearest neighbor searching with clustered point sets: We present an empirical analysis of data structures for approximate nearest neighbor searching. We compare the well-known optimized kd-tree splitting method against two alternative splitting methods. The first, called the sliding-midpoint method, attempts to balance the goals of producing subdivision cells of bounded aspect ratio, while not producing any empty cells. The second, called the minimum-ambiguity method, is a query-based approach. In addition to the data points, it is also given a training set of query points for preprocessing. It employs a simple greedy algorithm to select the splitting plane that minimizes the average amount of ambiguity in the choice of the nearest neighbor for the training points. We provide an empirical analysis comparing these two methods against the optimized kd-tree construction for a number of synthetically generated data and query sets. We demonstrate that for clustered data and query sets, these algorithms can provide significant improvements over the standard kd-tree construction for approximate nearest neighbor searching.<|reference_end|> | arxiv | @article{maneewongvatana1999analysis,
title={Analysis of approximate nearest neighbor searching with clustered point
sets},
author={Songrit Maneewongvatana and David M. Mount},
journal={arXiv preprint arXiv:cs/9901013},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901013},
primaryClass={cs.CG}
} | maneewongvatana1999analysis |
arxiv-676240 | cs/9901014 | Minimum Description Length Induction, Bayesianism, and Kolmogorov Complexity | <|reference_start|>Minimum Description Length Induction, Bayesianism, and Kolmogorov Complexity: The relationship between the Bayesian approach and the minimum description length approach is established. We sharpen and clarify the general modeling principles MDL and MML, abstracted as the ideal MDL principle and defined from Bayes's rule by means of Kolmogorov complexity. The basic condition under which the ideal principle should be applied is encapsulated as the Fundamental Inequality, which in broad terms states that the principle is valid when the data are random, relative to every contemplated hypothesis and also these hypotheses are random relative to the (universal) prior. Basically, the ideal principle states that the prior probability associated with the hypothesis should be given by the algorithmic universal probability, and the sum of the log universal probability of the model plus the log of the probability of the data given the model should be minimized. If we restrict the model class to the finite sets then application of the ideal principle turns into Kolmogorov's minimal sufficient statistic. In general we show that data compression is almost always the best strategy, both in hypothesis identification and prediction.<|reference_end|> | arxiv | @article{vitanyi1999minimum,
title={Minimum Description Length Induction, Bayesianism, and Kolmogorov
Complexity},
author={Paul Vitanyi (CWI and University of Amsterdam), Ming Li (University of
Waterloo)},
journal={IEEE Transactions on Information Theory, 46:2(2000), 446-464},
year={1999},
number={CWI Tech Report 1998},
archivePrefix={arXiv},
eprint={cs/9901014},
primaryClass={cs.LG cs.AI cs.CC cs.IT cs.LO math.IT math.PR physics.data-an}
} | vitanyi1999minimum |
arxiv-676241 | cs/9901015 | PSPACE has 2-round quantum interactive proof systems | <|reference_start|>PSPACE has 2-round quantum interactive proof systems: In this paper we consider quantum interactive proof systems, i.e., interactive proof systems in which the prover and verifier may perform quantum computations and exchange quantum messages. It is proved that every language in PSPACE has a quantum interactive proof system that requires only two rounds of communication between the prover and verifier, while having exponentially small (one-sided) probability of error. It follows that quantum interactive proof systems are strictly more powerful than classical interactive proof systems in the constant-round case unless the polynomial time hierarchy collapses to the second level.<|reference_end|> | arxiv | @article{watrous1999pspace,
title={PSPACE has 2-round quantum interactive proof systems},
author={John Watrous},
journal={arXiv preprint arXiv:cs/9901015},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901015},
primaryClass={cs.CC quant-ph}
} | watrous1999pspace |
arxiv-676242 | cs/9901016 | Representation Theory for Default Logic | <|reference_start|>Representation Theory for Default Logic: Default logic can be regarded as a mechanism to represent families of belief sets of a reasoning agent. As such, it is inherently second-order. In this paper, we study the problem of representability of a family of theories as the set of extensions of a default theory. We give a complete solution to the representability by means of normal default theories. We obtain partial results on representability by arbitrary default theories. We construct examples of denumerable families of non-including theories that are not representable. We also study the concept of equivalence between default theories.<|reference_end|> | arxiv | @article{marek1999representation,
title={Representation Theory for Default Logic},
author={Victor Marek, Jan Treur, Miroslaw Truszczynski},
journal={arXiv preprint arXiv:cs/9901016},
year={1999},
archivePrefix={arXiv},
eprint={cs/9901016},
primaryClass={cs.LO cs.AI}
} | marek1999representation |
arxiv-676243 | cs/9902001 | Compacting the Penn Treebank Grammar | <|reference_start|>Compacting the Penn Treebank Grammar: Treebanks, such as the Penn Treebank (PTB), offer a simple approach to obtaining a broad coverage grammar: one can simply read the grammar off the parse trees in the treebank. While such a grammar is easy to obtain, a square-root rate of growth of the rule set with corpus size suggests that the derived grammar is far from complete and that much more treebanked text would be required to obtain a complete grammar, if one exists at some limit. However, we offer an alternative explanation in terms of the underspecification of structures within the treebank. This hypothesis is explored by applying an algorithm to compact the derived grammar by eliminating redundant rules -- rules whose right hand sides can be parsed by other rules. The size of the resulting compacted grammar, which is significantly less than that of the full treebank grammar, is shown to approach a limit. However, such a compacted grammar does not yield very good performance figures. A version of the compaction algorithm taking rule probabilities into account is proposed, which is argued to be more linguistically motivated. Combined with simple thresholding, this method can be used to give a 58% reduction in grammar size without significant change in parsing performance, and can produce a 69% reduction with some gain in recall, but a loss in precision.<|reference_end|> | arxiv | @article{krotov1999compacting,
title={Compacting the Penn Treebank Grammar},
author={Alexander Krotov, Mark Hepple, Robert Gaizauskas and Yorick Wilks
(Department of Computer Science, University of Sheffield, UK)},
journal={In Proceedings of COLING-98 (Montreal), pages 699-703},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902001},
primaryClass={cs.CL}
} | krotov1999compacting |
arxiv-676244 | cs/9902002 | Automatic Identification of Subjects for Textual Documents in Digital Libraries | <|reference_start|>Automatic Identification of Subjects for Textual Documents in Digital Libraries: The number of electronic documents on the Internet grows very quickly, and how to effectively identify subjects for documents has become an important issue. In the past, research focused on the behavior of nouns in documents. Although subjects are composed of nouns, the constituents that determine which nouns are subjects are not nouns alone. Based on the assumption that texts are well-organized and event-driven, nouns and verbs together contribute to the process of subject identification. This paper considers four factors: 1) word importance, 2) word frequency, 3) word co-occurrence, and 4) word distance, and proposes a model to identify subjects for textual documents. Preliminary experiments show that the performance of the proposed model is close to that of human beings.<|reference_end|> | arxiv | @article{chen1999automatic,
title={Automatic Identification of Subjects for Textual Documents in Digital
Libraries},
author={Kuang-hua Chen},
journal={arXiv preprint arXiv:cs/9902002},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902002},
primaryClass={cs.DL cs.CL}
} | chen1999automatic |
arxiv-676245 | cs/9902003 | MyLibrary: A Model for Implementing a User-centered, Customizable Interface to a Library's Collection of Information Resources | <|reference_start|>MyLibrary: A Model for Implementing a User-centered, Customizable Interface to a Library's Collection of Information Resources: The paper describes an extensible model for implementing a user-centered, customizable interface to a library's collection of information resources. This model, called MyLibrary, integrates the principles of librarianship (collection, organization, dissemination, and evaluation) with globally networked computing resources, creating a dynamic, customer-driven front-end to any library's set of materials. The model supports a framework for libraries to provide enhanced access to local and remote sets of data, information, and knowledge. At the same time, the model does not overwhelm its users with too much information because the users control exactly how much information is displayed to them at any given time. The model is active and not passive; direct human interaction, computer-mediated guidance and communication technologies, as well as current awareness services all play indispensable roles in this system.<|reference_end|> | arxiv | @article{morgan1999mylibrary:,
title={MyLibrary: A Model for Implementing a User-centered, Customizable
Interface to a Library's Collection of Information Resources},
author={Eric Lease Morgan},
journal={arXiv preprint arXiv:cs/9902003},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902003},
primaryClass={cs.DL}
} | morgan1999mylibrary: |
arxiv-676246 | cs/9902004 | The Alex Catalogue, A Collection of Digital Texts with Automatic Methods for Acquisition and Cataloging, User-Defined Typography, Cross-searching of Indexed Content, and a Sense of Community | <|reference_start|>The Alex Catalogue, A Collection of Digital Texts with Automatic Methods for Acquisition and Cataloging, User-Defined Typography, Cross-searching of Indexed Content, and a Sense of Community: This paper describes the Alex Catalogue of Electronic Texts, the only Internet-accessible collection of digital documents allowing the user to 1) dynamically create customized, typographically readable documents on demand, 2) search the content of one or more documents from the collection simultaneously, 3) create sets of documents from the collection for review and annotation, and 4) publish these sets of annotated documents, in turn fostering a sense of community around the Catalogue. More than just a collection of links that will break over time, Alex is an archive of electronic texts providing unprecedented access to its content, with features allowing it to meet the needs of a wide variety of users and settings. Furthermore, the process of maintaining the Catalogue is streamlined with tools for automatic acquisition and cataloging, making it possible to sustain the service with a minimum of personnel.<|reference_end|> | arxiv | @article{morgan1999the,
title={The Alex Catalogue, A Collection of Digital Texts with Automatic Methods
for Acquisition and Cataloging, User-Defined Typography, Cross-searching of
Indexed Content, and a Sense of Community},
author={Eric Lease Morgan},
journal={arXiv preprint arXiv:cs/9902004},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902004},
primaryClass={cs.DL}
} | morgan1999the |
arxiv-676247 | cs/9902005 | Mutual Search | <|reference_start|>Mutual Search: We introduce a search problem called ``mutual search'' where $k$ agents, arbitrarily distributed over $n$ sites, are required to locate one another by posing queries of the form ``Anybody at site $i$?''. We ask for the least number of queries that is necessary and sufficient. For the case of two agents using deterministic protocols we obtain the following worst-case results: In an oblivious setting (where all pre-planned queries are executed) there is no savings: $n-1$ queries are required and are sufficient. In a nonoblivious setting we can exploit the paradigm of ``no news is also news'' to obtain significant savings: in the synchronous case $0.586n$ queries suffice and $0.536n$ queries are required; in the asynchronous case $0.896n$ queries suffice and a fortiori $0.536n$ queries are required; for $o(\sqrt{n})$ agents using a deterministic protocol less than $n$ queries suffice; there is a simple randomized protocol for two agents with worst-case expected $0.5n$ queries and all randomized protocols require at least $0.125n$ worst-case expected queries. The graph-theoretic framework we formulate for expressing and analyzing algorithms for this problem may be of independent interest.<|reference_end|> | arxiv | @article{buhrman1999mutual,
title={Mutual Search},
author={Harry Buhrman (CWI), Matthew Franklin (Xerox PARC), Juan A. Garay
(Bell Labs - Lucent Technologies), Jaap-Henk Hoepman (University Twente),
John Tromp (CWI), Paul Vitanyi (CWI and University of Amsterdam)},
journal={arXiv preprint arXiv:cs/9902005},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902005},
primaryClass={cs.DS cs.CC cs.DB cs.DC cs.DM cs.IR}
} | buhrman1999mutual |
arxiv-676248 | cs/9902006 | A Discipline of Evolutionary Programming | <|reference_start|>A Discipline of Evolutionary Programming: Genetic fitness optimization using small populations or small population updates across generations generally suffers from randomly diverging evolutions. We propose a notion of highly probable fitness optimization through feasible evolutionary computing runs on small size populations. Based on rapidly mixing Markov chains, the approach pertains to most types of evolutionary genetic algorithms, genetic programming and the like. We establish that for systems having associated rapidly mixing Markov chains and appropriate stationary distributions the new method finds optimal programs (individuals) with probability almost 1. To make the method useful would require a structured design methodology where the development of the program and the guarantee of the rapidly mixing property go hand in hand. We analyze a simple example to show that the method is implementable. More significant examples require theoretical advances, for example with respect to the Metropolis filter.<|reference_end|> | arxiv | @article{vitanyi1999a,
title={A Discipline of Evolutionary Programming},
author={Paul Vitanyi},
journal={Theoret. Comp. Sci., 241:1-2 (2000), 3--23.},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902006},
primaryClass={cs.NE cs.AI cs.CC cs.DS cs.LG cs.MA}
} | vitanyi1999a |
arxiv-676249 | cs/9902007 | KEA: Practical Automatic Keyphrase Extraction | <|reference_start|>KEA: Practical Automatic Keyphrase Extraction: Keyphrases provide semantic metadata that summarize and characterize documents. This paper describes Kea, an algorithm for automatically extracting keyphrases from text. Kea identifies candidate keyphrases using lexical methods, calculates feature values for each candidate, and uses a machine-learning algorithm to predict which candidates are good keyphrases. The machine learning scheme first builds a prediction model using training documents with known keyphrases, and then uses the model to find keyphrases in new documents. We use a large test corpus to evaluate Kea's effectiveness in terms of how many author-assigned keyphrases are correctly identified. The system is simple, robust, and publicly available.<|reference_end|> | arxiv | @article{witten1999kea:,
title={KEA: Practical Automatic Keyphrase Extraction},
author={Ian H. Witten, Gordon W. Paynter, Eibe Frank, Carl Gutwin and Craig G.
Nevill-Manning},
journal={arXiv preprint arXiv:cs/9902007},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902007},
primaryClass={cs.DL}
} | witten1999kea: |
arxiv-676250 | cs/9902008 | Managing Object-Oriented Integration and Regression Testing | <|reference_start|>Managing Object-Oriented Integration and Regression Testing: Systematic testing of object-oriented software has turned out to be much more complex than testing conventional software. In particular, the highly incremental and iterative development cycle demands both many more changes and many partially implemented or re-implemented classes. Much more integration and regression testing has to be done to reach stable stages during development. In this presentation we propose a diagram capturing all possible dependencies and interactions in an object-oriented program. We then give algorithms and coverage criteria to identify integration and regression test strategies and all test cases to be executed after implementation or modification activities. Finally, we summarize some practical experiences and heuristics.<|reference_end|> | arxiv | @article{winter1999managing,
title={Managing Object-Oriented Integration and Regression Testing},
author={Mario Winter},
journal={arXiv preprint arXiv:cs/9902008},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902008},
primaryClass={cs.SE}
} | winter1999managing |
arxiv-676251 | cs/9902009 | Quality of OCR for Degraded Text Images | <|reference_start|>Quality of OCR for Degraded Text Images: Commercial OCR packages work best with high-quality scanned images. They often produce poor results when the image is degraded, either because the original itself was poor quality, or because of excessive photocopying. The ability to predict the word failure rate of OCR from a statistical analysis of the image can help in making decisions in the trade-off between the success rate of OCR and the cost of human correction of errors. This paper describes an investigation of OCR of degraded text images using a standard OCR engine (Adobe Capture). The documents were selected from those in the archive at Los Alamos National Laboratory. By introducing noise in a controlled manner into perfect documents, we show how the quality of OCR can be predicted from the nature of the noise. The preliminary results show that a simple noise model can give good prediction of the number of OCR errors.<|reference_end|> | arxiv | @article{hartley1999quality,
title={Quality of OCR for Degraded Text Images},
author={Roger T. Hartley, Kathleen Crumpton},
journal={arXiv preprint arXiv:cs/9902009},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902009},
primaryClass={cs.DL}
} | hartley1999quality |
arxiv-676252 | cs/9902010 | Multiparty computation unconditionally secure against Q^2 adversary structures | <|reference_start|>Multiparty computation unconditionally secure against Q^2 adversary structures: We present here a generalization of the work done by Rabin and Ben-Or. We give a protocol for multiparty computation which tolerates any Q^2 active adversary structure based on the existence of a broadcast channel, secure communication between each pair of participants, and a monotone span program with multiplication tolerating the structure. The secrecy achieved is unconditional although we allow an exponentially small probability of error. This is possible due to a protocol for computing the product of two values already shared by means of a homomorphic commitment scheme which appeared originally in a paper of Chaum, Evertse and van de Graaf.<|reference_end|> | arxiv | @article{smith1999multiparty,
title={Multiparty computation unconditionally secure against Q^2 adversary
structures},
author={Adam Smith and Anton Stiglic},
journal={arXiv preprint arXiv:cs/9902010},
year={1999},
number={SOCS-98.2},
archivePrefix={arXiv},
eprint={cs/9902010},
primaryClass={cs.CR}
} | smith1999multiparty |
arxiv-676253 | cs/9902011 | Content-Based Book Recommending Using Learning for Text Categorization | <|reference_start|>Content-Based Book Recommending Using Learning for Text Categorization: Recommender systems improve access to relevant products and information by making personalized suggestions based on previous examples of a user's likes and dislikes. Most existing recommender systems use social filtering methods that base recommendations on other users' preferences. By contrast, content-based methods use information about an item itself to make suggestions. This approach has the advantage of being able to recommend previously unrated items to users with unique interests and to provide explanations for its recommendations. We describe a content-based book recommending system that utilizes information extraction and a machine-learning algorithm for text categorization. Initial experimental results demonstrate that this approach can produce accurate recommendations.<|reference_end|> | arxiv | @article{mooney1999content-based,
title={Content-Based Book Recommending Using Learning for Text Categorization},
author={Raymond J. Mooney and Loriene Roy (University of Texas at Austin)},
journal={arXiv preprint arXiv:cs/9902011},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902011},
primaryClass={cs.DL}
} | mooney1999content-based |
arxiv-676254 | cs/9902012 | Digital Library Technology for Locating and Accessing Scientific Data | <|reference_start|>Digital Library Technology for Locating and Accessing Scientific Data: In this paper we describe our efforts to bring scientific data into the digital library. This has required extension of the standard WWW, and also the extension of metadata standards far beyond the Dublin Core. Our system demonstrates this technology for real scientific data from astronomy.<|reference_end|> | arxiv | @article{mcgrath1999digital,
title={Digital Library Technology for Locating and Accessing Scientific Data},
author={Robert E. McGrath, Joe Futrelle, Ray Plante, Damien Guillaume},
journal={arXiv preprint arXiv:cs/9902012},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902012},
primaryClass={cs.DL}
} | mcgrath1999digital |
arxiv-676255 | cs/9902013 | Use and usability in a digital library search system | <|reference_start|>Use and usability in a digital library search system: Digital libraries must reach out to users from all walks of life, serving information needs at all levels. To do this, they must attain high standards of usability over an extremely broad audience. This paper details the evolution of one important digital library component as it has grown in functionality and usefulness over several years of use by a live, unrestricted community. Central to its evolution have been user studies, analysis of use patterns, and formative usability evaluation. We extrapolate that all three components are necessary in the production of successful digital library systems.<|reference_end|> | arxiv | @article{france1999use,
title={Use and usability in a digital library search system},
author={Robert K. France, Lucy Terry Nowell, Edward A. Fox, Rani A. Saad and
Jianxin Zhao},
journal={arXiv preprint arXiv:cs/9902013},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902013},
primaryClass={cs.DL}
} | france1999use |
arxiv-676256 | cs/9902014 | Proceedings from Critical Infrastructure: The Path Ahead (XIWT Symposium on Cross-Industry Activities for Information Infrastructure Robustness) | <|reference_start|>Proceedings from Critical Infrastructure: The Path Ahead (XIWT Symposium on Cross-Industry Activities for Information Infrastructure Robustness): The Cross-Industry Working Team (XIWT), with the support of the Stanford University Consortium for Research on Information Security and Policy (CRISP), sponsored a symposium on cross-industry activities aimed at improving the reliability, dependability, and robustness of the information infrastructure. Held 3-4 November 1998 in Crystal City, Virginia, the symposium engaged representatives from industry, academia, and government in discussion of current and potential cross-industry, cross-sector activities including information exchange, collaborative operations, and cooperative research and development. This proceedings summarizes the discussions and results of the meeting.<|reference_end|> | arxiv | @article{leiner1999proceedings,
title={Proceedings from Critical Infrastructure: The Path Ahead (XIWT Symposium
on Cross-Industry Activities for Information Infrastructure Robustness)},
author={Barry M. Leiner and Ekaterina A. Drozdova},
journal={arXiv preprint arXiv:cs/9902014},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902014},
primaryClass={cs.NI}
} | leiner1999proceedings |
arxiv-676257 | cs/9902015 | Resource Discovery in Trilogy | <|reference_start|>Resource Discovery in Trilogy: Trilogy is a collaborative project whose key aim is the development of an integrated virtual laboratory to support research training within each institution and collaborative projects between the partners. In this paper, the architecture and underpinning platform of the system are described, with particular emphasis placed on the structure and the integration of the distributed database. A key element is the ontology that provides the multi-agent system with a conceptualisation specification of the domain; this ontology is explained, accompanied by a discussion of how such a system is integrated and used within the virtual laboratory. Although in this paper Telecommunications, and in particular Broadband networks, are used as exemplars, the underlying system principles are applicable to any domain where a combination of experimental and literature-based resources is required.<|reference_end|> | arxiv | @article{chevalier1999resource,
title={Resource Discovery in Trilogy},
author={Franck Chevalier, David Harle, Geoffrey Smith},
journal={arXiv preprint arXiv:cs/9902015},
year={1999},
number={STRA1},
archivePrefix={arXiv},
eprint={cs/9902015},
primaryClass={cs.DL cs.AI cs.MA}
} | chevalier1999resource |
arxiv-676258 | cs/9902016 | Multimedia Description Framework (MDF) for Content Description of Audio/Video Documents | <|reference_start|>Multimedia Description Framework (MDF) for Content Description of Audio/Video Documents: MPEG is undertaking a new initiative to standardize content description of audio and video data/documents. When it is finalized in 2001, MPEG-7 is expected to provide standardized description schemes for concise and unambiguous content description of data/documents of complex media types. Meanwhile, other meta-data or description schemes, such as Dublin Core, XML, etc., are becoming popular in different application domains. In this paper, we propose the Multimedia Description Framework (MDF), which is designed to accommodate multiple description (meta-data) schemes, both MPEG-7 and non-MPEG-7, in an integrated architecture. We use examples to show how an MDF description makes use of the combined strength of different description schemes to enhance its expressive power and flexibility. We conclude the paper with a discussion of using an MDF description of a movie video to search and retrieve required scene clips from the movie, on the MDF prototype system we have implemented.<|reference_end|> | arxiv | @article{hu1999multimedia,
title={Multimedia Description Framework (MDF) for Content Description of
Audio/Video Documents},
author={Michael J. Hu, Ye Jian},
journal={arXiv preprint arXiv:cs/9902016},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902016},
primaryClass={cs.DL}
} | hu1999multimedia |
arxiv-676259 | cs/9902017 | Not Available | <|reference_start|>Not Available: withdrawn by author<|reference_end|> | arxiv | @article{available1999not,
title={Not Available},
author={Not Available},
journal={arXiv preprint arXiv:cs/9902017},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902017},
primaryClass={cs.DL cs.DB}
} | available1999not |
arxiv-676260 | cs/9902018 | ZBroker: A Query Routing Broker for Z39.50 Databases | <|reference_start|>ZBroker: A Query Routing Broker for Z39.50 Databases: A query routing broker is a software agent that determines, from a large set of accessible information sources, the ones most relevant to a user's information need. As the number of information sources on the Internet increases dramatically, future users will have to rely on query routing brokers to decide on a small number of information sources to query without incurring too much query processing overhead. In this paper, we describe a query routing broker known as ZBroker developed for bibliographic database servers that support the Z39.50 protocol. ZBroker samples the content of each bibliographic database by using training queries and their results, and summarizes the bibliographic database content into a knowledge base. We present the design and implementation of ZBroker and describe its Web-based user interface.<|reference_end|> | arxiv | @article{lin1999zbroker:,
title={ZBroker: A Query Routing Broker for Z39.50 Databases},
author={Yong Lin, Jian Xu, Ee-Peng Lim, Wee-Keong Ng},
journal={arXiv preprint arXiv:cs/9902018},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902018},
primaryClass={cs.DL cs.DB}
} | lin1999zbroker: |
arxiv-676261 | cs/9902019 | Multimodal Surrogates for Video Browsing | <|reference_start|>Multimodal Surrogates for Video Browsing: Three types of video surrogates - visual (keyframes), verbal (keywords/phrases), and combination of the two - were designed and studied in a qualitative investigation of user cognitive processes. The results favor the combined surrogates in which verbal information and images reinforce each other, lead to better comprehension, and may actually require less processing time. The results also highlight image features users found most helpful. These findings will inform the interface design and video representation for video retrieval and browsing.<|reference_end|> | arxiv | @article{ding1999multimodal,
title={Multimodal Surrogates for Video Browsing},
author={Wei Ding, Gary Marchionini, Dagobert Soergel},
journal={arXiv preprint arXiv:cs/9902019},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902019},
primaryClass={cs.DL cs.HC}
} | ding1999multimodal |
arxiv-676262 | cs/9902020 | Using Query Mediators for Distributed Searching in Federated Digital Libraries | <|reference_start|>Using Query Mediators for Distributed Searching in Federated Digital Libraries: We describe an architecture and investigate the characteristics of distributed searching in federated digital libraries. We introduce the notion of a query mediator as a digital library service responsible for selecting among available search engines, routing queries to those search engines, and aggregating results. We examine operational data from the NCSTRL distributed digital library that reveals a number of characteristics of distributed resource discovery. These include availability and response time of indexers and the distinction between the query mediator view of these characteristics and the indexer view.<|reference_end|> | arxiv | @article{dushay1999using,
title={Using Query Mediators for Distributed Searching in Federated Digital
Libraries},
author={Naomi Dushay, James C. French, and Carl Lagoze},
journal={arXiv preprint arXiv:cs/9902020},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902020},
primaryClass={cs.DL}
} | dushay1999using |
arxiv-676263 | cs/9902021 | Visualization of Retrieved Documents using a Presentation Server | <|reference_start|>Visualization of Retrieved Documents using a Presentation Server: In any search-based digital library (DL) system dealing with a non-trivial number of documents, users are often required to go through a long list of short document descriptions in order to identify what they are looking for. To tackle the problem, a variety of document organization algorithms and/or visualization techniques have been used to guide users in selecting relevant documents. Since these techniques require heavy computations, however, we developed a presentation server designed to serve as an intermediary between retrieval servers and clients equipped with a visualization interface. In addition, we designed our own visual interface by which users can view a set of documents from different perspectives through layers of document maps. We finally ran experiments to show that the visual interface, in conjunction with the presentation server, indeed helps users in selecting relevant documents from the retrieval results.<|reference_end|> | arxiv | @article{song1999visualization,
title={Visualization of Retrieved Documents using a Presentation Server},
author={Sa-Kwang Song and Sung Hyon Myaeng},
journal={arXiv preprint arXiv:cs/9902021},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902021},
primaryClass={cs.DL cs.IR}
} | song1999visualization |
arxiv-676264 | cs/9902022 | Semi-Automatic Indexing of Multilingual Documents | <|reference_start|>Semi-Automatic Indexing of Multilingual Documents: With the growing significance of digital libraries and the Internet, more and more electronic texts become accessible to a wide and geographically dispersed public. This requires adequate tools to facilitate indexing, storage, and retrieval of documents written in different languages. We present a method for semi-automatic indexing of electronic documents and construction of a multilingual thesaurus, which can be used for query formulation and information retrieval. We use special dictionaries and user interaction in order to resolve ambiguities and find adequate canonical terms in the language as well as adequate abstract language-independent terms. The abstract thesaurus is updated incrementally by newly indexed documents and is used to search for documents matching the terms of a query against the document base.<|reference_end|> | arxiv | @article{schiel1999semi-automatic,
title={Semi-Automatic Indexing of Multilingual Documents},
author={Ulrich Schiel, Ianna M. Sodre Ferreira de Souza and Edberto Ferneda},
journal={arXiv preprint arXiv:cs/9902022},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902022},
primaryClass={cs.DL}
} | schiel1999semi-automatic |
arxiv-676265 | cs/9902023 | A New Ranking Principle for Multimedia Information Retrieval | <|reference_start|>A New Ranking Principle for Multimedia Information Retrieval: A theoretical framework for multimedia information retrieval is introduced which guarantees optimal retrieval effectiveness. In particular, a Ranking Principle for Distributed Multimedia-Documents (RPDM) is described together with an algorithm that satisfies this principle. Finally, the RPDM is shown to be a generalization of the Probability Ranking Principle (PRP), which guarantees optimal retrieval effectiveness in the case of text document retrieval. The PRP justifies theoretically the relevance ranking adopted by modern search engines. In contrast to the classical PRP, the new RPDM takes into account transmission and inspection time, and most importantly, aspectual recall rather than simple recall.<|reference_end|> | arxiv | @article{wechsler1999a,
title={A New Ranking Principle for Multimedia Information Retrieval},
author={Martin Wechsler, Peter Schauble},
journal={arXiv preprint arXiv:cs/9902023},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902023},
primaryClass={cs.DL}
} | wechsler1999a |
arxiv-676266 | cs/9902024 | Algorithms of Two-Level Parallelization for DSMC of Unsteady Flows in Molecular Gasdynamics | <|reference_start|>Algorithms of Two-Level Parallelization for DSMC of Unsteady Flows in Molecular Gasdynamics: A general scheme of two-level parallelization (TLP) for direct simulation Monte Carlo of unsteady gas flows on shared-memory multiprocessor computers is described. A highly efficient algorithm of parallel independent runs is used on the first level, and data parallelization is employed on the second. Two versions of the TLP algorithm are elaborated, with static and dynamic load balancing. The method of dynamic processor reallocation is used for dynamic load balancing. Two unsteady gasdynamic problems were used to study the speedup and efficiency of the algorithms. The conditions under which the algorithms can be applied efficiently have been determined.<|reference_end|> | arxiv | @article{bogdanov1999algorithms,
title={Algorithms of Two-Level Parallelization for DSMC of Unsteady Flows in
Molecular Gasdynamics},
author={Alexander V. Bogdanov, Nick Yu. Bykov, Igor A. Grishin, Gregory O.
Khanlarov, German A. Lukianov and Vladimir V. Zakharov},
journal={arXiv preprint arXiv:cs/9902024},
year={1999},
number={10-98},
archivePrefix={arXiv},
eprint={cs/9902024},
primaryClass={cs.CE cs.PF}
} | bogdanov1999algorithms |
arxiv-676267 | cs/9902025 | An Efficient Mean Field Approach to the Set Covering Problem | <|reference_start|>An Efficient Mean Field Approach to the Set Covering Problem: A mean field feedback artificial neural network algorithm is developed and explored for the set covering problem. A convenient encoding of the inequality constraints is achieved by means of a multilinear penalty function. An approximate energy minimum is obtained by iterating a set of mean field equations, in combination with annealing. The approach is numerically tested against a set of publicly available test problems with sizes ranging up to 5x10^3 rows and 10^6 columns. When comparing the performance with exact results for sizes where these are available, the approach yields results within a few percent from the optimal solutions. Comparisons with other approximate methods also come out well, in particular given the very low CPU consumption required -- typically a few seconds. Arbitrary problems can be processed using the algorithm via a public domain server.<|reference_end|> | arxiv | @article{ohlsson1999an,
title={An Efficient Mean Field Approach to the Set Covering Problem},
author={Mattias Ohlsson, Carsten Peterson and Bo S\"oderberg},
journal={arXiv preprint arXiv:cs/9902025},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902025},
primaryClass={cs.NE}
} | ohlsson1999an |
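A schematic sketch of mean-field annealing with a multilinear penalty for set covering, in the spirit of the abstract (the penalty weight A, cooling rate, and sweep count are illustrative assumptions, not the paper's settings):

    import math, random

    def mean_field_set_cover(costs, rows, A=2.0, T=1.0, cooling=0.95, sweeps=200):
        # costs[j]: cost of column j; rows[i]: columns covering row i.
        # Energy: E(v) = sum_j costs[j]*v[j] + A*sum_i prod_{j in rows[i]} (1 - v[j])
        n = len(costs)
        v = [0.5 + 0.01 * (random.random() - 0.5) for _ in range(n)]
        incident = [[] for _ in range(n)]        # rows touched by each column
        for i, cols in enumerate(rows):
            for j in cols:
                incident[j].append(i)
        for _ in range(sweeps):
            for j in range(n):
                grad = costs[j]                  # dE/dv_j, since E is multilinear
                for i in incident[j]:
                    prod = 1.0
                    for k in rows[i]:
                        if k != j:
                            prod *= 1.0 - v[k]
                    grad -= A * prod
                x = max(-50.0, min(50.0, grad / T))
                v[j] = 1.0 / (1.0 + math.exp(x)) # sigmoid(-grad/T)
            T *= cooling                         # anneal the temperature
        return [j for j in range(n) if v[j] > 0.5]

Because the penalty is multilinear, dE/dv_j equals the energy difference between setting v_j to 1 and to 0, which is exactly what the sigmoid update needs.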
arxiv-676268 | cs/9902026 | Probabilistic Inductive Inference: a Survey | <|reference_start|>Probabilistic Inductive Inference: a Survey: Inductive inference is a recursion-theoretic theory of learning, first developed by E. M. Gold (1967). This paper surveys developments in probabilistic inductive inference. We mainly focus on finite inference of recursive functions, since this simple paradigm has produced the most interesting (and most complex) results.<|reference_end|> | arxiv | @article{ambainis1999probabilistic,
title={Probabilistic Inductive Inference: a Survey},
author={Andris Ambainis},
journal={arXiv preprint arXiv:cs/9902026},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902026},
primaryClass={cs.LG cs.CC cs.LO math.LO}
} | ambainis1999probabilistic |
arxiv-676269 | cs/9902027 | Autocatalytic Theory of Meaning | <|reference_start|>Autocatalytic Theory of Meaning: Recently it has been argued that autocatalytic theory could be applied to the origin of culture. Here a possible application to a theory of meaning in the philosophy of language, called radical interpretation, is commented upon and compared to previous applications.<|reference_end|> | arxiv | @article{roberts1999autocatalytic,
title={Autocatalytic Theory of Meaning},
author={Mark D. Roberts},
journal={PSYCHOLOQUY.99.10.014},
year={1999},
archivePrefix={arXiv},
eprint={cs/9902027},
primaryClass={cs.CL adap-org nlin.AO}
} | roberts1999autocatalytic |
arxiv-676270 | cs/9902028 | A Scrollbar-based Visualization for Document Navigation | <|reference_start|>A Scrollbar-based Visualization for Document Navigation: We are interested in questions of improving user control in best-match text-retrieval systems, specifically questions as to whether simple visualizations that nonetheless go beyond the minimal ones generally available can significantly help users. Recently, we have been investigating ways to help users decide, given a set of documents retrieved by a query, which documents and passages are worth closer examination. We built a document viewer incorporating a visualization centered around a novel content-displaying scrollbar and color term highlighting, and studied whether the visualization is helpful to non-expert searchers. Participants' reaction to the visualization was very positive, while the objective results were inconclusive.<|reference_end|> | arxiv | @article{byrd1999a,
title={A Scrollbar-based Visualization for Document Navigation},
author={Donald Byrd},
journal={arXiv preprint arXiv:cs/9902028},
year={1999},
number={IR-163},
archivePrefix={arXiv},
eprint={cs/9902028},
primaryClass={cs.IR cs.HC}
} | byrd1999a |
arxiv-676271 | cs/9902029 | The "Fodor"-FODOR fallacy bites back | <|reference_start|>The "Fodor"-FODOR fallacy bites back: The paper argues that Fodor and Lepore are misguided in their attack on Pustejovsky's Generative Lexicon, largely because their argument rests on a traditional, but implausible and discredited, view of the lexicon on which it is effectively empty of content, a view that stands in the long line of explaining word meaning (a) by ostension and then (b) by means of a vacuous symbol in a lexicon, often the word itself after typographic transmogrification. (a) and (b) both share the wrong belief that to a word must correspond a simple entity that is its meaning. I then turn to the semantic rules that Pustejovsky uses and argue first that, although they have novel features, they are in a well-established Artificial Intelligence tradition of explaining meaning by reference to structures that mention other structures assigned to words that may occur in close proximity to the first. It is argued that Fodor and Lepore's view that there cannot be such rules is without foundation, and indeed systems using such rules have proved their practical worth in computational systems. Their justification descends from a line of argument, whose high points were probably Wittgenstein and Quine, that meaning is not to be understood by simple links to the world, ostensive or otherwise, but by the relationship of whole cultural representational structures to each other and to the world as a whole.<|reference_end|> | arxiv | @article{wilks1999the,
title={The "Fodor"-FODOR fallacy bites back},
author={Yorick Wilks},
journal={arXiv preprint arXiv:cs/9902029},
year={1999},
number={cs-98-13},
archivePrefix={arXiv},
eprint={cs/9902029},
primaryClass={cs.CL}
} | wilks1999the |
arxiv-676272 | cs/9902030 | Is Word Sense Disambiguation just one more NLP task? | <|reference_start|>Is Word Sense Disambiguation just one more NLP task?: This paper compares the tasks of part-of-speech (POS) tagging and word-sense-tagging or disambiguation (WSD), and argues that the tasks are not related by fineness of grain or anything like that, but are quite different kinds of task, particularly because there is nothing in POS corresponding to sense novelty. The paper also argues for the reintegration of sub-tasks that are being separated for evaluation.<|reference_end|> | arxiv | @article{wilks1999is,
title={Is Word Sense Disambiguation just one more NLP task?},
author={Yorick Wilks},
journal={arXiv preprint arXiv:cs/9902030},
year={1999},
number={cs-98-12},
archivePrefix={arXiv},
eprint={cs/9902030},
primaryClass={cs.CL}
} | wilks1999is |
arxiv-676273 | cs/9903001 | Introduction to the RSA algorithm and modular arithmetic | <|reference_start|>Introduction to the RSA algorithm and modular arithmetic: These notes are a brief introduction to the RSA algorithm and modular arithmetic. They are intended for an undergraduate audience.<|reference_end|> | arxiv | @article{milson1999introduction,
title={Introduction to the RSA algorithm and modular arithmetic},
author={R. Milson},
journal={arXiv preprint arXiv:cs/9903001},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903001},
primaryClass={cs.CR}
} | milson1999introduction |
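A textbook-sized illustration of the modular arithmetic these notes cover (toy primes only; the modular-inverse form pow(e, -1, phi) needs Python 3.8+):

    p, q = 61, 53
    n = p * q                  # 3233, the public modulus
    phi = (p - 1) * (q - 1)    # 3120
    e = 17                     # public exponent, coprime to phi
    d = pow(e, -1, phi)        # 2753, private exponent: e*d = 1 (mod phi)

    msg = 65
    cipher = pow(msg, e, n)    # encrypt: msg^e mod n  -> 2790
    plain = pow(cipher, d, n)  # decrypt: cipher^d mod n -> 65
    assert plain == msg

Real RSA uses large random primes and padding; the tiny numbers here exist only to make the arithmetic checkable by hand.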
arxiv-676274 | cs/9903002 | An Algebraic Programming Style for Numerical Software and its Optimization | <|reference_start|>An Algebraic Programming Style for Numerical Software and its Optimization: The abstract mathematical theory of partial differential equations (PDEs) is formulated in terms of manifolds, scalar fields, tensors, and the like, but these algebraic structures are hardly recognizable in actual PDE solvers. The general aim of the Sophus programming style is to bridge the gap between theory and practice in the domain of PDE solvers. Its main ingredients are a library of abstract datatypes corresponding to the algebraic structures used in the mathematical theory and an algebraic expression style similar to the expression style used in the mathematical theory. Because of its emphasis on abstract datatypes, Sophus is most naturally combined with object-oriented languages or other languages supporting abstract datatypes. The resulting source code patterns are beyond the scope of current compiler optimizations, but are sufficiently specific for a dedicated source-to-source optimizer. The limited, domain-specific, character of Sophus is the key to success here. This kind of optimization has been tested on computationally intensive Sophus style code with promising results. The general approach may be useful for other styles and in other application domains as well.<|reference_end|> | arxiv | @article{dinesh1999an,
title={An Algebraic Programming Style for Numerical Software and its
Optimization},
author={T. B. Dinesh (Academic Systems Corp.), M. Haveraaen (UiB) and J.
Heering (CWI)},
journal={Scientific Programming 8 (2000) 4 pages 247-259 (Special issue on
coordinate-free numerics)},
year={1999},
number={SEN-R9844 (CWI, Amsterdam)},
archivePrefix={arXiv},
eprint={cs/9903002},
primaryClass={cs.SE cs.AI cs.CE cs.MS}
} | dinesh1999an |
arxiv-676275 | cs/9903003 | A Formal Framework for Linguistic Annotation | <|reference_start|>A Formal Framework for Linguistic Annotation: `Linguistic annotation' covers any descriptive or analytic notations applied to raw language data. The basic data may be in the form of time functions -- audio, video and/or physiological recordings -- or it may be textual. The added notations may include transcriptions of all sorts (from phonetic features to discourse structures), part-of-speech and sense tagging, syntactic analysis, `named entity' identification, co-reference annotation, and so on. While there are several ongoing efforts to provide formats and tools for such annotations and to publish annotated linguistic databases, the lack of widely accepted standards is becoming a critical problem. Proposed standards, to the extent they exist, have focussed on file formats. This paper focuses instead on the logical structure of linguistic annotations. We survey a wide variety of existing annotation formats and demonstrate a common conceptual core, the annotation graph. This provides a formal framework for constructing, maintaining and searching linguistic annotations, while remaining consistent with many alternative data structures and file formats.<|reference_end|> | arxiv | @article{bird1999a,
title={A Formal Framework for Linguistic Annotation},
author={Steven Bird and Mark Liberman (University of Pennsylvania)},
journal={arXiv preprint arXiv:cs/9903003},
year={1999},
number={Tech Report MS-CIS-99-01, Dept of Computer and Information Science},
archivePrefix={arXiv},
eprint={cs/9903003},
primaryClass={cs.CL}
} | bird1999a |
arxiv-676276 | cs/9903004 | A Flit Level Simulator for Wormhole Routing | <|reference_start|>A Flit Level Simulator for Wormhole Routing: Wormhole routing, the latest switching technique to be utilized by massively parallel computers, enjoys the distinct advantage of low latency when compared to other switching techniques. This low latency is due to its nearly distance-insensitive routing behavior in the absence of channel contention. The low latency of wormhole routing also brings a liability: a chance of deadlock. Deadlock is a concern in wormhole-routed networks because a message does not release its allocated resources until all of its flits have completely traversed the router with which these resources are associated. The deadlock condition is addressed in the routing algorithm. Simulation tools are currently needed to help determine the size and number of resources necessary to obtain optimum utilization of network resources for an algorithm. These resources include the topology of the network along with its number of nodes, the size of the message, and the number and size of buffers at each router.<|reference_end|> | arxiv | @article{smith1999a,
title={A Flit Level Simulator for Wormhole Routing},
author={Denvil Smith},
journal={arXiv preprint arXiv:cs/9903004},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903004},
primaryClass={cs.DC cs.OS}
} | smith1999a |
arxiv-676277 | cs/9903005 | Numeration systems on a regular language | <|reference_start|>Numeration systems on a regular language: Generalizations of linear numeration systems in which the set of natural numbers is recognizable by finite automata are obtained by describing an arbitrary infinite regular language following the lexicographic ordering. For these systems of numeration, we show that ultimately periodic sets are recognizable. We also study the translation and the multiplication by constants as well as the order dependence of the recognizability.<|reference_end|> | arxiv | @article{lecomte1999numeration,
title={Numeration systems on a regular language},
author={Pierre B. A. Lecomte, Michel Rigo},
journal={arXiv preprint arXiv:cs/9903005},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903005},
primaryClass={cs.OH}
} | lecomte1999numeration |
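A small sketch of the construction (our own illustration, with a predicate standing in for a finite automaton): enumerate a regular language in genealogical order, by length and then lexicographically, and let the $n$-th word emitted be the representation of $n$. With the language $\{\varepsilon\} \cup b\{a,b\}^*$ and the reading $a \mapsto 0$, $b \mapsto 1$, this recovers ordinary binary notation:

    from itertools import count, product

    def representations(alphabet, accepts):
        # Words of the language in genealogical order; the n-th word
        # yielded represents the natural number n.
        for length in count(0):
            for letters in product(alphabet, repeat=length):
                w = "".join(letters)
                if accepts(w):
                    yield w

    accepts = lambda w: w == "" or w.startswith("b")
    gen = representations("ab", accepts)
    print([next(gen) for _ in range(8)])
    # ['', 'b', 'ba', 'bb', 'baa', 'bab', 'bba', 'bbb']  ~  0, 1, 10, 11, 100, ...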
arxiv-676278 | cs/9903006 | Designing SAT for HCP | <|reference_start|>Designing SAT for HCP: For an arbitrary undirected graph $G$, we design a SATISFIABILITY problem (SAT) instance for HCP, using tools of Boolean algebra only. The obtained SAT instance is the logical formulation of the conditions for Hamiltonian cycle existence, and uses $m$ Boolean variables, where $m$ is the number of graph edges. This Boolean expression is true if and only if the initial graph is Hamiltonian. That is, each satisfying assignment of the Boolean variables determines a Hamiltonian cycle of $G$, and each Hamiltonian cycle of $G$ corresponds to a satisfying assignment of the Boolean variables. In the general case, the obtained Boolean expression may have exponential length (the number of Boolean literals).<|reference_end|> | arxiv | @article{plotnikov1999designing,
title={Designing SAT for HCP},
author={Anatoly D. Plotnikov (Vinnitsa Institute of Regional Economics and
Management)},
journal={arXiv preprint arXiv:cs/9903006},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903006},
primaryClass={cs.LO}
} | plotnikov1999designing |
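A sketch of one necessary piece of such an encoding, with one Boolean variable per edge: CNF clauses (DIMACS-style signed literals) forcing every vertex to have exactly two selected incident edges. The connectivity (subtour-elimination) constraints, the part responsible for the potential exponential length noted above, are omitted:

    from itertools import combinations

    def degree_two_cnf(n, edges):
        # edges: list of (u, v) pairs over vertices 0..n-1; assumes min degree >= 1.
        inc = [[] for _ in range(n)]
        for idx, (u, v) in enumerate(edges):
            inc[u].append(idx)
            inc[v].append(idx)
        clauses = []
        for v in range(n):
            k = len(inc[v])
            # at most two: among any three incident edges, one is unselected
            for trio in combinations(inc[v], 3):
                clauses.append([-(e + 1) for e in trio])
            # at least two: no k-1 incident edges may all be unselected,
            # i.e. every (k-1)-subset contains a selected edge
            for grp in combinations(inc[v], k - 1):
                clauses.append([e + 1 for e in grp])
        return clauses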
arxiv-676279 | cs/9903007 | Some Remarks on the Geometry of Grammar | <|reference_start|>Some Remarks on the Geometry of Grammar: This paper, following (Dymetman:1998), presents an approach to grammar description and processing based on the geometry of cancellation diagrams, a concept which plays a central role in combinatorial group theory (Lyndon-Schuppe:1977). The focus here is on the geometric intuitions and on relating group-theoretical diagrams to the traditional charts associated with context-free grammars and type-0 rewriting systems. The paper is structured as follows. We begin in Section 1 by analyzing charts in terms of constructs called cells, which are a geometrical counterpart to rules. Then we move in Section 2 to a presentation of cancellation diagrams and show how they can be used computationally. In Section 3 we give a formal algebraic presentation of the concept of group computation structure, which is based on the standard notions of free group and conjugacy. We then relate in Section 4 the geometric and the algebraic views of computation by using the fundamental theorem of combinatorial group theory (Rotman:1994). In Section 5 we study in more detail the relationship between the two views on the basis of a simple grammar stated as a group computation structure. In section 6 we extend this grammar to handle non-local constructs such as relative pronouns and quantifiers. We conclude in Section 7 with some brief notes on the differences between normal submonoids and normal subgroups, group computation versus rewriting systems, and the use of group morphisms to study the computational complexity of parsing and generation.<|reference_end|> | arxiv | @article{dymetman1999some,
title={Some Remarks on the Geometry of Grammar},
author={Marc Dymetman},
journal={arXiv preprint arXiv:cs/9903007},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903007},
primaryClass={cs.CL cs.LO}
} | dymetman1999some |
arxiv-676280 | cs/9903008 | Empirically Evaluating an Adaptable Spoken Dialogue System | <|reference_start|>Empirically Evaluating an Adaptable Spoken Dialogue System: Recent technological advances have made it possible to build real-time, interactive spoken dialogue systems for a wide variety of applications. However, when users do not respect the limitations of such systems, performance typically degrades. Although users differ with respect to their knowledge of system limitations, and although different dialogue strategies make system limitations more apparent to users, most current systems do not try to improve performance by adapting dialogue behavior to individual users. This paper presents an empirical evaluation of TOOT, an adaptable spoken dialogue system for retrieving train schedules on the web. We conduct an experiment in which 20 users carry out 4 tasks with both adaptable and non-adaptable versions of TOOT, resulting in a corpus of 80 dialogues. The values for a wide range of evaluation measures are then extracted from this corpus. Our results show that adaptable TOOT generally outperforms non-adaptable TOOT, and that the utility of adaptation depends on TOOT's initial dialogue strategies.<|reference_end|> | arxiv | @article{litman1999empirically,
title={Empirically Evaluating an Adaptable Spoken Dialogue System},
author={Diane J. Litman (AT&T Labs - Research) and Shimei Pan (Columbia
University)},
journal={arXiv preprint arXiv:cs/9903008},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903008},
primaryClass={cs.CL}
} | litman1999empirically |
arxiv-676281 | cs/9903009 | Space-Efficient Routing Tables for Almost All Networks and the Incompressibility Method | <|reference_start|>Space-Efficient Routing Tables for Almost All Networks and the Incompressibility Method: We use the incompressibility method based on Kolmogorov complexity to determine the total number of bits of routing information for almost all network topologies. In most models for routing, for almost all labeled graphs $\Theta (n^2)$ bits are necessary and sufficient for shortest path routing. By `almost all graphs' we mean the Kolmogorov random graphs which constitute a fraction of $1-1/n^c$ of all graphs on $n$ nodes, where $c > 0$ is an arbitrary fixed constant. There is a model for which the average case lower bound rises to $\Omega(n^2 \log n)$ and another model where the average case upper bound drops to $O(n \log^2 n)$. This clearly exposes the sensitivity of such bounds to the model under consideration. If paths have to be short, but need not be shortest (if the stretch factor may be larger than 1), then much less space is needed on average, even in the more demanding models. Full-information routing requires $\Theta (n^3)$ bits on average. For worst-case static networks we prove an $\Omega(n^2 \log n)$ lower bound for shortest path routing and all stretch factors $<2$ in some networks where free relabeling is not allowed.<|reference_end|> | arxiv | @article{buhrman1999space-efficient,
title={Space-Efficient Routing Tables for Almost All Networks and the
Incompressibility Method},
author={Harry Buhrman (CWI), Jaap-Henk Hoepman, Paul Vitanyi (CWI and
University of Amsterdam)},
journal={arXiv preprint arXiv:cs/9903009},
year={1999},
number={CWI Tech Report 1997},
archivePrefix={arXiv},
eprint={cs/9903009},
primaryClass={cs.DC cs.AR cs.CC cs.DS cs.NI}
} | buhrman1999space-efficient |
arxiv-676282 | cs/9903010 | A class of problems of NP to be worth to search an efficient solving algorithm | <|reference_start|>A class of problems of NP to be worth to search an efficient solving algorithm: We examine the possibility of designing an efficient solving algorithm for problems of the class NP. We introduce a classification of NP problems by the property that a partial solution of size $k$ can be extended into a partial solution of size $k+1$ in polynomial time. We define a unique class of problems for which it is worth searching for an efficient solving algorithm. Problems outside this class are inherently exponential. We show that the Hamiltonian cycle problem is inherently exponential.<|reference_end|> | arxiv | @article{plotnikov1999a,
title={A class of problems of NP to be worth to search an efficient solving
algorithm},
author={Anatoly D. Plotnikov (Vinnitsa Institute of Regional Economics and
Management)},
journal={arXiv preprint arXiv:cs/9903010},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903010},
primaryClass={cs.DS}
} | plotnikov1999a |
arxiv-676283 | cs/9903011 | A complete anytime algorithm for balanced number partitioning | <|reference_start|>A complete anytime algorithm for balanced number partitioning: Given a set of numbers, the balanced partitioning problem is to divide them into two subsets, so that the sums of the numbers in the two subsets are as nearly equal as possible, subject to the constraint that the cardinalities of the subsets be within one of each other. We combine the balanced largest differencing method (BLDM) and Korf's complete Karmarkar-Karp algorithm to get a new algorithm that optimally solves the balanced partitioning problem. For numbers with twelve significant digits or less, the algorithm can optimally solve balanced partitioning problems of arbitrary size in practice. For numbers with greater precision, it first returns the BLDM solution, then continues to find better solutions as time allows.<|reference_end|> | arxiv | @article{mertens1999a,
title={A complete anytime algorithm for balanced number partitioning},
author={Stephan Mertens},
journal={arXiv preprint arXiv:cs/9903011},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903011},
primaryClass={cs.DS cond-mat.dis-nn cs.AI}
} | mertens1999a |
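For context, a sketch of the plain (unbalanced) Karmarkar-Karp differencing heuristic that both BLDM and Korf's complete algorithm build on; the cardinality constraint and the complete anytime search are the paper's contributions and are not reproduced here:

    import heapq

    def karmarkar_karp(nums):
        # Repeatedly commit the two largest numbers to opposite subsets
        # by replacing them with their difference; the lone survivor is
        # the final difference of the two subset sums.
        heap = [-x for x in nums]      # max-heap via negation
        heapq.heapify(heap)
        while len(heap) > 1:
            a = -heapq.heappop(heap)
            b = -heapq.heappop(heap)
            heapq.heappush(heap, -(a - b))
        return -heap[0] if heap else 0

    print(karmarkar_karp([8, 7, 6, 5, 4]))  # 2, e.g. {8, 6} vs {7, 5, 4}

On this instance the heuristic's difference of 2 is not optimal (8+7 = 6+5+4 = 15 gives 0), which is exactly the kind of gap a complete anytime search closes.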
arxiv-676284 | cs/9903012 | Formalization of the class of problems solvable by a nondeterministic Turing machine | <|reference_start|>Formalization of the class of problems solvable by a nondeterministic Turing machine: The objective of this article is to formalize the definition of NP problems. We construct a mathematical model of discrete problems as independence systems with weighted elements. We introduce two auxiliary sets that characterize the solution of the problem: the adjoint set, which contains those elements of the original set that cannot be adjoined to the already chosen solution elements; and the residual set, in which every element can be adjoined to previously chosen solution elements. In a problem without lookahead, every adjoint set can be generated by the solution algorithm effectively, in polynomial time. The main result of the study is the assertion that the NP class is identical with the class of problems without lookahead. Hence it follows that if we fail to find an effective (polynomial-time) solution algorithm for a given problem, then we need to look for an alternative formulation of the problem within the set of problems without lookahead.<|reference_end|> | arxiv | @article{plotnikov1999formalization,
title={Formalization of the class of problems solvable by a nondeterministic
Turing machine},
author={Anatoly D. Plotnikov},
journal={Cybernetics and Systems Analysis. Vol. 33, 5(1997) pp. 635-640},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903012},
primaryClass={cs.DS cs.CC}
} | plotnikov1999formalization |
arxiv-676285 | cs/9903013 | The Impact of Net Culture on Mainstream Societies: a Global Analysis | <|reference_start|>The Impact of Net Culture on Mainstream Societies: a Global Analysis: In this work the impact of Internet culture on standard mainstream societies is analyzed. After analytically establishing that the Net can be viewed as a pan-societal superstructure which supports its own distinct culture, an ethnographic analysis is provided to identify the key aspects of this culture. The elements of this culture which have an empowering impact on standard mainstream societies, as well as the elements which can cause discouraging social effects, are then discussed through a global investigation of the present status of various fundamental aspects (e.g. education, economics, politics, entertainment) of the mainstream societies as well as their links with the Net culture. Though the Internet has immense potential for providing prominent positive impacts, the key findings of this work indicate that its misuse can create tremendous harm to the members of mainstream societies by generating a set of morally crippled people as well as a future generation completely devoid of principles and ethics. This structured diagnostic approach to the social problems caused by the mishandling of the Internet leads to a concrete effort to provide measures that can be taken to enhance the supporting effects and to overcome the limiting effects of the Net culture, with the intent to benefit our society and to prevent the erosion of certain ethical values.<|reference_end|> | arxiv | @article{das1999the,
title={The Impact of Net Culture on Mainstream Societies: a Global Analysis},
author={Tapas Kumar Das},
journal={arXiv preprint arXiv:cs/9903013},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903013},
primaryClass={cs.CY}
} | das1999the |
arxiv-676286 | cs/9903014 | Perpetual Adaptation of Software to Hardware: An Extensible Architecture for Providing Code Optimization as a Central System Service | <|reference_start|>Perpetual Adaptation of Software to Hardware: An Extensible Architecture for Providing Code Optimization as a Central System Service: We present an open architecture for just-in-time code generation and dynamic code optimization that is flexible, customizable, and extensible. While previous research has primarily investigated functional aspects of such a system, architectural aspects have so far remained unexplored. In this paper, we argue that these properties are important to generate optimal code for a variety of hardware architectures and different processor generations within processor families. These properties are also important to make system-level code generation useful in practice.<|reference_end|> | arxiv | @article{kistler1999perpetual,
title={Perpetual Adaptation of Software to Hardware: An Extensible Architecture
for Providing Code Optimization as a Central System Service},
author={Thomas Kistler and Michael Franz},
journal={arXiv preprint arXiv:cs/9903014},
year={1999},
number={ICS-TR-99-12},
archivePrefix={arXiv},
eprint={cs/9903014},
primaryClass={cs.OS cs.PL}
} | kistler1999perpetual |
arxiv-676287 | cs/9903015 | Scholarly Communication and the Continuum of Electronic Publishing | <|reference_start|>Scholarly Communication and the Continuum of Electronic Publishing: Electronic publishing opportunities, manifested today in a variety of electronic journals and Web-based compendia, have captured the imagination of many scholars. These opportunities have also destabilized norms about the character of legitimate scholarly publishing in some fields. Unfortunately, much of the literature about scholarly e-publishing homogenizes the character of publishing. This article provides an analytical approach for evaluating disciplinary conventions and for proposing policies about scholarly e-publishing. We characterize three dimensions of scholarly publishing as a communicative practice -- publicity, access, and trustworthiness, and examine several forms of paper and electronic publications in this framework. This analysis shows how the common claim that e-publishing "substantially expands access" is over-simplified. It also indicates how peer-reviewing (whether in paper or electronically) provides valuable functions for scholarly communication that are not effectively replaced by self-posting articles in electronic media.<|reference_end|> | arxiv | @article{kling1999scholarly,
title={Scholarly Communication and the Continuum of Electronic Publishing},
author={Rob Kling and Geoffrey McKim},
journal={Journal of the American Society for Information Science 50(10):
890-906. (July 19, 1999)},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903015},
primaryClass={cs.CY physics.soc-ph}
} | kling1999scholarly |
arxiv-676288 | cs/9903016 | Modeling Belief in Dynamic Systems, Part II: Revision and Update | <|reference_start|>Modeling Belief in Dynamic Systems, Part II: Revision and Update: The study of belief change has been an active area in philosophy and AI. In recent years two special cases of belief change, belief revision and belief update, have been studied in detail. In a companion paper (Friedman & Halpern, 1997), we introduce a new framework to model belief change. This framework combines temporal and epistemic modalities with a notion of plausibility, allowing us to examine the change of beliefs over time. In this paper, we show how belief revision and belief update can be captured in our framework. This allows us to compare the assumptions made by each method, and to better understand the principles underlying them. In particular, it shows that Katsuno and Mendelzon's notion of belief update (Katsuno & Mendelzon, 1991a) depends on several strong assumptions that may limit its applicability in artificial intelligence. Finally, our analysis allow us to identify a notion of minimal change that underlies a broad range of belief change operations including revision and update.<|reference_end|> | arxiv | @article{friedman1999modeling,
title={Modeling Belief in Dynamic Systems, Part II: Revision and Update},
author={N. Friedman and J. Y. Halpern},
journal={Journal of Artificial Intelligence Research, Vol.10 (1999) 117-167},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903016},
primaryClass={cs.AI}
} | friedman1999modeling |
arxiv-676289 | cs/9903017 | SIMMUNE, a tool for simulating and analyzing immune system behavior | <|reference_start|>SIMMUNE, a tool for simulating and analyzing immune system behavior: We present a new approach to the simulation and analysis of immune system behavior. The simulations that can be done with our software package called SIMMUNE are based on immunological data that describe the behavior of immune system agents (cells, molecules) on a microscopic (i.e. agent-agent interaction) scale by defining cellular stimulus-response mechanisms. Since the behavior of the agents in SIMMUNE can be very flexibly configured, its application is not limited to immune system simulations. We outline the principles of SIMMUNE's multiscale analysis of emergent structure within the simulated immune system that allow the identification of immunological contexts using minimal a priori assumptions about the higher level organization of the immune system.<|reference_end|> | arxiv | @article{meier-schellersheim1999simmune,
title={SIMMUNE, a tool for simulating and analyzing immune system behavior},
author={M. Meier-Schellersheim and G. Mack},
journal={arXiv preprint arXiv:cs/9903017},
year={1999},
number={DESY-99-034},
archivePrefix={arXiv},
eprint={cs/9903017},
primaryClass={cs.MA q-bio}
} | meier-schellersheim1999simmune
arxiv-676290 | cs/9903018 | LuaJava - A Scripting Tool for Java | <|reference_start|>LuaJava - A Scripting Tool for Java: Scripting languages are becoming more and more important as a tool for software development, as they provide great flexibility for rapid prototyping and for configuring componentware applications. In this paper we present LuaJava, a scripting tool for Java. LuaJava adopts Lua, a dynamically typed interpreted language, as its script language. Great emphasis is given to the transparency of the integration between the two languages, so that objects from one language can be used inside the other like native objects. The final result of this integration is a tool that allows the construction of configurable Java applications, using off-the-shelf components, at a high level of abstraction.<|reference_end|> | arxiv | @article{cassino1999luajava,
title={LuaJava - A Scripting Tool for Java},
author={Carlos Cassino, Roberto Ierusalimschy, and Noemi Rodriguez},
journal={arXiv preprint arXiv:cs/9903018},
year={1999},
number={PUC-RioInf.MCC02/99},
archivePrefix={arXiv},
eprint={cs/9903018},
primaryClass={cs.SE}
} | cassino1999luajava |
arxiv-676291 | cs/9903019 | Workflow Automation with Lotus Notes for the Governmental Administrative Information System | <|reference_start|>Workflow Automation with Lotus Notes for the Governmental Administrative Information System: The paper presents an introductory overview of the workflow automation area, outlining the main types, basic technologies, and essential features of workflow applications. Two sorts of process models for the definition of workflows (according to the conversation-based and activity-based methodologies) are sketched. Later on, the nature of Lotus Notes and its capabilities (as an environment for workflow management systems development) are indicated. In conclusion, the experience of automating administrative workflows (developing a Subsystem of Inter-institutional Document Management of the VADIS project) is briefly outlined.<|reference_end|> | arxiv | @article{maskeliunas1999workflow,
title={Workflow Automation with Lotus Notes for the Governmental Administrative
Information System},
author={Saulius Maskeliunas (Institute of Mathematics & Informatics, Vilnius)},
journal={arXiv preprint arXiv:cs/9903019},
year={1999},
number={MII.PSIS/SM.99-01},
archivePrefix={arXiv},
eprint={cs/9903019},
primaryClass={cs.HC}
} | maskeliunas1999workflow |
arxiv-676292 | cs/9903020 | Tiling with bars under tomographic constraints | <|reference_start|>Tiling with bars under tomographic constraints: We wish to tile a rectangle or a torus with only vertical and horizontal bars of a given length, such that the number of bars in each column and row equals a given value. We present results for particular instances and for a more general problem, while leaving open the initial problem.<|reference_end|> | arxiv | @article{durr1999tiling,
title={Tiling with bars under tomographic constraints},
author={Christoph Durr, Eric Goles, Ivan Rapaport, Eric Remila},
journal={arXiv preprint arXiv:cs/9903020},
year={1999},
archivePrefix={arXiv},
eprint={cs/9903020},
primaryClass={cs.DS cs.CC}
} | durr1999tiling |
arxiv-676293 | cs/9904001 | A Proposal for the Establishment of Review Boards - a flexible approach to the selection of academic knowledge | <|reference_start|>A Proposal for the Establishment of Review Boards - a flexible approach to the selection of academic knowledge: Paper journals use a small number of trusted academics to select information on behalf of all their readers. This inflexibility in the selection was justified by the expense of publishing. The advent of cheap distribution via the internet allows a new trade-off between time and expense and the flexibility of the selection process. This paper explores one such possible process, one where the role of mark-up and archiving is separated from that of review. The idea is that authors publish their papers on their own web pages or in a public paper archive, and a board of reviewers judges each paper on a number of different criteria. The detailed results of the reviews are stored in such a way as to enable readers to use these judgements to find the papers they want using search engines on the web. Thus, instead of journals using generic selection criteria, readers can set their own to suit their needs. The resulting system might be even cheaper than web-journals to implement.<|reference_end|> | arxiv | @article{edmonds1999a,
title={A Proposal for the Establishment of Review Boards - a flexible approach
to the selection of academic knowledge},
author={Bruce Edmonds},
journal={arXiv preprint arXiv:cs/9904001},
year={1999},
number={CPM-99-50},
archivePrefix={arXiv},
eprint={cs/9904001},
primaryClass={cs.CY cs.DL cs.IR}
} | edmonds1999a |
arxiv-676294 | cs/9904002 | A geometric framework for modelling similarity search | <|reference_start|>A geometric framework for modelling similarity search: The aim of this paper is to propose a geometric framework for modelling similarity search in large and multidimensional data spaces of general nature, which seems to be flexible enough to address such issues as analysis of complexity, indexability, and the `curse of dimensionality.' Such a framework is provided by the concept of the so-called similarity workload, which is a probability metric space $\Omega$ (query domain) with a distinguished finite subspace $X$ (dataset), together with an assembly of concepts, techniques, and results from metric geometry. They include such notions as metric transform, $\varepsilon$-entropy, and the phenomenon of concentration of measure on high-dimensional structures. In particular, we discuss the relevance of the latter to understanding the curse of dimensionality. As some of those concepts and techniques are currently being reinvented by the database community, it seems desirable to try and bridge the gap between database research and the relevant work already done in geometry and analysis.<|reference_end|> | arxiv | @article{pestov1999a,
title={A geometric framework for modelling similarity search},
author={Vladimir Pestov},
journal={Proc. 10-th Int. Workshop on Database and Expert Systems
Applications (DEXA'99), Sept. 1-3, 1999, Florence, Italy, IEEE Comp. Soc.,
pp. 150-154.},
year={1999},
doi={10.1109/DEXA.1999.795158},
number={RP-99-12, School of Math and Comp Sci, Victoria University of
Wellington, New Zealand},
archivePrefix={arXiv},
eprint={cs/9904002},
primaryClass={cs.IR cs.DB cs.DS}
} | pestov1999a |
arxiv-676295 | cs/9904003 | The Structure of Weighting Coefficient Matrices of Harmonic Differential Quadrature and Its Applications | <|reference_start|>The Structure of Weighting Coefficient Matrices of Harmonic Differential Quadrature and Its Applications: The structure of weighting coefficient matrices of Harmonic Differential Quadrature (HDQ) is found to be either centrosymmetric or skew centrosymmetric, depending on the order of the corresponding derivatives. The properties of both matrices are briefly discussed in this paper. It is noted that the computational effort of the harmonic quadrature for some problems can be further reduced by up to 75 per cent by using the properties of the above-mentioned matrices.<|reference_end|> | arxiv | @article{chen1999the,
title={The Structure of Weighting Coefficient Matrices of Harmonic Differential
Quadrature and Its Applications},
author={W. Chen, W. Wang, T. Zhong},
journal={Communications in Numerical Methods in Engineering, 12 (1996), pp.
455-459},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904003},
primaryClass={cs.CE cs.NA math.NA}
} | chen1999the |
arxiv-676296 | cs/9904004 | Mixing Metaphors | <|reference_start|>Mixing Metaphors: Mixed metaphors have been neglected in recent metaphor research. This paper suggests that such neglect is short-sighted. Though mixing is a more complex phenomenon than straight metaphors, the same kinds of reasoning and knowledge structures are required. This paper provides an analysis of both parallel and serial mixed metaphors within the framework of an AI system which is already capable of reasoning about straight metaphorical manifestations, and argues that the processes underlying mixing are central to metaphorical meaning. Therefore, any theory of metaphors must be able to account for mixing.<|reference_end|> | arxiv | @article{lee1999mixing,
title={Mixing Metaphors},
author={Mark Lee and John Barnden},
journal={Proceedings of the AISB'99 Symposium on Metaphor, Artificial
Intelligence, and Cognition, pages 11-16, Edinburgh},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904004},
primaryClass={cs.CL cs.AI}
} | lee1999mixing |
arxiv-676297 | cs/9904005 | Transport Level Security: a proof using the Gong-Needham-Yahalom Logic | <|reference_start|>Transport Level Security: a proof using the Gong-Needham-Yahalom Logic: This paper provides a proof of the proposed Internet standard Transport Level Security protocol using the Gong-Needham-Yahalom logic. It is intended as a teaching aid and hopes to show students: the potency of a formal method for protocol design; some of the subtleties of authenticating parties on a network where all messages can be intercepted; and the design of what should be a widely accepted standard.<|reference_end|> | arxiv | @article{eaves1999transport,
title={Transport Level Security: a proof using the Gong-Needham-Yahalom Logic},
author={Walter Eaves},
journal={arXiv preprint arXiv:cs/9904005},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904005},
primaryClass={cs.CR}
} | eaves1999transport |
arxiv-676298 | cs/9904006 | Jacobian matrix: a bridge between linear and nonlinear polynomial-only problems | <|reference_start|>Jacobian matrix: a bridge between linear and nonlinear polynomial-only problems: By using the Hadamard matrix product concept, this paper introduces two generalized matrix formulations of the numerical analogue of nonlinear differential operators. The SJT matrix-vector product approach is found to be a simple, efficient and accurate technique for calculating the Jacobian matrix of nonlinear discretizations by finite difference, finite volume, collocation, dual reciprocity BEM or radial function based numerical methods. We also present and prove a simple underlying relationship (theorem 3.1) between general nonlinear analogue polynomials and their corresponding Jacobian matrices, which forms the basis of this paper. By means of theorem 3.1, stability analysis of numerical solutions of nonlinear initial value problems can be easily handled based on the well-known results for linear problems. Theorem 3.1 also leads naturally to the straightforward extension of various linear iterative algorithms such as the SOR, Gauss-Seidel and Jacobi methods to nonlinear algebraic equations. Since an exact alternative to the quasi-Newton equation is established via theorem 3.1, we derive a modified BFGS quasi-Newton method. A simple formula is also given to examine the deviation between the approximate and exact Jacobian matrices. Furthermore, in order to avoid the evaluation of the Jacobian matrix and its inverse, the pseudo-Jacobian matrix is introduced with general applicability to any nonlinear system of equations. It should be pointed out that a large class of real-world nonlinear problems can be modeled or numerically discretized as polynomial-only algebraic systems of equations. The results presented here are in general applicable to all these problems. This paper can be considered a starting point in the research of nonlinear computation and analysis from an innovative viewpoint.<|reference_end|> | arxiv | @article{chen1999jacobian,
title={Jacobian matrix: a bridge between linear and nonlinear polynomial-only
problems},
author={W. Chen},
journal={arXiv preprint arXiv:cs/9904006},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904006},
primaryClass={cs.CE cs.NA math.NA}
} | chen1999jacobian |
arxiv-676299 | cs/9904007 | The Study on the Nonlinear Computations of the DQ and DC Methods | <|reference_start|>The Study on the Nonlinear Computations of the DQ and DC Methods: This paper points out that the differential quadrature (DQ) and differential cubature (DC) methods, due to their global domain property, are more efficient for nonlinear problems than traditional numerical techniques such as the finite element and finite difference methods. By introducing the Hadamard product of matrices, we obtain an explicit matrix formulation for the DQ and DC solutions of nonlinear differential and integro-differential equations. Due to its simplicity and flexibility, the present Hadamard product approach makes the DQ and DC methods much easier to use. Many studies on the Hadamard product can be fully exploited for DQ and DC nonlinear computations. Furthermore, we first present the SJT product of matrix and vector to compute accurately and efficiently the Frechet derivative matrix in the Newton-Raphson method for the solution of the nonlinear formulations. We also propose a simple approach to simplify the DQ or DC formulations for some nonlinear differential operators, and thus the computational efficiency of these methods is improved significantly. We give the matrix multiplication formulas to compute efficiently the weighting coefficient matrices of the DC method. The spherical harmonics are suggested as the test functions in the DC method to handle the nonlinear differential equations occurring in global and hemispheric weather forecasting problems. Some examples are analyzed to demonstrate the simplicity and efficiency of the presented techniques. It is emphasized that the innovations presented are applicable to the nonlinear computations of other numerical methods as well.<|reference_end|> | arxiv | @article{chen1999the,
title={The Study on the Nonlinear Computations of the DQ and DC Methods},
author={W. Chen (Corresponding author) and Tingxiu Zhong},
journal={arXiv preprint arXiv:cs/9904007},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904007},
primaryClass={cs.CE cs.NA math.NA}
} | chen1999the |
arxiv-676300 | cs/9904008 | Transducers from Rewrite Rules with Backreferences | <|reference_start|>Transducers from Rewrite Rules with Backreferences: Context sensitive rewrite rules have been widely used in several areas of natural language processing, including syntax, morphology, phonology and speech processing. Kaplan and Kay, Karttunen, and Mohri & Sproat have given various algorithms to compile such rewrite rules into finite-state transducers. The present paper extends this work by allowing a limited form of backreferencing in such rules. The explicit use of backreferencing leads to more elegant and general solutions.<|reference_end|> | arxiv | @article{gerdemann1999transducers,
title={Transducers from Rewrite Rules with Backreferences},
author={Dale Gerdemann and Gertjan van Noord},
journal={arXiv preprint arXiv:cs/9904008},
year={1999},
archivePrefix={arXiv},
eprint={cs/9904008},
primaryClass={cs.CL}
} | gerdemann1999transducers |