corpus_id | paper_id | title | abstract | source | bibtex | citation_key
---|---|---|---|---|---|---|
arxiv-669801 | cs/0101002 | Automated Debugging In Java Using OCL And JDI | <|reference_start|>Automated Debugging In Java Using OCL And JDI: Correctness constraints provide a foundation for automated debugging within object-oriented systems. This paper discusses a new approach to incorporating correctness constraints into Java development environments. Our approach uses the Object Constraint Language ("OCL") as a specification language and the Java Debug Interface ("JDI") as a verification API. OCL provides a standard language for expressing object-oriented constraints that can integrate with Unified Modeling Language ("UML") software models. JDI provides a standard Java API capable of supporting type-safe and side effect free runtime constraint evaluation. The resulting correctness constraint mechanism: (1) entails no programming language modifications; (2) requires neither access nor changes to existing source code; and (3) works with standard off-the-shelf Java virtual machines ("VMs"). A prototype correctness constraint auditor is presented to demonstrate the utility of this mechanism for purposes of automated debugging.<|reference_end|> | arxiv | @article{murray2001automated,
title={Automated Debugging In Java Using OCL And JDI},
author={David J. Murray (Lehigh University) and Dale E. Parson (Lucent Technologies)},
journal={arXiv preprint arXiv:cs/0101002},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101002},
primaryClass={cs.SE cs.PL}
} | murray2001automated |
arxiv-669802 | cs/0101003 | Signal-Theoretic Characterization of Waveguide Mesh Geometries for Models of Two--Dimensional Wave Propagation in Elastic Media | <|reference_start|>Signal-Theoretic Characterization of Waveguide Mesh Geometries for Models of Two--Dimensional Wave Propagation in Elastic Media: Waveguide Meshes are efficient and versatile models of wave propagation along a multidimensional ideal medium. The choice of the mesh geometry affects both the computational cost and the accuracy of simulations. In this paper, we focus on 2D geometries and use multidimensional sampling theory to compare the square, triangular, and hexagonal meshes in terms of sampling efficiency and dispersion error under conditions of critical sampling. The analysis shows that the triangular geometry exhibits the most desirable tradeoff between accuracy and computational cost.<|reference_end|> | arxiv | @article{fontana2001signal-theoretic,
title={Signal-Theoretic Characterization of Waveguide Mesh Geometries for
Models of Two--Dimensional Wave Propagation in Elastic Media},
author={Federico Fontana and Davide Rocchesso},
journal={arXiv preprint arXiv:cs/0101003},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101003},
primaryClass={math.NA cs.NA cs.SD}
} | fontana2001signal-theoretic |
arxiv-669803 | cs/0101004 | Decomposing Finite Abelian Groups | <|reference_start|>Decomposing Finite Abelian Groups: This paper describes a quantum algorithm for efficiently decomposing finite Abelian groups. Such a decomposition is needed in order to apply the Abelian hidden subgroup algorithm. Assuming the Generalized Riemann Hypothesis, such a decomposition also leads to an efficient algorithm for computing class numbers (a problem known to be at least as difficult as factoring).<|reference_end|> | arxiv | @article{cheung2001decomposing,
title={Decomposing Finite Abelian Groups},
author={Kevin K. H. Cheung and Michele Mosca},
journal={arXiv preprint arXiv:cs/0101004},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101004},
primaryClass={cs.DS quant-ph}
} | cheung2001decomposing |
arxiv-669804 | cs/0101005 | Slicing Event Traces of Large Software Systems | <|reference_start|>Slicing Event Traces of Large Software Systems: Debugging of large software systems consisting of many processes accessing shared resources is a very difficult task. Many commercial systems record essential events during system execution for post-mortem analysis. However, the event traces of large and long-running systems can be quite voluminous. Analysis of such event traces to identify sources of incorrect behavior can be very tedious, error-prone, and inefficient. In this paper, we propose a novel technique of slicing event traces as a means of reducing the number of events for analysis. This technique identifies events that may have influenced observed incorrect system behavior. In order to recognize influencing events several types of dependencies between events are identified. These dependencies are determined automatically from an event trace. In order to improve the precision of slicing we propose to use additional dependencies, referred to as cause-effect dependencies, which can further reduce the size of sliced event traces. Our initial experience has shown that this slicing technique can significantly reduce the size of event traces for analysis.<|reference_end|> | arxiv | @article{smith2001slicing,
title={Slicing Event Traces of Large Software Systems},
author={Raymond Smith (Lucent Technologies) and Bogdan Korel (Illinois Institute of Technology)},
journal={arXiv preprint arXiv:cs/0101005},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101005},
primaryClass={cs.SE cs.PL}
} | smith2001slicing |
arxiv-669805 | cs/0101006 | Optimal Moebius Transformations for Information Visualization and Meshing | <|reference_start|>Optimal Moebius Transformations for Information Visualization and Meshing: We give linear-time quasiconvex programming algorithms for finding a Moebius transformation of a set of spheres in a unit ball or on the surface of a unit sphere that maximizes the minimum size of a transformed sphere. We can also use similar methods to maximize the minimum distance among a set of pairs of input points. We apply these results to vertex separation and symmetry display in spherical graph drawing, viewpoint selection in hyperbolic browsing, element size control in conformal structured mesh generation, and brain flat mapping.<|reference_end|> | arxiv | @article{bern2001optimal,
title={Optimal Moebius Transformations for Information Visualization and
Meshing},
author={Marshall Bern and David Eppstein},
journal={arXiv preprint arXiv:cs/0101006},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101006},
primaryClass={cs.CG}
} | bern2001optimal |
arxiv-669806 | cs/0101007 | Assertion checker for the C programming language based on computations over event traces | <|reference_start|>Assertion checker for the C programming language based on computations over event traces: This paper suggests an approach to the development of software testing and debugging automation tools based on precise program behavior models. The program behavior model is defined as a set of events (event trace) with two basic binary relations over events -- precedence and inclusion, and represents the temporal relationship between actions. A language for the computations over event traces is developed that provides a basis for assertion checking, debugging queries, execution profiles, and performance measurements. The approach is nondestructive, since assertion texts are separated from the target program source code and can be maintained independently. Assertions can capture the dynamic properties of a particular target program and can formalize the general knowledge of typical bugs and debugging strategies. An event grammar provides a sound basis for assertion language implementation via target program automatic instrumentation. An implementation architecture and preliminary experiments with a prototype assertion checker for the C programming language are discussed.<|reference_end|> | arxiv | @article{auguston2001assertion,
title={Assertion checker for the C programming language based on computations
over event traces},
author={Mikhail Auguston},
journal={arXiv preprint arXiv:cs/0101007},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101007},
primaryClass={cs.SE cs.PL}
} | auguston2001assertion |
arxiv-669807 | cs/0101008 | A Knowledge-based Automated Debugger in Learning System | <|reference_start|>A Knowledge-based Automated Debugger in Learning System: Currently, programming instructors continually face the problem of helping to debug students' programs. Although there currently exist a number of debuggers and debugging tools on various platforms, most of these projects or products are crafted for the needs of software maintenance rather than the teaching of programming. Moreover, most debuggers are too general, aimed at experts, and not user-friendly. We propose a new knowledge-based automated debugger to be used as a user-friendly tool by students to self-debug their own programs. Stereotyped code (cliches) and bug cliches will be stored as a library of plans in the knowledge base. Recognition of correct code or bugs is based on pattern matching and constraint satisfaction. Given a syntax error-free program and its specification, this debugger, called Adil (Automated Debugger in Learning system), will be able to locate, pinpoint, and explain logical errors in programs. If there are no errors, it will be able to explain the meaning of the program. Adil is based on the design of the Conceiver, an automated program understanding system developed at Universiti Kebangsaan Malaysia.<|reference_end|> | arxiv | @article{zin2001a,
title={A Knowledge-based Automated Debugger in Learning System},
author={Abdullah Mohd Zin (1) and Syed Ahmad Aljunid (1 and 2) and Zarina Shukur (1) and Mohd Jan Nordin (1) ((1) Universiti Kebangsaan Malaysia, Bangi, Malaysia, (2) Universiti Teknologi MARA, Shah Alam, Malaysia)},
journal={arXiv preprint arXiv:cs/0101008},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101008},
primaryClass={cs.SE cs.PL}
} | zin2001a |
arxiv-669808 | cs/0101009 | Generation of and Debugging with Logical Pre and Postconditions | <|reference_start|>Generation of and Debugging with Logical Pre and Postconditions: This paper shows the debugging facilities provided by the SLAM system. The SLAM system includes i) a specification language that integrates algebraic specifications and model-based specifications using the object-oriented model; class operations are defined by rules, each with logical pre and postconditions but with a functional flavour; ii) a development environment that, among other features, is able to generate readable code in a high-level object-oriented language; iii) generated code that includes (part of) the pre and postconditions as assertions that can be automatically checked in the debug-mode execution of programs. We focus on this last aspect. The SLAM language is expressive enough to describe many useful properties, and these properties are translated into a Prolog program that is linked (via an adequate interface) with the user program. The debugging execution of the program interacts with the Prolog engine, which is responsible for checking properties.<|reference_end|> | arxiv | @article{navarro2001generation,
title={Generation of and Debugging with Logical Pre and Postconditions},
author={Angel Herranz-Nieva and Juan Jose Moreno Navarro},
journal={arXiv preprint arXiv:cs/0101009},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101009},
primaryClass={cs.PL cs.SE}
} | navarro2001generation |
arxiv-669809 | cs/0101010 | An Even Faster and More Unifying Algorithm for Comparing Trees via Unbalanced Bipartite Matchings | <|reference_start|>An Even Faster and More Unifying Algorithm for Comparing Trees via Unbalanced Bipartite Matchings: A widely used method for determining the similarity of two labeled trees is to compute a maximum agreement subtree of the two trees. Previous work on this similarity measure is only concerned with the comparison of labeled trees of two special kinds, namely, uniformly labeled trees (i.e., trees with all their nodes labeled by the same symbol) and evolutionary trees (i.e., leaf-labeled trees with distinct symbols for distinct leaves). This paper presents an algorithm for comparing trees that are labeled in an arbitrary manner. In addition to this generality, this algorithm is faster than the previous algorithms. Another contribution of this paper is on maximum weight bipartite matchings. We show how to speed up the best known matching algorithms when the input graphs are node-unbalanced or weight-unbalanced. Based on these enhancements, we obtain an efficient algorithm for a new matching problem called the hierarchical bipartite matching problem, which is at the core of our maximum agreement subtree algorithm.<|reference_end|> | arxiv | @article{kao2001an,
title={An Even Faster and More Unifying Algorithm for Comparing Trees via
Unbalanced Bipartite Matchings},
author={Ming-Yang Kao and Tak-Wah Lam and Wing-Kin Sung and Hing-Fung Ting},
journal={arXiv preprint arXiv:cs/0101010},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101010},
primaryClass={cs.CV cs.DS}
} | kao2001an |
arxiv-669810 | cs/0101011 | Multiple-Size Divide-and-Conquer Recurrences | <|reference_start|>Multiple-Size Divide-and-Conquer Recurrences: This short note reports a master theorem on tight asymptotic solutions to divide-and-conquer recurrences with more than one recursive term: for example, T(n) = 1/4 T(n/16) + 1/3 T(3n/5) + 4 T(n/100) + 10 T(n/300) + n^2.<|reference_end|> | arxiv | @article{kao2001multiple-size,
title={Multiple-Size Divide-and-Conquer Recurrences},
author={Ming-Yang Kao},
journal={SIGACT News, 28(2):67--69, June 1997},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101011},
primaryClass={cs.GL cs.DS}
} | kao2001multiple-size |
arxiv-669811 | cs/0101012 | Communities of Practice in the Distributed International Environment | <|reference_start|>Communities of Practice in the Distributed International Environment: Modern commercial organisations are facing pressures which have caused them to lose personnel. When they lose people, they also lose their knowledge. Organisations also have to cope with the internationalisation of business forcing collaboration and knowledge sharing across time and distance. Knowledge Management (KM) claims to tackle these issues. This paper looks at an area where KM does not offer sufficient support, that is, the sharing of knowledge that is not easy to articulate. The focus in this paper is on Communities of Practice in commercial organisations. We do this by exploring knowledge sharing in Lave and Wenger's [1] theory of Communities of Practice and investigating how Communities of Practice may translate to a distributed international environment. The paper reports on two case studies that explore the functioning of Communities of Practice across international boundaries.<|reference_end|> | arxiv | @article{hildreth2001communities,
title={Communities of Practice in the Distributed International Environment},
author={Paul Hildreth and Chris Kimble and Peter Wright},
journal={Journal of Knowledge Management, 4(1), March 2000, pp 27 - 37},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101012},
primaryClass={cs.HC cs.IR}
} | hildreth2001communities |
arxiv-669812 | cs/0101013 | A Classification of Symbolic Transition Systems | <|reference_start|>A Classification of Symbolic Transition Systems: We define five increasingly comprehensive classes of infinite-state systems, called STS1--5, whose state spaces have finitary structure. For four of these classes, we provide examples from hybrid systems.<|reference_end|> | arxiv | @article{henzinger2001a,
title={A Classification of Symbolic Transition Systems},
author={Thomas A. Henzinger and Rupak Majumdar and Jean-Francois Raskin},
journal={arXiv preprint arXiv:cs/0101013},
year={2001},
number={UCB-CSD99/1086},
archivePrefix={arXiv},
eprint={cs/0101013},
primaryClass={cs.LO}
} | henzinger2001a |
arxiv-669813 | cs/0101014 | On the problem of computing the well-founded semantics | <|reference_start|>On the problem of computing the well-founded semantics: The well-founded semantics is one of the most widely studied and used semantics of logic programs with negation. In the case of finite propositional programs, it can be computed in polynomial time, more specifically, in O(|At(P)|size(P)) steps, where size(P) denotes the total number of occurrences of atoms in a logic program P. This bound is achieved by an algorithm introduced by Van Gelder and known as the alternating-fixpoint algorithm. Improving on the alternating-fixpoint algorithm turned out to be difficult. In this paper we study extensions and modifications of the alternating-fixpoint approach. We then restrict our attention to the class of programs whose rules have no more than one positive occurrence of an atom in their bodies. For programs in that class we propose a new implementation of the alternating-fixpoint method in which false atoms are computed in a top-down fashion. We show that our algorithm is faster than other known algorithms and that for a wide class of programs it is linear and so, asymptotically optimal.<|reference_end|> | arxiv | @article{lonc2001on,
title={On the problem of computing the well-founded semantics},
author={Zbigniew Lonc and Miroslaw Truszczynski},
journal={Theory and Practice of Logic Programming, 1(5), 591-609, 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101014},
primaryClass={cs.LO cs.AI cs.DS}
} | lonc2001on |
arxiv-669814 | cs/0101015 | Combinatorial Toolbox for Protein Sequence Design and Landscape Analysis in the Grand Canonical Model | <|reference_start|>Combinatorial Toolbox for Protein Sequence Design and Landscape Analysis in the Grand Canonical Model: In modern biology, one of the most important research problems is to understand how protein sequences fold into their native 3D structures. To investigate this problem at a high level, one wishes to analyze the protein landscapes, i.e., the structures of the space of all protein sequences and their native 3D structures. Perhaps the most basic computational problem at this level is to take a target 3D structure as input and design a fittest protein sequence with respect to one or more fitness functions of the target 3D structure. We develop a toolbox of combinatorial techniques for protein landscape analysis in the Grand Canonical model of Sun, Brem, Chan, and Dill. The toolbox is based on linear programming, network flow, and a linear-size representation of all minimum cuts of a network. It not only substantially expands the network flow technique for protein sequence design in Kleinberg's seminal work but also is applicable to a considerably broader collection of computational problems than those considered by Kleinberg. We have used this toolbox to obtain a number of efficient algorithms and hardness results. We have further used the algorithms to analyze 3D structures drawn from the Protein Data Bank and have discovered some novel relationships between such native 3D structures and the Grand Canonical model.<|reference_end|> | arxiv | @article{aspnes2001combinatorial,
title={Combinatorial Toolbox for Protein Sequence Design and Landscape Analysis
in the Grand Canonical Model},
author={James Aspnes and Julia Hartling and Ming-Yang Kao and Junhyong Kim and Gauri Shah},
journal={arXiv preprint arXiv:cs/0101015},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101015},
primaryClass={cs.CE cs.CC q-bio.BM}
} | aspnes2001combinatorial |
arxiv-669815 | cs/0101016 | A Dynamic Programming Approach to De Novo Peptide Sequencing via Tandem Mass Spectrometry | <|reference_start|>A Dynamic Programming Approach to De Novo Peptide Sequencing via Tandem Mass Spectrometry: The tandem mass spectrometry fragments a large number of molecules of the same peptide sequence into charged prefix and suffix subsequences, and then measures mass/charge ratios of these ions. The de novo peptide sequencing problem is to reconstruct the peptide sequence from a given tandem mass spectral data of k ions. By implicitly transforming the spectral data into an NC-spectrum graph G=(V,E) where |V|=2k+2, we can solve this problem in O(|V|+|E|) time and O(|V|) space using dynamic programming. Our approach can be further used to discover a modified amino acid in O(|V||E|) time and to analyze data with other types of noise in O(|V||E|) time. Our algorithms have been implemented and tested on actual experimental data.<|reference_end|> | arxiv | @article{chen2001a,
title={A Dynamic Programming Approach to De Novo Peptide Sequencing via Tandem
Mass Spectrometry},
author={Ting Chen and Ming-Yang Kao and Matthew Tepel and John Rush and George M. Church},
journal={arXiv preprint arXiv:cs/0101016},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101016},
primaryClass={cs.CE cs.DS}
} | chen2001a |
arxiv-669816 | cs/0101017 | Checking Properties within Fairness and Behavior Abstractions | <|reference_start|>Checking Properties within Fairness and Behavior Abstractions: This paper is motivated by the fact that verifying liveness properties under a fairness condition is often problematic, especially when abstraction is used. It shows that using a more abstract notion than truth under fairness, specifically the concept of a property being satisfied within fairness can lead to interesting possibilities. Technically, it is first established that deciding satisfaction within fairness is a PSPACE-complete problem and it is shown that properties satisfied within fairness can always be satisfied by some fair implementation. Thereafter, the interaction between behavior abstraction and satisfaction within fairness is studied and it is proved that satisfaction of properties within fairness can be verified on behavior abstractions, if the abstraction homomorphism is weakly continuation-closed.<|reference_end|> | arxiv | @article{ultes-nitsche2001checking,
title={Checking Properties within Fairness and Behavior Abstractions},
author={Ulrich Ultes-Nitsche and Pierre Wolper},
journal={arXiv preprint arXiv:cs/0101017},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101017},
primaryClass={cs.LO}
} | ultes-nitsche2001checking |
arxiv-669817 | cs/0101018 | GPCG: A Case Study in the Performance and Scalability of Optimization Algorithms | <|reference_start|>GPCG: A Case Study in the Performance and Scalability of Optimization Algorithms: GPCG is an algorithm within the Toolkit for Advanced Optimization (TAO) for solving bound-constrained, convex quadratic problems. Originally developed by Moré and Toraldo, this algorithm was designed for large-scale problems but had been implemented only for a single processor. The TAO implementation is available for a wide range of high-performance architectures, and has been tested on up to 64 processors to solve problems with over 2.5 million variables.<|reference_end|> | arxiv | @article{benson2001gpcg:,
title={GPCG: A Case Study in the Performance and Scalability of Optimization
Algorithms},
author={Steven J. Benson and Lois Curfman McInnes and Jorge J. Mor\'e},
journal={arXiv preprint arXiv:cs/0101018},
year={2001},
number={ANL/MCS-P768-0799},
archivePrefix={arXiv},
eprint={cs/0101018},
primaryClass={cs.MS}
} | benson2001gpcg: |
arxiv-669818 | cs/0101019 | General Loss Bounds for Universal Sequence Prediction | <|reference_start|>General Loss Bounds for Universal Sequence Prediction: The Bayesian framework is ideally suited for induction problems. The probability of observing $x_t$ at time $t$, given past observations $x_1...x_{t-1}$, can be computed with Bayes' rule if the true distribution $\mu$ of the sequences $x_1x_2x_3...$ is known. The problem, however, is that in many cases one does not even have a reasonable estimate of the true distribution. In order to overcome this problem, a universal distribution $\xi$ is defined as a weighted sum of distributions $\mu_i \in M$, where $M$ is any countable set of distributions including $\mu$. This is a generalization of Solomonoff induction, in which $M$ is the set of all enumerable semi-measures. Systems which predict $y_t$, given $x_1...x_{t-1}$ and which receive loss $l_{x_t y_t}$ if $x_t$ is the true next symbol of the sequence are considered. It is proven that using the universal $\xi$ as a prior is nearly as good as using the unknown true distribution $\mu$. Furthermore, games of chance, defined as a sequence of bets, observations, and rewards are studied. The time needed to reach the winning zone is bounded in terms of the relative entropy of $\mu$ and $\xi$. Extensions to arbitrary alphabets, partial and delayed prediction, and more active systems are discussed.<|reference_end|> | arxiv | @article{hutter2001general,
title={General Loss Bounds for Universal Sequence Prediction},
author={Marcus Hutter},
journal={Proc. 18th Int. Conf. on Machine Learning ICML (2001) 210-217},
year={2001},
number={IDSIA-03-01},
archivePrefix={arXiv},
eprint={cs/0101019},
primaryClass={cs.AI cs.LG math.ST stat.TH}
} | hutter2001general |
arxiv-669819 | cs/0101020 | More Robust Multiparty Protocols with Oblivious Transfer | <|reference_start|>More Robust Multiparty Protocols with Oblivious Transfer: With oblivious transfer, multiparty protocols become possible even in the presence of a faulty majority. But all known protocols can be aborted by just one disruptor. This paper presents more robust solutions for multiparty protocols with oblivious transfer. This additional robustness against disruptors weakens the security of the protocol and the guarantee that the result is correct. We can observe a trade-off between robustness against disruption on the one hand and security and correctness on the other. We give an application to quantum multiparty protocols. These allow the implementation of oblivious transfer and the protocols of this paper relative to temporary assumptions, i.e., the security increases after the termination of the protocol.<|reference_end|> | arxiv | @article{mueller-quade2001more,
title={More Robust Multiparty Protocols with Oblivious Transfer},
author={J. Mueller-Quade (Universitaet Karlsruhe) and H. Imai (U. of Tokyo)},
journal={arXiv preprint arXiv:cs/0101020},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101020},
primaryClass={cs.CR}
} | mueller-quade2001more |
arxiv-669820 | cs/0101021 | A Fast General Methodology for Information-Theoretically Optimal Encodings of Graphs | <|reference_start|>A Fast General Methodology for Information-Theoretically Optimal Encodings of Graphs: We propose a fast methodology for encoding graphs with information-theoretically minimum numbers of bits. Specifically, a graph with property pi is called a pi-graph. If pi satisfies certain properties, then an n-node m-edge pi-graph G can be encoded by a binary string X such that (1) G and X can be obtained from each other in O(n log n) time, and (2) X has at most beta(n)+o(beta(n)) bits for any continuous super-additive function beta(n) so that there are at most 2^{beta(n)+o(beta(n))} distinct n-node pi-graphs. The methodology is applicable to general classes of graphs; this paper focuses on planar graphs. Examples of such pi include all conjunctions over the following groups of properties: (1) G is a planar graph or a plane graph; (2) G is directed or undirected; (3) G is triangulated, triconnected, biconnected, merely connected, or not required to be connected; (4) the nodes of G are labeled with labels from {1, ..., ell_1} for ell_1 <= n; (5) the edges of G are labeled with labels from {1, ..., ell_2} for ell_2 <= m; and (6) each node (respectively, edge) of G has at most ell_3 = O(1) self-loops (respectively, ell_4 = O(1) multiple edges). Moreover, ell_3 and ell_4 are not required to be O(1) for the cases of pi being a plane triangulation. These examples are novel applications of small cycle separators of planar graphs and are the only nontrivial classes of graphs, other than rooted trees, with known polynomial-time information-theoretically optimal coding schemes.<|reference_end|> | arxiv | @article{he2001a,
title={A Fast General Methodology for Information-Theoretically Optimal
Encodings of Graphs},
author={Xin He and Ming-Yang Kao and Hsueh-I Lu},
journal={SIAM Journal on Computing, 30(3):838--846, 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101021},
primaryClass={cs.DS cs.GR}
} | he2001a |
arxiv-669821 | cs/0101022 | Semantics and Termination of Simply-Moded Logic Programs with Dynamic Scheduling | <|reference_start|>Semantics and Termination of Simply-Moded Logic Programs with Dynamic Scheduling: In logic programming, dynamic scheduling refers to a situation where the selection of the atom in each resolution (computation) step is determined at runtime, as opposed to a fixed selection rule such as the left-to-right one of Prolog. This has applications e.g. in parallel programming. A mechanism to control dynamic scheduling is provided in existing languages in the form of delay declarations. Input-consuming derivations were introduced to describe dynamic scheduling while abstracting from the technical details. In this paper, we first formalise the relationship between delay declarations and input-consuming derivations, showing in many cases a one-to-one correspondence. Then, we define a model-theoretic semantics for input-consuming derivations of simply-moded programs. Finally, for this class of programs, we provide a necessary and sufficient criterion for termination.<|reference_end|> | arxiv | @article{bossi2001semantics,
title={Semantics and Termination of Simply-Moded Logic Programs with Dynamic
Scheduling},
author={Annalisa Bossi and Sandro Etalle and Sabina Rossi and Jan-Georg Smaus},
journal={arXiv preprint arXiv:cs/0101022},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101022},
primaryClass={cs.LO cs.PL}
} | bossi2001semantics |
arxiv-669822 | cs/0101023 | Properties of Input-Consuming Derivations | <|reference_start|>Properties of Input-Consuming Derivations: We study the properties of input-consuming derivations of moded logic programs. Input-consuming derivations can be used to model the behavior of logic programs using dynamic scheduling and employing constructs such as delay declarations. We consider the class of nicely-moded programs and queries. We show that for these programs a weak version of the well-known switching lemma holds also for input-consuming derivations. Furthermore, we show that, under suitable conditions, there exists an algebraic characterization of termination of input-consuming derivations.<|reference_end|> | arxiv | @article{bossi2001properties,
title={Properties of Input-Consuming Derivations},
author={Annalisa Bossi and Sandro Etalle and Sabina Rossi},
journal={arXiv preprint arXiv:cs/0101023},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101023},
primaryClass={cs.PL cs.LO}
} | bossi2001properties |
arxiv-669823 | cs/0101024 | On-Line Difference Maximization | <|reference_start|>On-Line Difference Maximization: In this paper we examine problems motivated by on-line financial problems and stochastic games. In particular, we consider a sequence of entirely arbitrary distinct values arriving in random order, and must devise strategies for selecting low values followed by high values in such a way as to maximize the expected gain in rank from low values to high values. First, we consider a scenario in which only one low value and one high value may be selected. We give an optimal on-line algorithm for this scenario, and analyze it to show that, surprisingly, the expected gain is n-O(1), and so differs from the best possible off-line gain by only a constant additive term (which is, in fact, fairly small -- at most 15). In a second scenario, we allow multiple nonoverlapping low/high selections, where the total gain for our algorithm is the sum of the individual pair gains. We also give an optimal on-line algorithm for this problem, where the expected gain is n^2/8-\Theta(n\log n). An analysis shows that the optimal expected off-line gain is n^2/6+\Theta(1), so the performance of our on-line algorithm is within a factor of 3/4 of the best off-line strategy.<|reference_end|> | arxiv | @article{kao2001on-line,
title={On-Line Difference Maximization},
author={Ming-Yang Kao and Stephen R. Tate},
journal={SIAM Journal on Discrete Mathematics, 12(1):78-90, 1999},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101024},
primaryClass={cs.DS cs.DM}
} | kao2001on-line |
arxiv-669824 | cs/0101025 | Decomposing Non-Redundant Sharing by Complementation | <|reference_start|>Decomposing Non-Redundant Sharing by Complementation: Complementation, the inverse of the reduced product operation, is a technique for systematically finding minimal decompositions of abstract domains. Filé and Ranzato advanced the state of the art by introducing a simple method for computing a complement. As an application, they considered the extraction by complementation of the pair-sharing domain PS from Jacobs and Langen's set-sharing domain SH. However, since the result of this operation was still SH, they concluded that PS was too abstract for this. Here, we show that the source of this result lies not with PS but with SH and, more precisely, with the redundant information contained in SH with respect to ground-dependencies and pair-sharing. In fact, a proper decomposition is obtained if the non-redundant version of SH, PSD, is substituted for SH. To establish the results for PSD, we define a general schema for subdomains of SH that includes PSD and Def as special cases. This sheds new light on the structure of PSD and exposes a natural though unexpected connection between Def and PSD. Moreover, we substantiate the claim that complementation alone is not sufficient to obtain truly minimal decompositions of domains. The right solution to this problem is to first remove redundancies by computing the quotient of the domain with respect to the observable behavior, and only then decompose it by complementation.<|reference_end|> | arxiv | @article{zaffanella2001decomposing,
title={Decomposing Non-Redundant Sharing by Complementation},
author={Enea Zaffanella and Patricia M. Hill and Roberto Bagnara},
journal={arXiv preprint arXiv:cs/0101025},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101025},
primaryClass={cs.PL}
} | zaffanella2001decomposing |
arxiv-669825 | cs/0101026 | Deterministic computations whose history is independent of the order of asynchronous updating | <|reference_start|>Deterministic computations whose history is independent of the order of asynchronous updating: Consider a network of processors (sites) in which each site x has a finite set N(x) of neighbors. There is a transition function f that for each site x computes the next state \xi(x) from the states in N(x). But these transitions (updates) are applied in arbitrary order, one or many at a time. If the state of site x at time t is \eta(x,t) then let us define the sequence \zeta(x,0), \zeta(x,1), ... by taking the sequence \eta(x,0), \eta(x,1), ..., and deleting repetitions. The function f is said to have invariant histories if the sequence \zeta(x,i), (while it lasts, in case it is finite) depends only on the initial configuration, not on the order of updates. This paper shows that though the invariant history property is typically undecidable, there is a useful simple sufficient condition, called commutativity: For any configuration, for any pair x,y of neighbors, if the updating would change both \xi(x) and \xi(y) then the result of updating first x and then y is the same as the result of doing this in the reverse order.<|reference_end|> | arxiv | @article{gacs2001deterministic,
title={Deterministic computations whose history is independent of the order of
asynchronous updating},
author={Peter Gacs},
journal={arXiv preprint arXiv:cs/0101026},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101026},
primaryClass={cs.DC cs.CC}
} | gacs2001deterministic |
arxiv-669826 | cs/0101027 | Open Archives Initiative protocol development and implementation at arXiv | <|reference_start|>Open Archives Initiative protocol development and implementation at arXiv: I outline the involvement of the Los Alamos e-print archive (arXiv) within the Open Archives Initiative (OAI) and describe the implementation of the data provider side of the OAI protocol v1.0. I highlight the ways in which we map the existing structure of arXiv onto elements of the protocol.<|reference_end|> | arxiv | @article{warner2001open,
title={Open Archives Initiative protocol development and implementation at
arXiv},
author={Simeon Warner (LANL)},
journal={arXiv preprint arXiv:cs/0101027},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101027},
primaryClass={cs.DL}
} | warner2001open |
arxiv-669827 | cs/0101028 | Optimal Constructions of Hybrid Algorithms | <|reference_start|>Optimal Constructions of Hybrid Algorithms: We study on-line strategies for solving problems with hybrid algorithms. There is a problem Q and w basic algorithms for solving Q. For some lambda <= w, we have a computer with lambda disjoint memory areas, each of which can be used to run a basic algorithm and store its intermediate results. In the worst case, only one basic algorithm can solve Q in finite time, and all the other basic algorithms run forever without solving Q. To solve Q with a hybrid algorithm constructed from the basic algorithms, we run a basic algorithm for some time, then switch to another, and continue this process until Q is solved. The goal is to solve Q in the least amount of time. Using competitive ratios to measure the efficiency of a hybrid algorithm, we construct an optimal deterministic hybrid algorithm and an efficient randomized hybrid algorithm. This resolves an open question on searching with multiple robots posed by Baeza-Yates, Culberson and Rawlins. We also prove that our randomized algorithm is optimal for lambda = 1, settling a conjecture of Kao, Reif and Tate.<|reference_end|> | arxiv | @article{kao2001optimal,
title={Optimal Constructions of Hybrid Algorithms},
author={Ming-Yang Kao and Yuan Ma and Michael Sipser and Yiqun Yin},
journal={Journal of Algorithms, 29:142--164, 1998},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101028},
primaryClass={cs.DM cs.DS}
} | kao2001optimal |
arxiv-669828 | cs/0101029 | Tap Tips: Lightweight Discovery of Touchscreen Targets | <|reference_start|>Tap Tips: Lightweight Discovery of Touchscreen Targets: We describe tap tips, a technique for providing touch-screen target location hints. Tap tips are lightweight in that they are non-modal, appear only when needed, require a minimal number of user gestures, and do not add to the standard touchscreen gesture vocabulary. We discuss our implementation of tap tips in an electronic guidebook system and some usability test results.<|reference_end|> | arxiv | @article{aoki2001tap,
title={Tap Tips: Lightweight Discovery of Touchscreen Targets},
author={Paul M. Aoki and Amy Hurst and Allison Woodruff},
journal={Extended Abstracts, ACM SIGCHI Conf. on Human Factors in Computing
Systems, Seattle, WA, March 2001, 237-238. ACM Press.},
year={2001},
doi={10.1145/634067.634208},
archivePrefix={arXiv},
eprint={cs/0101029},
primaryClass={cs.HC}
} | aoki2001tap |
arxiv-669829 | cs/0101030 | Tree Contractions and Evolutionary Trees | <|reference_start|>Tree Contractions and Evolutionary Trees: An evolutionary tree is a rooted tree where each internal vertex has at least two children and where the leaves are labeled with distinct symbols representing species. Evolutionary trees are useful for modeling the evolutionary history of species. An agreement subtree of two evolutionary trees is an evolutionary tree which is also a topological subtree of the two given trees. We give an algorithm to determine the largest possible number of leaves in any agreement subtree of two trees T_1 and T_2 with n leaves each. If the maximum degree d of these trees is bounded by a constant, the time complexity is O(n log^2(n)) and is within a log(n) factor of optimal. For general d, this algorithm runs in O(n d^2 log(d) log^2(n)) time or alternatively in O(n d sqrt(d) log^3(n)) time.<|reference_end|> | arxiv | @article{kao2001tree,
title={Tree Contractions and Evolutionary Trees},
author={Ming-Yang Kao},
journal={SIAM Journal on Computing, 27(6):1592--1616, December 1998},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101030},
primaryClass={cs.CE cs.DS}
} | kao2001tree |
arxiv-669830 | cs/0101031 | Cavity Matchings, Label Compressions, and Unrooted Evolutionary Trees | <|reference_start|>Cavity Matchings, Label Compressions, and Unrooted Evolutionary Trees: We present an algorithm for computing a maximum agreement subtree of two unrooted evolutionary trees. It takes O(n^{1.5} log n) time for trees with unbounded degrees, matching the best known time complexity for the rooted case. Our algorithm allows the input trees to be mixed trees, i.e., trees that may contain directed and undirected edges at the same time. Our algorithm adopts a recursive strategy exploiting a technique called label compression. The backbone of this technique is an algorithm that computes the maximum weight matchings over many subgraphs of a bipartite graph as fast as it takes to compute a single matching.<|reference_end|> | arxiv | @article{kao2001cavity,
title={Cavity Matchings, Label Compressions, and Unrooted Evolutionary Trees},
author={Ming-Yang Kao and Tak-Wah Lam and Wing-Kin Sung and Hing-Fung Ting},
journal={SIAM Journal on Computing, 30(2):602--624, 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101031},
primaryClass={cs.CE cs.DS}
} | kao2001cavity |
arxiv-669831 | cs/0101032 | Total Protection of Analytic Invariant Information in Cross Tabulated Tables | <|reference_start|>Total Protection of Analytic Invariant Information in Cross Tabulated Tables: To protect sensitive information in a cross tabulated table, it is a common practice to suppress some of the cells in the table. An analytic invariant is a power series in terms of the suppressed cells that has a unique feasible value and a convergence radius equal to +\infty. Intuitively, the information contained in an invariant is not protected even though the values of the suppressed cells are not disclosed. This paper gives an optimal linear-time algorithm for testing whether there exist nontrivial analytic invariants in terms of the suppressed cells in a given set of suppressed cells. This paper also presents NP-completeness results and an almost linear-time algorithm for the problem of suppressing the minimum number of cells in addition to the sensitive ones so that the resulting table does not leak analytic invariant information about a given set of suppressed cells.<|reference_end|> | arxiv | @article{kao2001total,
title={Total Protection of Analytic Invariant Information in Cross Tabulated
Tables},
author={Ming-Yang Kao},
journal={SIAM Journal on Computing, 26(1):231--242, February 1997},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101032},
primaryClass={cs.CR cs.DM cs.DS}
} | kao2001total |
arxiv-669832 | cs/0101033 | Linear-Time Succinct Encodings of Planar Graphs via Canonical Orderings | <|reference_start|>Linear-Time Succinct Encodings of Planar Graphs via Canonical Orderings: Let G be an embedded planar undirected graph that has n vertices, m edges, and f faces but has no self-loop or multiple edge. If G is triangulated, we can encode it using {4/3}m-1 bits, improving on the best previous bound of about 1.53m bits. In case exponential time is acceptable, roughly 1.08m bits have been known to suffice. If G is triconnected, we use at most (2.5+2\log{3})\min\{n,f\}-7 bits, which is at most 2.835m bits and smaller than the best previous bound of 3m bits. Both of our schemes take O(n) time for encoding and decoding.<|reference_end|> | arxiv | @article{he2001linear-time,
title={Linear-Time Succinct Encodings of Planar Graphs via Canonical Orderings},
author={Xin He and Ming-Yang Kao and Hsueh-I Lu},
journal={SIAM Journal on Discrete Mathematics, 12(3):317--325, 1999},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101033},
primaryClass={cs.DS cs.GR}
} | he2001linear-time |
arxiv-669833 | cs/0101034 | Data Security Equals Graph Connectivity | <|reference_start|>Data Security Equals Graph Connectivity: To protect sensitive information in a cross tabulated table, it is a common practice to suppress some of the cells in the table. This paper investigates four levels of data security of a two-dimensional table concerning the effectiveness of this practice. These four levels of data security protect the information contained in, respectively, individual cells, individual rows and columns, several rows or columns as a whole, and a table as a whole. The paper presents efficient algorithms and NP-completeness results for testing and achieving these four levels of data security. All these complexity results are obtained by means of fundamental equivalences between the four levels of data security of a table and four types of connectivity of a graph constructed from that table.<|reference_end|> | arxiv | @article{kao2001data,
title={Data Security Equals Graph Connectivity},
author={Ming-Yang Kao},
journal={SIAM Journal on Discrete Mathematics, 9:87--100, 1996},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101034},
primaryClass={cs.CR cs.DB cs.DS}
} | kao2001data |
arxiv-669834 | cs/0101035 | The Guidebook, the Friend, and the Room: Visitor Experience in a Historic House | <|reference_start|>The Guidebook, the Friend, and the Room: Visitor Experience in a Historic House: In this paper, we describe an electronic guidebook prototype and report on a study of its use in a historic house. Supported by mechanisms in the guidebook, visitors constructed experiences that had a high degree of interaction with three entities: the guidebook, their companions, and the house and its contents. For example, we found that most visitors played audio descriptions through speakers (rather than using headphones or reading textual descriptions) to facilitate communication with their companions.<|reference_end|> | arxiv | @article{woodruff2001the,
title={The Guidebook, the Friend, and the Room: Visitor Experience in a
Historic House},
author={Allison Woodruff and Paul M. Aoki and Amy Hurst and Margaret H. Szymanski},
journal={Extended Abstracts, ACM SIGCHI Conf. on Human Factors in Computing
Systems, Seattle, WA, March 2001, 273-274. ACM Press.},
year={2001},
doi={10.1145/634067.634229},
archivePrefix={arXiv},
eprint={cs/0101035},
primaryClass={cs.HC}
} | woodruff2001the |
arxiv-669835 | cs/0101036 | The Generalized Universal Law of Generalization | <|reference_start|>The Generalized Universal Law of Generalization: It has been argued by Shepard that there is a robust psychological law that relates the distance between a pair of items in psychological space and the probability that they will be confused with each other. Specifically, the probability of confusion is a negative exponential function of the distance between the pair of items. In experimental contexts, distance is typically defined in terms of a multidimensional Euclidean space, but this assumption seems unlikely to hold for complex stimuli. We show that, nonetheless, the Universal Law of Generalization can be derived in the more complex setting of arbitrary stimuli, using a much more universal measure of distance. This universal distance is defined as the length of the shortest program that transforms the representations of the two items of interest into one another: the algorithmic information distance. It is universal in the sense that it minorizes every computable distance: it is the smallest computable distance. We show that the universal law of generalization holds with probability going to one, provided the confusion probabilities are computable. We also give a mathematically more appealing form.<|reference_end|> | arxiv | @article{chater2001the,
title={The Generalized Universal Law of Generalization},
author={Nick Chater (Univ. Warwick) and Paul Vitanyi (CWI and Univ. Amsterdam)},
journal={arXiv preprint arXiv:cs/0101036},
year={2001},
archivePrefix={arXiv},
eprint={cs/0101036},
primaryClass={cs.CV cs.AI math.PR physics.soc-ph}
} | chater2001the |
arxiv-669836 | cs/0102001 | Benchmarking Optimization Software with Performance Profiles | <|reference_start|>Benchmarking Optimization Software with Performance Profiles: We propose performance profiles (distribution functions for a performance metric) as a tool for benchmarking and comparing optimization software. We show that performance profiles combine the best features of other tools for performance evaluation.<|reference_end|> | arxiv | @article{dolan2001benchmarking,
title={Benchmarking Optimization Software with Performance Profiles},
author={Elizabeth D. Dolan and Jorge J. Mor\'e},
journal={Math. Program., Ser. A 91: 201-213 (2002)},
year={2001},
number={ANL/MCS-P861-1200},
archivePrefix={arXiv},
eprint={cs/0102001},
primaryClass={cs.MS}
} | dolan2001benchmarking |
arxiv-669837 | cs/0102002 | On the Automated Classification of Web Sites | <|reference_start|>On the Automated Classification of Web Sites: In this paper we discuss several issues related to automated text classification of web sites. We analyze the nature of web content and metadata in relation to requirements for text features. We find that HTML metatags are a good source of text features, but are not in wide use despite their role in search engine rankings. We present an approach for targeted spidering including metadata extraction and opportunistic crawling of specific semantic hyperlinks. We describe a system for automatically classifying web sites into industry categories and present performance results based on different combinations of text features and training data. This system can serve as the basis for a generalized framework for automated metadata creation.<|reference_end|> | arxiv | @article{pierre2001on,
title={On the Automated Classification of Web Sites},
author={John M. Pierre},
journal={arXiv preprint arXiv:cs/0102002},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102002},
primaryClass={cs.IR}
} | pierre2001on |
arxiv-669838 | cs/0102003 | Fast Pricing of European Asian Options with Provable Accuracy: Single-stock and Basket Options | <|reference_start|>Fast Pricing of European Asian Options with Provable Accuracy: Single-stock and Basket Options: This paper develops three polynomial-time pricing techniques for European Asian options with provably small errors, where the stock prices follow binomial trees or trees of higher-degree. The first technique is the first known Monte Carlo algorithm with analytical error bounds suitable for pricing single-stock options with meaningful confidence and speed. The second technique is a general recursive bucketing-based scheme that can use the Aingworth-Motwani-Oldham aggregation algorithm, Monte-Carlo simulation and possibly others as the base-case subroutine. This scheme enables robust trade-offs between accuracy and time over subtrees of different sizes. For long-term options or high frequency price averaging, it can price single-stock options with smaller errors in less time than the base-case algorithms themselves. The third technique combines Fast Fourier Transform with bucketing-based schemes for pricing basket options. This technique takes polynomial time in the number of days and the number of stocks, and does not add any errors to those already incurred in the companion bucketing scheme. This technique assumes that the price of each underlying stock moves independently.<|reference_end|> | arxiv | @article{akcoglu2001fast,
title={Fast Pricing of European Asian Options with Provable Accuracy:
Single-stock and Basket Options},
author={Karhan Akcoglu and Ming-Yang Kao and Shuba Raghavan},
journal={arXiv preprint arXiv:cs/0102003},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102003},
primaryClass={cs.CE}
} | akcoglu2001fast |
arxiv-669839 | cs/0102004 | Computational Geometry Column 41 | <|reference_start|>Computational Geometry Column 41: The recent result that n congruent balls in R^d have at most 4 distinct geometric permutations is described.<|reference_end|> | arxiv | @article{o'rourke2001computational,
title={Computational Geometry Column 41},
author={Joseph O'Rourke},
journal={arXiv preprint arXiv:cs/0102004},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102004},
primaryClass={cs.CG cs.DM}
} | o'rourke2001computational |
arxiv-669840 | cs/0102005 | Compact Encodings of Planar Graphs via Canonical Orderings and Multiple Parentheses | <|reference_start|>Compact Encodings of Planar Graphs via Canonical Orderings and Multiple Parentheses: Let G be a plane graph of n nodes, m edges, f faces, and no self-loop. G need not be connected or simple (i.e., free of multiple edges). We give three sets of coding schemes for G which all take O(m+n) time for encoding and decoding. Our schemes employ new properties of canonical orderings for planar graphs and new techniques of processing strings of multiple types of parentheses. For applications that need to determine in O(1) time the adjacency of two nodes and the degree of a node, we use 2m+(5+1/k)n + o(m+n) bits for any constant k > 0 while the best previous bound by Munro and Raman is 2m+8n + o(m+n). If G is triconnected or triangulated, our bit count decreases to 2m+3n + o(m+n) or 2m+2n + o(m+n), respectively. If G is simple, our bit count is (5/3)m+(5+1/k)n + o(n) for any constant k > 0. Thus, if a simple G is also triconnected or triangulated, then 2m+2n + o(n) or 2m+n + o(n) bits suffice, respectively. If only adjacency queries are supported, the bit counts for a general G and a simple G become 2m+(14/3)n + o(m+n) and (4/3)m+5n + o(n), respectively. If we only need to reconstruct G from its code, a simple and triconnected G uses roughly 2.38m + O(1) bits while the best previous bound by He, Kao, and Lu is 2.84m.<|reference_end|> | arxiv | @article{chuang2001compact,
title={Compact Encodings of Planar Graphs via Canonical Orderings and Multiple
Parentheses},
author={Richie Chih-Nan Chuang and Ashim Garg and Xin He and Ming-Yang Kao and Hsueh-I Lu},
journal={arXiv preprint arXiv:cs/0102005},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102005},
primaryClass={cs.DS cs.DM}
} | chuang2001compact |
arxiv-669841 | cs/0102006 | Orderly Spanning Trees with Applications | <|reference_start|>Orderly Spanning Trees with Applications: We introduce and study the {\em orderly spanning trees} of plane graphs. This algorithmic tool generalizes {\em canonical orderings}, which exist only for triconnected plane graphs. Although not every plane graph admits an orderly spanning tree, we provide an algorithm to compute an {\em orderly pair} for any connected planar graph $G$, consisting of a plane graph $H$ of $G$, and an orderly spanning tree of $H$. We also present several applications of orderly spanning trees: (1) a new constructive proof for Schnyder's Realizer Theorem, (2) the first area-optimal 2-visibility drawing of $G$, and (3) the best known encodings of $G$ with O(1)-time query support. All algorithms in this paper run in linear time.<|reference_end|> | arxiv | @article{chiang2001orderly,
title={Orderly Spanning Trees with Applications},
author={Yi-Ting Chiang and Ching-Chi Lin and Hsueh-I Lu},
journal={SIAM Journal on Computing 34(4): 924-945 (2005)},
year={2001},
doi={10.1137/S0097539702411381},
archivePrefix={arXiv},
eprint={cs/0102006},
primaryClass={cs.DS cs.DM}
} | chiang2001orderly |
arxiv-669842 | cs/0102007 | Common-Face Embeddings of Planar Graphs | <|reference_start|>Common-Face Embeddings of Planar Graphs: Given a planar graph G and a sequence C_1,...,C_q, where each C_i is a family of vertex subsets of G, we wish to find a plane embedding of G, if any exists, such that for each i in {1,...,q}, there is a face F_i in the embedding whose boundary contains at least one vertex from each set in C_i. This problem has applications to the recovery of topological information from geographical data and the design of constrained layouts in VLSI. Let I be the input size, i.e., the total number of vertices and edges in G and the families C_i, counting multiplicity. We show that this problem is NP-complete in general. We also show that it is solvable in O(I log I) time for the special case where for each input family C_i, each set in C_i induces a connected subgraph of the input graph G. Note that the classical problem of simply finding a planar embedding is a further special case of this case with q=0. Therefore, the processing of the additional constraints C_1,...,C_q only incurs a logarithmic factor of overhead.<|reference_end|> | arxiv | @article{chen2001common-face,
title={Common-Face Embeddings of Planar Graphs},
author={Zhi-Zhong Chen and Xin He and Ming-Yang Kao},
journal={arXiv preprint arXiv:cs/0102007},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102007},
primaryClass={cs.DS cs.DM}
} | chen2001common-face |
arxiv-669843 | cs/0102008 | Optimal Bid Sequences for Multiple-Object Auctions with Unequal Budgets | <|reference_start|>Optimal Bid Sequences for Multiple-Object Auctions with Unequal Budgets: In a multiple-object auction, every bidder tries to win as many objects as possible with a bidding algorithm. This paper studies position-randomized auctions, which form a special class of multiple-object auctions where a bidding algorithm consists of an initial bid sequence and an algorithm for randomly permuting the sequence. We are especially concerned with situations where some bidders know the bidding algorithms of others. For the case of only two bidders, we give an optimal bidding algorithm for the disadvantaged bidder. Our result generalizes previous work by allowing the bidders to have unequal budgets. One might naturally anticipate that the optimal expected numbers of objects won by the bidders would be proportional to their budgets. Surprisingly, this is not true. Our new algorithm runs in optimal O(n) time in a straightforward manner. The case with more than two bidders is open.<|reference_end|> | arxiv | @article{chen2001optimal,
title={Optimal Bid Sequences for Multiple-Object Auctions with Unequal Budgets},
author={Yuyu Chen, Ming-Yang Kao, Hsueh-I Lu},
journal={arXiv preprint arXiv:cs/0102008},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102008},
primaryClass={cs.CE cs.DM cs.DS}
} | chen2001optimal |
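A toy Monte Carlo model of the position-randomized setting described in the entry above: each bidder fixes a bid sequence summing to her budget, permutes it uniformly at random, and the higher bid wins each object. The concrete sequences and the tie rule (split evenly) are illustrative assumptions, not the paper's optimal construction.

```python
import random

def expected_wins(seq_a, seq_b, trials=20000, rng=random.Random(0)):
    """Estimate bidder A's expected number of objects won."""
    wins_a = 0.0
    for _ in range(trials):
        a = rng.sample(seq_a, len(seq_a))   # uniformly random permutation
        b = rng.sample(seq_b, len(seq_b))
        for x, y in zip(a, b):
            wins_a += 1.0 if x > y else 0.5 if x == y else 0.0
    return wins_a / trials

# Bidder A has budget 10, bidder B budget 6, over 4 objects.
print(expected_wins([4, 3, 2, 1], [3, 2, 1, 0]))
```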
arxiv-669844 | cs/0102009 | Optimal Augmentation for Bipartite Componentwise Biconnectivity in Linear Time | <|reference_start|>Optimal Augmentation for Bipartite Componentwise Biconnectivity in Linear Time: A graph is componentwise biconnected if every connected component either is an isolated vertex or is biconnected. We present a linear-time algorithm for the problem of adding the smallest number of edges to make a bipartite graph componentwise biconnected while preserving its bipartiteness. This algorithm has immediate applications for protecting sensitive information in statistical tables.<|reference_end|> | arxiv | @article{hsu2001optimal,
title={Optimal Augmentation for Bipartite Componentwise Biconnectivity in
Linear Time},
author={Tsan-sheng Hsu, Ming-Yang Kao},
journal={arXiv preprint arXiv:cs/0102009},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102009},
primaryClass={cs.DS cs.DM}
} | hsu2001optimal |
arxiv-669845 | cs/0102010 | The Enhanced Double Digest Problem for DNA Physical Mapping | <|reference_start|>The Enhanced Double Digest Problem for DNA Physical Mapping: The double digest problem is a common NP-hard approach to constructing physical maps of DNA sequences. This paper presents a new approach called the enhanced double digest problem. Although this new problem is also NP-hard, it can be solved in linear time in certain theoretically interesting cases.<|reference_end|> | arxiv | @article{kao2001the,
title={The Enhanced Double Digest Problem for DNA Physical Mapping},
author={Ming-Yang Kao, Jared Samet, Wing-Kin Sung},
journal={arXiv preprint arXiv:cs/0102010},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102010},
primaryClass={cs.CE cs.DM cs.DS}
} | kao2001the |
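A short sketch of the data the entry above works with: given the cut positions of two restriction enzymes on a DNA segment, the single-digest and double-digest fragment-length multisets are computed below. The (enhanced) double digest problem is the inverse task of reconstructing consistent cut positions from these multisets alone; positions here are made up.

```python
def fragments(length, sites):
    """Sorted multiset of fragment lengths cut at the given site positions."""
    cuts = [0] + sorted(sites) + [length]
    return sorted(cuts[i + 1] - cuts[i] for i in range(len(cuts) - 1))

L = 20
sites_a = [5, 13]            # cut sites of enzyme A (illustrative)
sites_b = [8, 13, 17]        # cut sites of enzyme B
A  = fragments(L, sites_a)                        # [5, 7, 8]
B  = fragments(L, sites_b)                        # [3, 4, 5, 8]
AB = fragments(L, set(sites_a) | set(sites_b))    # [3, 3, 4, 5, 5]
print(A, B, AB)
```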
arxiv-669846 | cs/0102011 | A Price Dynamics in Bandwidth Markets for Point-to-point Connections | <|reference_start|>A Price Dynamics in Bandwidth Markets for Point-to-point Connections: We simulate a network of N routers and M network users making concurrent point-to-point connections by buying and selling router capacity from each other. The resources need to be acquired in complete sets, but there is only one spot market for each router. In order to describe the internal dynamics of the market, we model the observed prices by N-dimensional Ito-processes. Modeling using stochastic processes is novel in this context of describing interactions between end-users in a system with shared resources, and allows a standard set of mathematical tools to be applied. The derived models can also be used to price contingent claims on network capacity and thus to price complex network services such as quality of service levels, multicast, etc.<|reference_end|> | arxiv | @article{rasmusson2001a,
title={A Price Dynamics in Bandwidth Markets for Point-to-point Connections},
author={Lars Rasmusson, Erik Aurell},
journal={arXiv preprint arXiv:cs/0102011},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102011},
primaryClass={cs.NI cond-mat.soft cs.MA}
} | rasmusson2001a |
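A minimal sketch of the modelling idea in the entry above: treat the N router spot prices as an N-dimensional Ito process and simulate it by Euler-Maruyama. A correlated geometric Brownian motion stands in here for whatever drift/volatility structure would actually be estimated from market data; all parameter values are made up.

```python
import numpy as np

rng = np.random.default_rng(1)
N, steps, dt = 3, 1000, 1e-3
mu    = np.array([0.05, 0.02, 0.03])          # drifts (illustrative)
sigma = np.array([0.30, 0.20, 0.25])          # volatilities (illustrative)
corr  = np.array([[1.0, 0.4, 0.1],
                  [0.4, 1.0, 0.2],
                  [0.1, 0.2, 1.0]])
chol = np.linalg.cholesky(corr)               # correlate the Brownian drivers

p = np.ones(N)                                # initial router prices
path = [p.copy()]
for _ in range(steps):
    dW = chol @ rng.standard_normal(N) * np.sqrt(dt)
    p = p + p * (mu * dt + sigma * dW)        # dP_i = P_i (mu_i dt + sigma_i dW_i)
    path.append(p.copy())
print(np.array(path)[-1])                     # prices at the final step
```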
arxiv-669847 | cs/0102012 | Chaos for Stream Cipher | <|reference_start|>Chaos for Stream Cipher: This paper discusses mixing of chaotic systems as a dependable method for secure communication. Distribution of the entropy function for steady state as well as plaintext input sequences are analyzed. It is shown that the mixing of chaotic sequences results in a sequence that does not have any state dependence on the information encrypted by them. The generated output states of such a cipher approach the theoretical maximum for both complexity measures and cycle length. These features are then compared with some popular ciphers.<|reference_end|> | arxiv | @article{philip2001chaos,
title={Chaos for Stream Cipher},
author={Ninan Sajeeth Philip and K. Babu Joseph},
journal={In proceedings of ADCOM 2000, Tata McGraw Hill 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102012},
primaryClass={cs.CR}
} | philip2001chaos |
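An illustrative toy only, and emphatically not the paper's cipher and not secure: it shows the generic idea of mixing chaotic sequences, here two logistic maps with key-derived seeds whose states are combined so that the output has no simple dependence on either map alone, and the extracted bytes are XORed with the plaintext as a keystream.

```python
def keystream(key1, key2, n):
    x, y = key1, key2                      # seeds in (0, 1), derived from the key
    for _ in range(100):                   # discard transient iterations
        x, y = 4.0 * x * (1.0 - x), 4.0 * y * (1.0 - y)
    out = []
    for _ in range(n):
        x, y = 4.0 * x * (1.0 - x), 4.0 * y * (1.0 - y)
        out.append((int(x * 256) ^ int(y * 256)) & 0xFF)   # mix the two maps
    return bytes(out)

msg = b"attack at dawn"
ks = keystream(0.123456, 0.654321, len(msg))
ct = bytes(m ^ k for m, k in zip(msg, ks))                 # encrypt
pt = bytes(c ^ k for c, k in zip(ct, keystream(0.123456, 0.654321, len(ct))))
assert pt == msg                                           # decrypt round-trip
```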
arxiv-669848 | cs/0102013 | Quantum Multi-Prover Interactive Proof Systems with Limited Prior Entanglement | <|reference_start|>Quantum Multi-Prover Interactive Proof Systems with Limited Prior Entanglement: This paper gives the first formal treatment of a quantum analogue of multi-prover interactive proof systems. It is proved that the class of languages having quantum multi-prover interactive proof systems is necessarily contained in NEXP, under the assumption that provers are allowed to share at most polynomially many prior-entangled qubits. This implies that, in particular, if provers do not share any prior entanglement with each other, the class of languages having quantum multi-prover interactive proof systems is equal to NEXP. Related to these, it is shown that, in the case a prover does not have his private qubits, the class of languages having quantum single-prover interactive proof systems is also equal to NEXP.<|reference_end|> | arxiv | @article{kobayashi2001quantum,
title={Quantum Multi-Prover Interactive Proof Systems with Limited Prior
Entanglement},
author={Hirotada Kobayashi, Keiji Matsumoto},
journal={Journal of Computer and System Sciences, 66(3):429--450, 2003},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102013},
primaryClass={cs.CC quant-ph}
} | kobayashi2001quantum |
arxiv-669849 | cs/0102014 | On the predictability of Rainfall in Kerala- An application of ABF Neural Network | <|reference_start|>On the predictability of Rainfall in Kerala- An application of ABF Neural Network: Rainfall in Kerala State, in the southern part of the Indian Peninsula, is caused by the two monsoons and the two cyclones every year. In general, climate and rainfall are highly nonlinear natural phenomena, giving rise to what is known as the `butterfly effect'. We nevertheless train an ABF neural network on the time-series rainfall data and show for the first time that, in spite of the fluctuations resulting from the nonlinearity in the system, the trends in the rainfall pattern in this corner of the globe remained unaffected over the 87 years from 1893 to 1980. We also successfully filter out the chaotic part of the system and illustrate that its effects are marginal for long-term predictions.<|reference_end|> | arxiv | @article{philip2001on,
title={On the predictability of Rainfall in Kerala- An application of ABF
Neural Network},
author={Ninan Sajeeth Philip and K. Babu Joseph},
journal={arXiv preprint arXiv:cs/0102014},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102014},
primaryClass={cs.NE cs.AI}
} | philip2001on |
arxiv-669850 | cs/0102015 | Non-convex cost functionals in boosting algorithms and methods for panel selection | <|reference_start|>Non-convex cost functionals in boosting algorithms and methods for panel selection: In this document we propose a new improvement for boosting techniques as proposed in Friedman '99 by the use of non-convex cost functional. The idea is to introduce a correlation term to better deal with forecasting of additive time series. The problem is discussed in a theoretical way to prove the existence of minimizing sequence, and in a numerical way to propose a new "ArgMin" algorithm. The model has been used to perform the touristic presence forecast for the winter season 1999/2000 in Trentino (italian Alps).<|reference_end|> | arxiv | @article{visentin2001non-convex,
title={Non-convex cost functionals in boosting algorithms and methods for panel
selection},
author={Marco Visentin},
journal={arXiv preprint arXiv:cs/0102015},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102015},
primaryClass={cs.NE cs.LG cs.NA math.NA}
} | visentin2001non-convex |
arxiv-669851 | cs/0102016 | A Scientific Data Management System for Irregular Applications | <|reference_start|>A Scientific Data Management System for Irregular Applications: Many scientific applications are I/O intensive and generate or access large data sets, spanning hundreds or thousands of "files." Management, storage, efficient access, and analysis of this data present an extremely challenging task. We have developed a software system, called Scientific Data Manager (SDM), that uses a combination of parallel file I/O and database support for high-performance scientific data management. SDM provides a high-level API to the user and internally, uses a parallel file system to store real data and a database to store application-related metadata. In this paper, we describe how we designed and implemented SDM to support irregular applications. SDM can efficiently handle the reading and writing of data in an irregular mesh as well as the distribution of index values. We describe the SDM user interface and how we implemented it to achieve high performance. SDM makes extensive use of MPI-IO's noncontiguous collective I/O functions. SDM also uses the concept of a history file to optimize the cost of the index distribution using the metadata stored in the database. We present performance results with two irregular applications, a CFD code called FUN3D and a Rayleigh-Taylor instability code, on the SGI Origin2000 at Argonne National Laboratory.<|reference_end|> | arxiv | @article{no2001a,
title={A Scientific Data Management System for Irregular Applications},
author={Jaechun No, Rajeev Thakur, Dinesh Kaushik, Lori Freitag, and Alok
Choudhary},
journal={arXiv preprint arXiv:cs/0102016},
year={2001},
number={ANL/MCS-P866-1000},
archivePrefix={arXiv},
eprint={cs/0102016},
primaryClass={cs.DC}
} | no2001a |
arxiv-669852 | cs/0102017 | Components and Interfaces of a Process Management System for Parallel Programs | <|reference_start|>Components and Interfaces of a Process Management System for Parallel Programs: Parallel jobs are different from sequential jobs and require a different type of process management. We present here a process management system for parallel programs such as those written using MPI. A primary goal of the system, which we call MPD (for multipurpose daemon), is to be scalable. By this we mean that startup of interactive parallel jobs comprising thousands of processes is quick, that signals can be quickly delivered to processes, and that stdin, stdout, and stderr are managed intuitively. Our primary target is parallel machines made up of clusters of SMPs, but the system is also useful in more tightly integrated environments. We describe how MPD enables much faster startup and better runtime management of parallel jobs. We show how close control of stdio can support the easy implementation of a number of convenient system utilities, even a parallel debugger. We describe a simple but general interface that can be used to separate any process manager from a parallel library, which we use to keep MPD separate from MPICH.<|reference_end|> | arxiv | @article{butler2001components,
title={Components and Interfaces of a Process Management System for Parallel
Programs},
author={Ralph Butler, William Gropp, Ewing Lusk},
journal={arXiv preprint arXiv:cs/0102017},
year={2001},
number={ANL/MCS-P872-0201},
archivePrefix={arXiv},
eprint={cs/0102017},
primaryClass={cs.DC}
} | butler2001components |
arxiv-669853 | cs/0102018 | An effective Procedure for Speeding up Algorithms | <|reference_start|>An effective Procedure for Speeding up Algorithms: The provably asymptotically fastest algorithm within a factor of 5 for formally described problems will be constructed. The main idea is to enumerate all programs provably equivalent to the original problem by enumerating all proofs. The algorithm could be interpreted as a generalization and improvement of Levin search, which is, within a multiplicative constant, the fastest algorithm for inverting functions. Blum's speed-up theorem is avoided by taking into account only programs for which a correctness proof exists. Furthermore, it is shown that the fastest program that computes a certain function is also one of the shortest programs provably computing this function. To quantify this statement, the definition of Kolmogorov complexity is extended, and two new natural measures for the complexity of a function are defined.<|reference_end|> | arxiv | @article{hutter2001an,
title={An effective Procedure for Speeding up Algorithms},
author={Marcus Hutter},
journal={Workshop on Mathematical approaches to Biological Computation
(MaBiC 2001) and Workshop on Algorithmic Information Theory (TAI 2001)},
year={2001},
number={IDSIA-16-00},
archivePrefix={arXiv},
eprint={cs/0102018},
primaryClass={cs.CC cs.AI cs.LG}
} | hutter2001an |
arxiv-669854 | cs/0102019 | Easy and Hard Constraint Ranking in OT: Algorithms and Complexity | <|reference_start|>Easy and Hard Constraint Ranking in OT: Algorithms and Complexity: We consider the problem of ranking a set of OT constraints in a manner consistent with data. We speed up Tesar and Smolensky's RCD algorithm to be linear on the number of constraints. This finds a ranking so each attested form x_i beats or ties a particular competitor y_i. We also generalize RCD so each x_i beats or ties all possible competitors. Alas, this more realistic version of learning has no polynomial algorithm unless P=NP! Indeed, not even generation does. So one cannot improve qualitatively upon brute force: Merely checking that a single (given) ranking is consistent with given forms is coNP-complete if the surface forms are fully observed and Delta_2^p-complete if not. Indeed, OT generation is OptP-complete. As for ranking, determining whether any consistent ranking exists is coNP-hard (but in Delta_2^p) if the forms are fully observed, and Sigma_2^p-complete if not. Finally, we show that generation and ranking are easier in derivational theories: in P, and NP-complete.<|reference_end|> | arxiv | @article{eisner2001easy,
title={Easy and Hard Constraint Ranking in OT: Algorithms and Complexity},
author={Jason Eisner (Dept. of Computer Science / University of Rochester)},
journal={Jason Eisner, Lauri Karttunen and Alain Theriault (eds.),
Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special
Interest Group in Computational Phonology (SIGPHON), pp. 22-33. Luxembourg,
August 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102019},
primaryClass={cs.CL cs.CC}
} | eisner2001easy |
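A sketch of Tesar and Smolensky's Recursive Constraint Demotion (RCD), the algorithm the entry above speeds up. Input: for each datum, the violation counts of the attested winner and of one losing competitor on every constraint; output: a stratified ranking (highest stratum first), or None if no consistent ranking exists. Constraint names, counts, and the handling of leftover ties are illustrative assumptions.

```python
def rcd(constraints, pairs):
    """pairs: list of (winner_viol, loser_viol) dicts mapping constraint->count."""
    remaining, strata = list(pairs), []
    cons = set(constraints)
    while cons:
        # safe to rank next: constraints preferring no loser on remaining data
        stratum = {c for c in cons
                   if all(w.get(c, 0) <= l.get(c, 0) for w, l in remaining)}
        if not stratum:
            return None                     # no consistent ranking exists
        strata.append(stratum)
        cons -= stratum
        # discard pairs decided by a constraint strictly preferring the winner
        remaining = [(w, l) for w, l in remaining
                     if not any(w.get(c, 0) < l.get(c, 0) for c in stratum)]
    # leftover pairs are unbreakable ties; treat them conservatively as failure
    return strata if not remaining else None

pairs = [({'Faith': 1, 'NoCoda': 0}, {'Faith': 0, 'NoCoda': 1})]
print(rcd(['Faith', 'NoCoda'], pairs))      # [{'NoCoda'}, {'Faith'}]
```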
arxiv-669855 | cs/0102020 | Multi-Syllable Phonotactic Modelling | <|reference_start|>Multi-Syllable Phonotactic Modelling: This paper describes a novel approach to constructing phonotactic models. The underlying theoretical approach to phonological description is the multisyllable approach in which multiple syllable classes are defined that reflect phonotactically idiosyncratic syllable subcategories. A new finite-state formalism, OFS Modelling, is used as a tool for encoding, automatically constructing and generalising phonotactic descriptions. Language-independent prototype models are constructed which are instantiated on the basis of data sets of phonological strings, and generalised with a clustering algorithm. The resulting approach enables the automatic construction of phonotactic models that encode arbitrarily close approximations of a language's set of attested phonological forms. The approach is applied to the construction of multi-syllable word-level phonotactic models for German, English and Dutch.<|reference_end|> | arxiv | @article{belz2001multi-syllable,
title={Multi-Syllable Phonotactic Modelling},
author={Anja Belz (CCSRC, SRI International)},
journal={Jason Eisner, Lauri Karttunen and Alain Theriault (eds.),
Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special
Interest Group in Computational Phonology (SIGPHON), pp. 46-56. Luxembourg,
August 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102020},
primaryClass={cs.CL}
} | belz2001multi-syllable |
arxiv-669856 | cs/0102021 | Taking Primitive Optimality Theory Beyond the Finite State | <|reference_start|>Taking Primitive Optimality Theory Beyond the Finite State: Primitive Optimality Theory (OTP) (Eisner, 1997a; Albro, 1998), a computational model of Optimality Theory (Prince and Smolensky, 1993), employs a finite state machine to represent the set of active candidates at each stage of an Optimality Theoretic derivation, as well as weighted finite state machines to represent the constraints themselves. For some purposes, however, it would be convenient if the set of candidates were limited by some set of criteria capable of being described only in a higher-level grammar formalism, such as a Context Free Grammar, a Context Sensitive Grammar, or a Multiple Context Free Grammar (Seki et al., 1991). Examples include reduplication and phrasal stress models. Here we introduce a mechanism for OTP-like Optimality Theory in which the constraints remain weighted finite state machines, but sets of candidates are represented by higher-level grammars. In particular, we use multiple context-free grammars to model reduplication in the manner of Correspondence Theory (McCarthy and Prince, 1995), and develop an extended version of the Earley Algorithm (Earley, 1970) to apply the constraints to a reduplicating candidate set.<|reference_end|> | arxiv | @article{albro2001taking,
title={Taking Primitive Optimality Theory Beyond the Finite State},
author={Daniel Albro (UCLA)},
journal={Jason Eisner, Lauri Karttunen and Alain Theriault (eds.),
Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special
Interest Group in Computational Phonology (SIGPHON), pp. 57-67. Luxembourg,
August 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102021},
primaryClass={cs.CL}
} | albro2001taking |
arxiv-669857 | cs/0102022 | Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special Interest Group in Computational Phonology (SIGPHON) | <|reference_start|>Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special Interest Group in Computational Phonology (SIGPHON): Home page of the workshop proceedings, with pointers to the individually archived papers. Includes front matter from the printed version of the proceedings.<|reference_end|> | arxiv | @article{eisner2001finite-state,
title={Finite-State Phonology: Proceedings of the 5th Workshop of the ACL
Special Interest Group in Computational Phonology (SIGPHON)},
author={Jason Eisner (University of Rochester), Lauri Karttunen (Xerox
Research Centre Europe), Alain Theriault (Universite de Montreal)},
journal={Jason Eisner, Lauri Karttunen and Alain Theriault (eds.),
Finite-State Phonology: Proceedings of the 5th Workshop of the ACL Special
Interest Group in Computational Phonology (SIGPHON). Luxembourg, August 2000},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102022},
primaryClass={cs.CL}
} | eisner2001finite-state |
arxiv-669858 | cs/0102023 | Factored Notation for Interval I/O | <|reference_start|>Factored Notation for Interval I/O: This note addresses the input and output of intervals in the sense of interval arithmetic and interval constraints. The most obvious, and so far most widely used notation, for intervals has drawbacks that we remedy with a new notation that we propose to call factored notation. It is more compact and allows one to find a good trade-off between interval width and ease of reading. We describe how such a trade-off can be based on the information yield (in the sense of information theory) of the last decimal shown.<|reference_end|> | arxiv | @article{van emden2001factored,
title={Factored Notation for Interval I/O},
author={M.H. van Emden},
journal={arXiv preprint arXiv:cs/0102023},
year={2001},
number={DCS-264-IR},
archivePrefix={arXiv},
eprint={cs/0102023},
primaryClass={math.NA cs.NA}
} | van emden2001factored |
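A sketch of one plausible reading of "factored notation" (an assumption on my part, not taken verbatim from the paper): the decimal digits shared by both interval bounds are written once, and only the differing tails appear in brackets, trading interval width against ease of reading.

```python
def factored(lo_str, hi_str):
    """lo_str, hi_str: decimal strings of equal length with lo <= hi."""
    assert len(lo_str) == len(hi_str)
    i = 0
    while i < len(lo_str) and lo_str[i] == hi_str[i]:
        i += 1                             # length of the shared digit prefix
    if i == len(lo_str):
        return lo_str                      # degenerate (thin) interval
    return f"{lo_str[:i]}[{lo_str[i:]},{hi_str[i:]}]"

print(factored("3.1412", "3.1425"))        # 3.14[12,25]
print(factored("0.9998", "1.0002"))        # [0.9998,1.0002] -- no shared prefix
```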
arxiv-669859 | cs/0102024 | P-Immune Sets with Holes Lack Self-Reducibility Properties | <|reference_start|>P-Immune Sets with Holes Lack Self-Reducibility Properties: No P-immune set having exponential gaps is positive-Turing self-reducible.<|reference_end|> | arxiv | @article{hemaspaandra2001p-immune,
title={P-Immune Sets with Holes Lack Self-Reducibility Properties},
author={Lane A. Hemaspaandra and Harald Hempel},
journal={arXiv preprint arXiv:cs/0102024},
year={2001},
number={UR-CS-TR-2001-742},
archivePrefix={arXiv},
eprint={cs/0102024},
primaryClass={cs.CC}
} | hemaspaandra2001p-immune |
arxiv-669860 | cs/0102025 | An Effective Fixpoint Semantics for Linear Logic Programs | <|reference_start|>An Effective Fixpoint Semantics for Linear Logic Programs: In this paper we investigate the theoretical foundation of a new bottom-up semantics for linear logic programs, and more precisely for the fragment of LinLog that consists of the language LO enriched with the constant 1. We use constraints to symbolically and finitely represent possibly infinite collections of provable goals. We define a fixpoint semantics based on a new operator in the style of Tp working over constraints. An application of the fixpoint operator can be computed algorithmically. As sufficient conditions for termination, we show that the fixpoint computation is guaranteed to converge for propositional LO. To our knowledge, this is the first attempt to define an effective fixpoint semantics for linear logic programs. As an application of our framework, we also present a formal investigation of the relations between LO and Disjunctive Logic Programming. Using an approach based on abstract interpretation, we show that DLP fixpoint semantics can be viewed as an abstraction of our semantics for LO. We prove that the resulting abstraction is correct and complete for an interesting class of LO programs encoding Petri Nets.<|reference_end|> | arxiv | @article{bozzano2001an,
title={An Effective Fixpoint Semantics for Linear Logic Programs},
author={Marco Bozzano, Giorgio Delzanno, Maurizio Martelli},
journal={arXiv preprint arXiv:cs/0102025},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102025},
primaryClass={cs.PL}
} | bozzano2001an |
arxiv-669861 | cs/0102026 | Mathematical Model of Word Length on the Basis of the Cebanov-Fucks Distribution with Uniform Parameter Distribution | <|reference_start|>Mathematical Model of Word Length on the Basis of the Cebanov-Fucks Distribution with Uniform Parameter Distribution: The data on 13 typologically different languages have been processed using a two-parameter word length model, based on a 1-displaced uniform Poisson distribution. Statistical dependencies of the second parameter on the first are revealed for German texts and the genre of letters.<|reference_end|> | arxiv | @article{kromer2001mathematical,
title={Mathematical Model of Word Length on the Basis of the Cebanov-Fucks
Distribution with Uniform Parameter Distribution},
author={Victor Kromer},
journal={Kromer V.W. Matematiceskaja model' dliny slova na osnove
raspredelenija Cebanova-Fuksa s ravnomernym raspredeleniem parametra //
Informatika i problemy telekommunikacij: Mezdunarodnaja naucno-techniceskaja
konferencija (SibGUTI, 26-27 aprelja 2001 g.) Materialy konferencii. -
Novosibirsk: Isd-vo SibGUTI, 2001. - S. 74-75.},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102026},
primaryClass={cs.CL}
} | kromer2001mathematical |
arxiv-669862 | cs/0102027 | Gene Expression Programming: a New Adaptive Algorithm for Solving Problems | <|reference_start|>Gene Expression Programming: a New Adaptive Algorithm for Solving Problems: Gene expression programming, a genotype/phenotype genetic algorithm (linear and ramified), is presented here for the first time as a new technique for the creation of computer programs. Gene expression programming uses character linear chromosomes composed of genes structurally organized in a head and a tail. The chromosomes function as a genome and are subjected to modification by means of mutation, transposition, root transposition, gene transposition, gene recombination, and one- and two-point recombination. The chromosomes encode expression trees which are the object of selection. The creation of these separate entities (genome and expression tree) with distinct functions allows the algorithm to perform with high efficiency that greatly surpasses existing adaptive techniques. The suite of problems chosen to illustrate the power and versatility of gene expression programming includes symbolic regression, sequence induction with and without constant creation, block stacking, cellular automata rules for the density-classification problem, and two problems of boolean concept learning: the 11-multiplexer and the GP rule problem.<|reference_end|> | arxiv | @article{ferreira2001gene,
title={Gene Expression Programming: a New Adaptive Algorithm for Solving
Problems},
author={Candida Ferreira},
journal={Complex Systems, 13(2): 87-129, 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102027},
primaryClass={cs.AI cs.NE}
} | ferreira2001gene |
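A sketch of how a GEP gene's head/tail string (Karva notation) decodes into the expression tree it encodes: symbols are consumed breadth-first, each function taking its arguments from the next unread symbols. With head length h and maximum arity n, a tail of length h(n-1)+1 guarantees every decoding is valid; the function set and the example gene below are illustrative.

```python
OPS = {'+': (2, lambda a, b: a + b), '*': (2, lambda a, b: a * b)}

def evaluate(gene, env):
    nodes, layers, pos = [gene[0]], [[gene[0]]], 1
    while any(s in OPS for s in nodes):            # build the tree layer by layer
        width = sum(OPS[s][0] for s in nodes if s in OPS)
        nodes = list(gene[pos:pos + width])
        layers.append(nodes)
        pos += width                               # unused tail symbols stay unread
    values = [env[s] for s in layers[-1]]          # evaluate bottom-up
    for layer in reversed(layers[:-1]):
        nxt, out = iter(values), []
        for s in layer:
            if s in OPS:
                arity, fn = OPS[s]
                out.append(fn(*[next(nxt) for _ in range(arity)]))
            else:
                out.append(env[s])
        values = out
    return values[0]

# head '+*a' (h=3), tail 'abab' (length h*(n-1)+1 = 4); decodes to (a*b) + a.
print(evaluate('+*aabab', {'a': 2.0, 'b': 3.0}))   # 8.0
```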
arxiv-669863 | cs/0102028 | Communities of Practice: Going Virtual | <|reference_start|>Communities of Practice: Going Virtual: With the current trends towards downsizing, outsourcing and globalisation, modern organisations are reducing the numbers of people they employ. In addition, organisations now have to cope with the increasing internationalisation of business forcing collaboration and knowledge sharing across time and distance simultaneously. There is a need for new ways of thinking about how knowledge is shared in distributed groups. In this paper we explore a relatively new approach to knowledge sharing using Lave and Wenger's (1991) theory of Communities of Practice (CoPs). We investigate whether CoPs might translate to a geographically distributed international environment through a case study that explores the functioning of a CoP across national boundaries.<|reference_end|> | arxiv | @article{kimble2001communities,
title={Communities of Practice: Going Virtual},
author={Chris Kimble, Paul Hildreth, and Peter Wright},
journal={Chapter 13 in Knowledge Management and Business Model Innovation,
Idea Group Publishing, Hershey (USA)/London (UK), 2001. pp 220 - 234},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102028},
primaryClass={cs.HC cs.CY}
} | kimble2001communities |
arxiv-669864 | cs/0102029 | Computer based Information Systems and Managers' Work | <|reference_start|>Computer based Information Systems and Managers' Work: This paper identifies three categories of model: the Technology Impact Model; the Social Impact Model and the Integrationist Model, which imply different views of the "impact" of Information Technology on work organisation. These models are used to structure data from case studies conducted by the authors to explore the implications of the use of computer-based information systems for managers' work. The paper argues that the "impact" of information systems is not a single stable and predictable outcome but a non-linear ongoing process that changes and evolves over time. It also argues that the actions of individuals and groups within an organisation are not wholly determined by outside forces: people can and do react to, and shape, systems in different ways. In this sense, the "impact" of computer-based information systems on managers' work reflects decisions made by managers themselves about how the technology is used.<|reference_end|> | arxiv | @article{kimble2001computer,
title={Computer based Information Systems and Managers' Work},
author={Chris Kimble and Kevin McLoughlin},
journal={New Technology, Work and Employment, 10 (1), March, 1995. pp 56 -
67},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102029},
primaryClass={cs.CY cs.HC}
} | kimble2001computer |
arxiv-669865 | cs/0102030 | Soundness, Idempotence and Commutativity of Set-Sharing | <|reference_start|>Soundness, Idempotence and Commutativity of Set-Sharing: It is important that practical data-flow analyzers are backed by reliably proven theoretical results. Abstract interpretation provides a sound mathematical framework and necessary generic properties for an abstract domain to be well-defined and sound with respect to the concrete semantics. In logic programming, the abstract domain Sharing is a standard choice for sharing analysis for both practical work and further theoretical study. In spite of this, we found that there were no satisfactory proofs for the key properties of commutativity and idempotence that are essential for Sharing to be well-defined and that published statements of the soundness of Sharing assume the occurs-check. This paper provides a generalization of the abstraction function for Sharing that can be applied to any language, with or without the occurs-check. Results for soundness, idempotence and commutativity for abstract unification using this abstraction function are proven.<|reference_end|> | arxiv | @article{hill2001soundness,
title={Soundness, Idempotence and Commutativity of Set-Sharing},
author={Patricia M. Hill, Roberto Bagnara, Enea Zaffanella},
journal={arXiv preprint arXiv:cs/0102030},
year={2001},
archivePrefix={arXiv},
eprint={cs/0102030},
primaryClass={cs.PL}
} | hill2001soundness |
arxiv-669866 | cs/0103001 | Construction of an algorithm in parallel for the Fast Fourier Transform | <|reference_start|>Construction of an algorithm in parallel for the Fast Fourier Transform: We have designed, built, and run a code for the Fast Fourier Transform (FFT), compiled and executed on a cluster of 2^n computers under the MacOS operating system using the MacMPI routines. As a practical application, the code has been used to obtain the transform of an astronomical image, to apply a filter to it, and, via an inverse transform, to recover the image with the changes introduced by the filter. The cluster is installed at the Observatorio Astronomico Nacional in Colombia under the name OAN Cluster, and several applications have been run on it.<|reference_end|> | arxiv | @article{higuera2001construction,
title={Construction of an algorithm in parallel for the Fast Fourier Transform},
author={G. Mario A. Higuera, Humberto Sarria, Diana Fonseca, John Idarraga},
journal={arXiv preprint arXiv:cs/0103001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103001},
primaryClass={cs.DC cs.DS}
} | higuera2001construction |
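The transform at the heart of the entry above, in its simplest serial form: a recursive radix-2 Cooley-Tukey FFT (input length must be a power of two). In the cluster version each of the 2^n machines would own a slice of the data; this sketch only shows the mathematics being distributed.

```python
import cmath

def fft(x):
    n = len(x)
    if n == 1:
        return list(x)
    even, odd = fft(x[0::2]), fft(x[1::2])        # split into even/odd indices
    tw = [cmath.exp(-2j * cmath.pi * k / n) * odd[k] for k in range(n // 2)]
    return [even[k] + tw[k] for k in range(n // 2)] + \
           [even[k] - tw[k] for k in range(n // 2)]

x = [1, 2, 3, 4, 0, 0, 0, 0]
X = fft(x)
# Filtering an image would zero selected coefficients of X here, then invert
# via ifft(X) = conj(fft(conj(X))) / n to recover the filtered image.
print([round(abs(v), 3) for v in X])
```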
arxiv-669867 | cs/0103002 | Quantitative Neural Network Model of the Tip-of-the-Tongue Phenomenon Based on Synthesized Memory-Psycholinguistic-Metacognitive Approach | <|reference_start|>Quantitative Neural Network Model of the Tip-of-the-Tongue Phenomenon Based on Synthesized Memory-Psycholinguistic-Metacognitive Approach: A new three-stage computer artificial neural network model of the tip-of-the-tongue phenomenon is proposed. Each word's node is built from several interconnected, learned auto-associative two-layer neural networks, each of which represents a separate semantic, lexical, or phonological component of the word. The model synthesizes memory, psycholinguistic, and metamemory approaches, bridges speech errors and naming chronometry research traditions, and can explain quantitatively many tip-of-the-tongue effects.<|reference_end|> | arxiv | @article{gopych2001quantitative,
title={Quantitative Neural Network Model of the Tip-of-the-Tongue Phenomenon
Based on Synthesized Memory-Psycholinguistic-Metacognitive Approach},
author={Petro M. Gopych},
journal={arXiv preprint arXiv:cs/0103002},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103002},
primaryClass={cs.CL cs.AI q-bio.NC q-bio.QM}
} | gopych2001quantitative |
arxiv-669868 | cs/0103003 | Learning Policies with External Memory | <|reference_start|>Learning Policies with External Memory: In order for an agent to perform well in partially observable domains, it is usually necessary for actions to depend on the history of observations. In this paper, we explore a {\it stigmergic} approach, in which the agent's actions include the ability to set and clear bits in an external memory, and the external memory is included as part of the input to the agent. In this case, we need to learn a reactive policy in a highly non-Markovian domain. We explore two algorithms: SARSA(\lambda), which has had empirical success in partially observable domains, and VAPS, a new algorithm due to Baird and Moore, with convergence guarantees in partially observable domains. We compare the performance of these two algorithms on benchmark problems.<|reference_end|> | arxiv | @article{peshkin2001learning,
title={Learning Policies with External Memory},
author={Leonid Peshkin, Nicolas Meuleau and Leslie Kaelbling},
journal={In Bratko, I., and Dzeroski, S., eds., Machine Learning:
Proceedings of the Sixteenth International Conference, pp. 307-314. Morgan
Kaufmann, San Francisco, CA},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103003},
primaryClass={cs.LG}
} | peshkin2001learning |
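A sketch of the stigmergic setup in the entry above: tabular SARSA(lambda) where the agent's state is the pair (observation, memory bit) and the action set includes flipping the external memory bit. The `env` object with `reset()`/`step()` is a hypothetical interface, and treating the memory action as free (no reward, environment unchanged) is an assumption of this sketch.

```python
import random
from collections import defaultdict

def sarsa_lambda(env, actions, episodes=500, alpha=0.1, gamma=0.99,
                 lam=0.9, eps=0.1, rng=random.Random(0)):
    Q = defaultdict(float)                       # Q[(obs, mem), action]
    def policy(s):
        if rng.random() < eps:                   # epsilon-greedy exploration
            return rng.choice(actions)
        return max(actions, key=lambda a: Q[s, a])
    for _ in range(episodes):
        mem, obs, done = 0, env.reset(), False
        e = defaultdict(float)                   # accumulating eligibility traces
        s = (obs, mem)
        a = policy(s)
        while not done:
            if a == 'flip_mem':                  # stigmergic action: write memory
                mem, r, obs2 = 1 - mem, 0.0, obs
            else:
                obs2, r, done = env.step(a)
            s2 = (obs2, mem)
            a2 = policy(s2)
            delta = r + (0 if done else gamma * Q[s2, a2]) - Q[s, a]
            e[s, a] += 1.0
            for key in list(e):
                Q[key] += alpha * delta * e[key]
                e[key] *= gamma * lam
            s, a = s2, a2
    return Q
```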
arxiv-669869 | cs/0103004 | Rapid Application Evolution and Integration Through Document Metamorphosis | <|reference_start|>Rapid Application Evolution and Integration Through Document Metamorphosis: The Harland document management system implements a data model in which document (object) structure can be altered by mixin-style multiple inheritance at any time. This kind of structural fluidity has long been supported by knowledge-base management systems, but its use has primarily been in support of reasoning and inference. In this paper, we report our experiences building and supporting several non-trivial applications on top of this data model. Based on these experiences, we argue that structural fluidity is convenient for data-intensive applications other than knowledge-base management. Specifically, we suggest that this flexible data model is a natural fit for the decoupled programming methodology that arises naturally when using enterprise component frameworks.<|reference_end|> | arxiv | @article{aoki2001rapid,
title={Rapid Application Evolution and Integration Through Document
Metamorphosis},
author={Paul M. Aoki, Ian E. Smith, James D. Thornton},
journal={arXiv preprint arXiv:cs/0103004},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103004},
primaryClass={cs.DB}
} | aoki2001rapid |
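The "document metamorphosis" idea in miniature: Python allows mixin-style multiple inheritance to be added to a live object at any time by rebuilding its class, so a document can acquire new structure and behaviour after creation. Class and attribute names here are illustrative, not Harland's actual API.

```python
class Document:
    def __init__(self, title):
        self.title = title

class Reviewable:                      # a mixin granting review behaviour
    def approve(self, reviewer):
        self.approved_by = reviewer
        return f"{self.title} approved by {reviewer}"

def add_mixin(obj, mixin):
    """Metamorphose obj in place: its class now also inherits from mixin."""
    obj.__class__ = type(mixin.__name__ + obj.__class__.__name__,
                         (mixin, obj.__class__), {})

doc = Document("Q3 report")
add_mixin(doc, Reviewable)             # structure altered after creation
print(doc.approve("alice"))            # Q3 report approved by alice
```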
arxiv-669870 | cs/0103005 | Source-Filter Decomposition of Harmonic Sounds | <|reference_start|>Source-Filter Decomposition of Harmonic Sounds: This paper describes a method for decomposing steady-state instrument data into excitation and formant filter components. The input data, taken from several series of recordings of acoustical instruments is analyzed in the frequency domain, and for each series a model is built, which most accurately represents the data as a source-filter system. The source part is taken to be a harmonic excitation system with frequency-invariant magnitudes, and the filter part is considered to be responsible for all spectral inhomogenieties. This method has been applied to the SHARC database of steady state instrument data to create source-filter models for a large number of acoustical instruments. Subsequent use of such models can have a wide variety of applications, including improvements to wavetable and physical modeling synthesis, high quality pitch shifting, and creation of "hybrid" instrument timbres.<|reference_end|> | arxiv | @article{bisnovatyi2001source-filter,
title={Source-Filter Decomposition of Harmonic Sounds},
author={Ilia Bisnovatyi and Michael J. O'Donnell},
journal={arXiv preprint arXiv:cs/0103005},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103005},
primaryClass={cs.SD}
} | bisnovatyi2001source-filter |
arxiv-669871 | cs/0103006 | Flexible Software Framework for Modal Synthesis | <|reference_start|>Flexible Software Framework for Modal Synthesis: Modal synthesis is an important area of physical modeling whose exploration in the past has been held back by a large number of control parameters, the scarcity of general-purpose design tools and the difficulty of obtaining the computational power required for real-time synthesis. This paper presents an overview of a flexible software framework facilitating the design and control of instruments based on modal synthesis. The framework is designed as a hierarchy of polymorphic synthesis objects, representing modal structures of various complexity. As a method of generalizing all interactions among the elements of a modal system, an abstract notion of {\it energy} is introduced, and a set of energy transfer functions is provided. Such abstraction leads to a design where the dynamics of interactions can be largely separated from the specifics of particular modal structures, yielding an easily configurable and expandable system. A real-time version of the framework has been implemented as a set of C++ classes along with an integrating shell and a GUI, and is currently being used to design and play modal instruments, as well as to survey fundamental properties of various modal algorithms.<|reference_end|> | arxiv | @article{bisnovatyi2001flexible,
title={Flexible Software Framework for Modal Synthesis},
author={Ilia Bisnovatyi},
journal={in Proceedings of COST-G6 Conference on Digital Audio Effects
(DAFx-00), Dec 7-9, 2000, Verona, Italy},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103006},
primaryClass={cs.SD}
} | bisnovatyi2001flexible |
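The core of modal synthesis, the technique the two entries above build on, in a few lines: a struck object is modelled as a bank of exponentially damped sinusoids, one per mode. The frequencies, decay rates, and amplitudes below are illustrative; in the framework described above, the abstract "energy" transferred to each mode would set these amplitudes.

```python
import math

def modal_tone(modes, sr=44100, dur=1.0):
    """modes: list of (freq_hz, decay_per_sec, amplitude). Returns samples."""
    n = int(sr * dur)
    out = [0.0] * n
    for f, d, a in modes:
        for i in range(n):
            t = i / sr
            out[i] += a * math.exp(-d * t) * math.sin(2 * math.pi * f * t)
    return out

# Three modes roughly in a stiff-string relationship (made-up values).
samples = modal_tone([(220.0, 3.0, 1.0), (443.0, 5.0, 0.5), (670.0, 8.0, 0.3)])
print(max(samples))
```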
arxiv-669872 | cs/0103007 | Two-parameter Model of Word Length "Language - Genre" | <|reference_start|>Two-parameter Model of Word Length "Language - Genre": A two-parameter model of word length measured by the number of syllables comprising it is proposed. The first parameter is dependent on language type, the second one - on text genre and reflects the degree of completion of synergetic processes of language optimization.<|reference_end|> | arxiv | @article{kromer2001two-parameter,
title={Two-parameter Model of Word Length "Language - Genre"},
author={Victor Kromer},
journal={arXiv preprint arXiv:cs/0103007},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103007},
primaryClass={cs.CL}
} | kromer2001two-parameter |
arxiv-669873 | cs/0103008 | The Limits of Horn Logic Programs | <|reference_start|>The Limits of Horn Logic Programs: Given a sequence $\{\Pi_n\}$ of Horn logic programs, the limit $\Pi$ of $\{\Pi_n\}$ is the set of the clauses such that every clause in $\Pi$ belongs to almost every $\Pi_n$ and every clause in infinitely many $\Pi_n$'s belongs to $\Pi$ also. The limit program $\Pi$ is still Horn but may be infinite. In this paper, we consider if the least Herbrand model of the limit of a given Horn logic program sequence $\{\Pi_n\}$ equals the limit of the least Herbrand models of each logic program $\Pi_n$. It is proved that this property is not true in general but holds if Horn logic programs satisfy an assumption which can be syntactically checked and be satisfied by a class of Horn logic programs. Thus, under this assumption we can approach the least Herbrand model of the limit $\Pi$ by the sequence of the least Herbrand models of each finite program $\Pi_n$. We also prove that if a finite Horn logic program satisfies this assumption, then the least Herbrand model of this program is recursive. Finally, by use of the concept of stability from dynamical systems, we prove that this assumption is exactly a sufficient condition to guarantee the stability of fixed points for Horn logic programs.<|reference_end|> | arxiv | @article{ma2001the,
title={The Limits of Horn Logic Programs},
author={Shilong Ma, Yuefei Sui, Ke Xu},
journal={In P. J. Stuckey (Ed.): Proc. of 18th ICLP (short paper), LNCS
2401, p. 467, Denmark, 2002.},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103008},
primaryClass={cs.LO cs.PL}
} | ma2001the |
arxiv-669874 | cs/0103009 | Toward an architecture for quantum programming | <|reference_start|>Toward an architecture for quantum programming: It is becoming increasingly clear that, if a useful device for quantum computation will ever be built, it will be embodied by a classical computing machine with control over a truly quantum subsystem, this apparatus performing a mixture of classical and quantum computation. This paper investigates a possible approach to the problem of programming such machines: a template high level quantum language is presented which complements a generic general purpose classical language with a set of quantum primitives. The underlying scheme involves a run-time environment which calculates the byte-code for the quantum operations and pipes it to a quantum device controller or to a simulator. This language can compactly express existing quantum algorithms and reduce them to sequences of elementary operations; it also easily lends itself to automatic, hardware independent, circuit simplification. A publicly available preliminary implementation of the proposed ideas has been realized using the C++ language.<|reference_end|> | arxiv | @article{bettelli2001toward,
title={Toward an architecture for quantum programming},
author={S. Bettelli, L. Serafini, T. Calarco},
journal={Eur. Phys. J. D, Vol. 25, No. 2, pp. 181-200 (2003)},
year={2001},
doi={10.1140/epjd/e2003-00242-2},
number={IRST technical report 0103-010},
archivePrefix={arXiv},
eprint={cs/0103009},
primaryClass={cs.PL quant-ph}
} | bettelli2001toward |
arxiv-669875 | cs/0103010 | Magical Number Seven Plus or Minus Two: Syntactic Structure Recognition in Japanese and English Sentences | <|reference_start|>Magical Number Seven Plus or Minus Two: Syntactic Structure Recognition in Japanese and English Sentences: George A. Miller said that human beings have only seven chunks in short-term memory, plus or minus two. We counted the number of bunsetsus (phrases) whose modifiees are undetermined in each step of an analysis of the dependency structure of Japanese sentences, and which therefore must be stored in short-term memory. The number was roughly less than nine, the upper bound of seven plus or minus two. We also obtained similar results with English sentences under the assumption that human beings recognize a series of words, such as a noun phrase (NP), as a unit. This indicates that if we assume that the human cognitive units in Japanese and English are bunsetsu and NP respectively, analysis will support Miller's $7 \pm 2$ theory.<|reference_end|> | arxiv | @article{murata2001magical,
title={Magical Number Seven Plus or Minus Two: Syntactic Structure Recognition
in Japanese and English Sentences},
author={Masaki Murata, Kiyotaka Uchimoto, Qing Ma, and Hitoshi Isahara},
journal={CICLing'2001, Mexico City, February 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103010},
primaryClass={cs.CL}
} | murata2001magical |
arxiv-669876 | cs/0103011 | A Machine-Learning Approach to Estimating the Referential Properties of Japanese Noun Phrases | <|reference_start|>A Machine-Learning Approach to Estimating the Referential Properties of Japanese Noun Phrases: The referential properties of noun phrases in the Japanese language, which has no articles, are useful for article generation in Japanese-English machine translation and for anaphora resolution in Japanese noun phrases. They are generally classified as generic noun phrases, definite noun phrases, and indefinite noun phrases. In the previous work, referential properties were estimated by developing rules that used clue words. If two or more rules were in conflict with each other, the category having the maximum total score given by the rules was selected as the desired category. The score given by each rule was established by hand, so the manpower cost was high. In this work, we automatically adjusted these scores by using a machine-learning method and succeeded in reducing the amount of manpower needed to adjust these scores.<|reference_end|> | arxiv | @article{murata2001a,
title={A Machine-Learning Approach to Estimating the Referential Properties of
Japanese Noun Phrases},
author={Masaki Murata, Kiyotaka Uchimoto, Qing Ma, and Hitoshi Isahara},
journal={CICLing'2001, Mexico City, February 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103011},
primaryClass={cs.CL}
} | murata2001a |
arxiv-669877 | cs/0103012 | Meaning Sort - Three examples: dictionary construction, tagged corpus construction, and information presentation system | <|reference_start|>Meaning Sort - Three examples: dictionary construction, tagged corpus construction, and information presentation system: It is often useful to sort words into an order that reflects relations among their meanings as obtained by using a thesaurus. In this paper, we introduce a method of arranging words semantically by using several types of `{\sf is-a}' thesauri and a multi-dimensional thesaurus. We also describe three major applications where a meaning sort is useful and show the effectiveness of a meaning sort. Since there is no doubt that a word list in meaning-order is easier to use than a word list in some random order, a meaning sort, which can easily produce a word list in meaning-order, must be useful and effective.<|reference_end|> | arxiv | @article{murata2001meaning,
title={Meaning Sort - Three examples: dictionary construction, tagged corpus
construction, and information presentation system},
author={Masaki Murata, Kyoko Kanzaki, Kiyotaka Uchimoto, Qing Ma, and Hitoshi
Isahara},
journal={CICLing'2001, Mexico City, February 2001},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103012},
primaryClass={cs.CL}
} | murata2001meaning |
arxiv-669878 | cs/0103013 | CRL at Ntcir2 | <|reference_start|>CRL at Ntcir2: We have developed systems of two types for NTCIR2. One is an enhanced version of the system we developed for NTCIR1 and IREX. It submitted retrieval results for JJ and CC tasks. A variety of parameters were tried with the system. It used such characteristics of newspapers as locational information in the CC tasks. The system got good results for both of the tasks. The other system is a portable system which avoids free parameters as much as possible. The system submitted retrieval results for JJ, JE, EE, EJ, and CC tasks. The system automatically determined the number of top documents and the weight of the original query used in automatic-feedback retrieval. It also determined relevant terms quite robustly. For EJ and JE tasks, it used document expansion to augment the initial queries. It achieved good results, except on the CC tasks.<|reference_end|> | arxiv | @article{murata2001crl,
title={CRL at Ntcir2},
author={Masaki Murata, Masao Utiyama, Qing Ma, Hiromi Ozaku, and Hitoshi
Isahara},
journal={arXiv preprint arXiv:cs/0103013},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103013},
primaryClass={cs.CL}
} | murata2001crl |
arxiv-669879 | cs/0103014 | Faster-than-light effects and negative group delays in optics and electronics, and their applications | <|reference_start|>Faster-than-light effects and negative group delays in optics and electronics, and their applications: Recent manifestations of apparently faster-than-light effects confirmed our predictions that the group velocity in transparent optical media can exceed c. Special relativity is not violated by these phenomena. Moreover, in the electronic domain, the causality principle does not forbid negative group delays of analytic signals in electronic circuits, in which the peak of an output pulse leaves the exit port of a circuit before the peak of the input pulse enters the input port. Furthermore, pulse distortion for these superluminal analytic signals can be negligible in both the optical and electronic domains. Here we suggest an extension of these ideas to the microelectronic domain. The underlying principle is that negative feedback can be used to produce negative group delays. Such negative group delays can be used to cancel out the positive group delays due to transistor latency (e.g., the finite RC rise time of MOSFETS caused by their intrinsic gate capacitance), as well as the propagation delays due to the interconnects between transistors. Using this principle, it is possible to speed up computer systems.<|reference_end|> | arxiv | @article{chiao2001faster-than-light,
title={Faster-than-light effects and negative group delays in optics and
electronics, and their applications},
author={Raymond Y. Chiao, Jandir M. Hickmann and Daniel Solli},
journal={arXiv preprint arXiv:cs/0103014},
year={2001},
doi={10.1117/12.432562},
archivePrefix={arXiv},
eprint={cs/0103014},
primaryClass={cs.PF}
} | chiao2001faster-than-light |
arxiv-669880 | cs/0103015 | Fitness Uniform Selection to Preserve Genetic Diversity | <|reference_start|>Fitness Uniform Selection to Preserve Genetic Diversity: In evolutionary algorithms, the fitness of a population increases with time by mutating and recombining individuals and by a biased selection of more fit individuals. The right selection pressure is critical in ensuring sufficient optimization progress on the one hand and in preserving genetic diversity to be able to escape from local optima on the other. We propose a new selection scheme, which is uniform in the fitness values. It generates selection pressure towards sparsely populated fitness regions, not necessarily towards higher fitness, as is the case for all other selection schemes. We show that the new selection scheme can be much more effective than standard selection schemes.<|reference_end|> | arxiv | @article{hutter2001fitness,
title={Fitness Uniform Selection to Preserve Genetic Diversity},
author={Marcus Hutter},
journal={Proceedings of the 2002 Congress on Evolutionary Computation
(CEC-2002) 783-788},
year={2001},
number={IDSIA-01-01},
archivePrefix={arXiv},
eprint={cs/0103015},
primaryClass={cs.AI cs.DC cs.LG q-bio}
} | hutter2001fitness |
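A sketch of the scheme proposed in the entry above (fitness uniform selection): instead of biasing selection toward higher fitness, draw a target fitness uniformly between the current population minimum and maximum, then select the individual whose fitness is nearest to that target, so individuals in sparsely populated fitness regions are favoured. The population and fitness function are toy choices.

```python
import random

def fuss_select(population, fitness, rng=random.Random(0)):
    fits = [fitness(x) for x in population]
    target = rng.uniform(min(fits), max(fits))          # uniform in fitness space
    return min(zip(population, fits), key=lambda p: abs(p[1] - target))[0]

pop = list(range(10))
picked = [fuss_select(pop, lambda x: x * x, random.Random(i)) for i in range(8)]
print(picked)   # selections spread across the fitness range, not crowding the top
```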
arxiv-669881 | cs/0103016 | Search in Power-Law Networks | <|reference_start|>Search in Power-Law Networks: Many communication and social networks have power-law link distributions, containing a few nodes which have a very high degree and many with low degree. The high connectivity nodes play the important role of hubs in communication and networking, a fact which can be exploited when designing efficient search algorithms. We introduce a number of local search strategies which utilize high degree nodes in power-law graphs and which have costs which scale sub-linearly with the size of the graph. We also demonstrate the utility of these strategies on the Gnutella peer-to-peer network.<|reference_end|> | arxiv | @article{adamic2001search,
title={Search in Power-Law Networks},
author={L. A. Adamic (1), R. M. Lukose (2), A. R. Puniyani (1), B. A. Huberman
(2) ((1) Stanford University, (2) HP Sand Hill Labs, Palo Alto, CA)},
journal={Phys. Rev. E 64, 046135 (2001)},
year={2001},
doi={10.1103/PhysRevE.64.046135},
archivePrefix={arXiv},
eprint={cs/0103016},
primaryClass={cs.NI cond-mat.dis-nn cond-mat.stat-mech cs.PF}
} | adamic2001search |
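A sketch of the degree-seeking local search strategy studied in the entry above: at each step the message moves to the highest-degree unvisited neighbour, exploiting the hubs of a power-law graph. The graph below is grown by simple preferential attachment; the fallback when all neighbours were visited is an assumption of this sketch.

```python
import random

def preferential_graph(n, rng=random.Random(0)):
    adj = {0: {1}, 1: {0}}
    ends = [0, 1]                          # node listed once per edge endpoint
    for v in range(2, n):
        u = rng.choice(ends)               # attach to nodes prop. to their degree
        adj.setdefault(v, set()).add(u)
        adj[u].add(v)
        ends += [u, v]
    return adj

def degree_search(adj, start, target, max_steps=200):
    seen, v = {start}, start
    for step in range(max_steps):
        if v == target or target in adj[v]:
            return step                    # found: target is current or adjacent
        frontier = [u for u in adj[v] if u not in seen] or list(adj[v])
        v = max(frontier, key=lambda u: len(adj[u]))   # jump to the biggest hub
        seen.add(v)
    return None

g = preferential_graph(300)
print(degree_search(g, start=299, target=17))          # steps taken, or None
```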
arxiv-669882 | cs/0103017 | Nice point sets can have nasty Delaunay triangulations | <|reference_start|>Nice point sets can have nasty Delaunay triangulations: We consider the complexity of Delaunay triangulations of sets of points in R^3 under certain practical geometric constraints. The spread of a set of points is the ratio between the longest and shortest pairwise distances. We show that in the worst case, the Delaunay triangulation of n points in R^3 with spread D has complexity Omega(min{D^3, nD, n^2}) and O(min{D^4, n^2}). For the case D = Theta(sqrt{n}), our lower bound construction consists of a uniform sample of a smooth convex surface with bounded curvature. We also construct a family of smooth connected surfaces such that the Delaunay triangulation of any good point sample has near-quadratic complexity.<|reference_end|> | arxiv | @article{erickson2001nice,
title={Nice point sets can have nasty Delaunay triangulations},
author={Jeff Erickson},
journal={arXiv preprint arXiv:cs/0103017},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103017},
primaryClass={cs.CG}
} | erickson2001nice |
arxiv-669883 | cs/0103018 | The Existential Theory of Equations with Rational Constraints in Free Groups is PSPACE-Complete | <|reference_start|>The Existential Theory of Equations with Rational Constraints in Free Groups is PSPACE-Complete: It is known that the existential theory of equations in free groups is decidable. This is a famous result of Makanin. On the other hand it has been shown that the scheme of his algorithm is not primitive recursive. In this paper we present an algorithm that works in polynomial space, even in the more general setting where each variable has a rational constraint, that is, the solution has to respect a specification given by a regular word language. Our main result states that the existential theory of equations in free groups with rational constraints is PSPACE-complete. We obtain this result as a corollary of the corresponding statement about free monoids with involution.<|reference_end|> | arxiv | @article{diekert2001the,
title={The Existential Theory of Equations with Rational Constraints in Free
Groups is PSPACE-Complete},
author={Volker Diekert, Claudio Gutierrez, Christian Hagenah},
journal={arXiv preprint arXiv:cs/0103018},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103018},
primaryClass={cs.DS cs.LO}
} | diekert2001the |
arxiv-669884 | cs/0103019 | On the NP-completeness of Finding an Optimal Strategy in Games with Common Payoffs | <|reference_start|>On the NP-completeness of Finding an Optimal Strategy in Games with Common Payoffs: Consider a very simple class of (finite) games: after an initial move by nature, each player makes one move. Moreover, the players have common interests: at each node, all the players get the same payoff. We show that the problem of determining whether there exists a joint strategy where each player has an expected payoff of at least r is NP-complete as a function of the number of nodes in the extensive-form representation of the game.<|reference_end|> | arxiv | @article{chu2001on,
title={On the NP-completeness of Finding an Optimal Strategy in Games with
Common Payoffs},
author={Francis Chu and Joseph Y. Halpern},
journal={arXiv preprint arXiv:cs/0103019},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103019},
primaryClass={cs.GT cs.CC cs.DC}
} | chu2001on |
arxiv-669885 | cs/0103020 | Belief Revision: A Critique | <|reference_start|>Belief Revision: A Critique: We examine carefully the rationale underlying the approaches to belief change taken in the literature, and highlight what we view as methodological problems. We argue that to study belief change carefully, we must be quite explicit about the ``ontology'' or scenario underlying the belief change process. This is something that has been missing in previous work, with its focus on postulates. Our analysis shows that we must pay particular attention to two issues that have often been taken for granted: The first is how we model the agent's epistemic state. (Do we use a set of beliefs, or a richer structure, such as an ordering on worlds? And if we use a set of beliefs, in what language are these beliefs expressed?) We show that even postulates that have been called ``beyond controversy'' are unreasonable when the agent's beliefs include beliefs about her own epistemic state as well as the external world. The second is the status of observations. (Are observations known to be true, or just believed? In the latter case, how firm is the belief?) Issues regarding the status of observations arise particularly when we consider iterated belief revision, and we must confront the possibility of revising by p and then by not-p.<|reference_end|> | arxiv | @article{friedman2001belief,
title={Belief Revision: A Critique},
author={Nir Friedman and Joseph Y. Halpern},
journal={Journal of Logic, Language, and Information, vol. 8, 1999, pp.
401-420},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103020},
primaryClass={cs.AI cs.LO}
} | friedman2001belief |
arxiv-669886 | cs/0103021 | Quantum Clock Synchronization with one qubit | <|reference_start|>Quantum Clock Synchronization with one qubit: The clock synchronization problem is to determine the time difference T between two spatially separated parties. We improve on I. Chuang's quantum clock synchronization algorithm and show that it is possible to obtain T to n bits of accuracy while communicating only one qubit in one direction and using an O(2^n) frequency range. We also prove a quantum lower bound of \Omega(2^n) for the product of the transmitted qubits and the range of frequencies, thus showing that our algorithm is optimal.<|reference_end|> | arxiv | @article{harrelson2001quantum,
title={Quantum Clock Synchronization with one qubit},
author={Chris Harrelson, Iordanis Kerenidis (UC Berkeley)},
journal={arXiv preprint arXiv:cs/0103021},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103021},
primaryClass={cs.CC quant-ph}
} | harrelson2001quantum |
arxiv-669887 | cs/0103022 | Secure, Efficient Data Transport and Replica Management for High-Performance Data-Intensive Computing | <|reference_start|>Secure, Efficient Data Transport and Replica Management for High-Performance Data-Intensive Computing: An emerging class of data-intensive applications involves the geographically dispersed extraction of complex scientific information from very large collections of measured or computed data. Such applications arise, for example, in experimental physics, where the data in question is generated by accelerators, and in simulation science, where the data is generated by supercomputers. So-called Data Grids provide essential infrastructure for such applications, much as the Internet provides essential services for applications such as e-mail and the Web. We describe here two services that we believe are fundamental to any Data Grid: reliable, high-speed transport and replica management. Our high-speed transport service, GridFTP, extends the popular FTP protocol with new features required for Data Grid applications, such as striping and partial file access. Our replica management service integrates a replica catalog with GridFTP transfers to provide for the creation, registration, location, and management of dataset replicas. We present the design of both services and also preliminary performance results. Our implementations exploit security and other services provided by the Globus Toolkit.<|reference_end|> | arxiv | @article{allcock2001secure,
title={Secure, Efficient Data Transport and Replica Management for
High-Performance Data-Intensive Computing},
author={Bill Allcock, Joe Bester, John Bresnahan, Ann L. Chervenak, Ian
Foster, Carl Kesselman, Sam Meder, Veronika Nefedova, Darcy Quesnel, Steven
Tuecke},
journal={arXiv preprint arXiv:cs/0103022},
year={2001},
number={ANL/MCS-P871-0201},
archivePrefix={arXiv},
eprint={cs/0103022},
primaryClass={cs.DC cs.DB}
} | allcock2001secure
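
As background for the "partial file access" feature, the sketch below shows the plain-FTP analogue using only Python's standard ftplib: the rest argument issues a REST command so the server starts the transfer at a byte offset. This is not GridFTP (no striping, parallelism, or GSI security), and the host, path, and credentials are placeholders.

from ftplib import FTP

def fetch_from_offset(host, path, offset, user="anonymous", passwd=""):
    # Retrieve `path` starting at byte `offset` via FTP's REST command,
    # the plain-FTP ancestor of GridFTP's partial file access.
    chunks = []
    ftp = FTP(host)
    ftp.login(user, passwd)
    ftp.retrbinary(f"RETR {path}", chunks.append, rest=offset)
    ftp.quit()
    return b"".join(chunks)

# Hypothetical usage (placeholder host and path):
# data = fetch_from_offset("ftp.example.org", "pub/big.dat", offset=1 << 20)
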
arxiv-669888 | cs/0103023 | A Dualheap Selection Algorithm - A Call for Analysis | <|reference_start|>A Dualheap Selection Algorithm - A Call for Analysis: An algorithm is presented that efficiently solves the selection problem: finding the k-th smallest member of a set. Relevant to a divide-and-conquer strategy, the algorithm also partitions a set into small and large valued subsets. Applied recursively, this partitioning results in a sorted set. The algorithm's applicability is therefore much broader than just the selection problem. The presented algorithm is based upon R.W. Floyd's 1964 algorithm that constructs a heap from the bottom-up. Empirically, the presented algorithm's performance appears competitive with the popular quickselect algorithm, a variant of C.A.R. Hoare's 1962 quicksort algorithm. Furthermore, constructing a heap from the bottom-up is an inherently parallel process (processors can work independently and simultaneously on subheap construction), suggesting a performance advantage with parallel implementations. Given the presented algorithm's broad applicability, simplicity, serial performance, and parallel nature, further study is warranted. Specifically, worst-case analysis is an important but still unsolved problem.<|reference_end|> | arxiv | @article{sepesi2001a,
title={A Dualheap Selection Algorithm - A Call for Analysis},
author={Greg Sepesi},
journal={arXiv preprint arXiv:cs/0103023},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103023},
primaryClass={cs.DS cs.DC}
} | sepesi2001a |
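
For readers unfamiliar with the 1964 building block the abstract starts from, here is a minimal sketch of Floyd's bottom-up heap construction in Python: a max-heap built in place in O(n) time. This is only the primitive, not the dualheap selection algorithm itself.

def sift_down(a, i, n):
    # Push a[i] down until the max-heap property holds beneath it.
    while True:
        left, right, largest = 2 * i + 1, 2 * i + 2, i
        if left < n and a[left] > a[largest]:
            largest = left
        if right < n and a[right] > a[largest]:
            largest = right
        if largest == i:
            return
        a[i], a[largest] = a[largest], a[i]
        i = largest

def build_max_heap(a):
    n = len(a)
    # Work bottom-up: every leaf is already a heap, so start at the last
    # internal node. Subheaps could be built independently in parallel,
    # which is the parallelism the abstract alludes to.
    for i in range(n // 2 - 1, -1, -1):
        sift_down(a, i, n)
    return a

print(build_max_heap([3, 9, 2, 1, 4, 5]))  # root holds the maximum
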
arxiv-669889 | cs/0103024 | Notes on computing peaks in k-levels and parametric spanning trees | <|reference_start|>Notes on computing peaks in k-levels and parametric spanning trees: We give an algorithm to compute all the local peaks in the $k$-level of an arrangement of $n$ lines in $O(n \log n) + \tilde{O}((kn)^{2/3})$ time. We can also find the $\tau$ largest peaks in $O(n \log^2 n) + \tilde{O}((\tau n)^{2/3})$ time. Moreover, we consider the longest edge in a parametric minimum spanning tree (in other words, a bottleneck edge for connectivity), and give an algorithm to compute the parameter value (within a given interval) maximizing/minimizing the length of the longest edge in the MST. The time complexity is $\tilde{O}(n^{8/7}k^{1/7} + n k^{1/3})$.<|reference_end|> | arxiv | @article{katoh2001notes,
title={Notes on computing peaks in k-levels and parametric spanning trees},
author={Naoki Katoh and Takeshi Tokuyama},
journal={arXiv preprint arXiv:cs/0103024},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103024},
primaryClass={cs.CG cs.DS}
} | katoh2001notes |
arxiv-669890 | cs/0103025 | The Anatomy of the Grid - Enabling Scalable Virtual Organizations | <|reference_start|>The Anatomy of the Grid - Enabling Scalable Virtual Organizations: "Grid" computing has emerged as an important new field, distinguished from conventional distributed computing by its focus on large-scale resource sharing, innovative applications, and, in some cases, high-performance orientation. In this article, we define this new field. First, we review the "Grid problem," which we define as flexible, secure, coordinated resource sharing among dynamic collections of individuals, institutions, and resources-what we refer to as virtual organizations. In such settings, we encounter unique authentication, authorization, resource access, resource discovery, and other challenges. It is this class of problem that is addressed by Grid technologies. Next, we present an extensible and open Grid architecture, in which protocols, services, application programming interfaces, and software development kits are categorized according to their roles in enabling resource sharing. We describe requirements that we believe any such mechanisms must satisfy, and we discuss the central role played by the intergrid protocols that enable interoperability among different Grid systems. Finally, we discuss how Grid technologies relate to other contemporary technologies, including enterprise integration, application service provider, storage service provider, and peer-to-peer computing. We maintain that Grid concepts and technologies complement and have much to contribute to these other approaches.<|reference_end|> | arxiv | @article{foster2001the,
title={The Anatomy of the Grid - Enabling Scalable Virtual Organizations},
author={Ian Foster, Carl Kesselman, and Steven Tuecke},
journal={arXiv preprint arXiv:cs/0103025},
year={2001},
number={ANL/MCS-P870-0201},
archivePrefix={arXiv},
eprint={cs/0103025},
primaryClass={cs.AR cs.DC}
} | foster2001the |
arxiv-669891 | cs/0103026 | A Decision Tree of Bigrams is an Accurate Predictor of Word Sense | <|reference_start|>A Decision Tree of Bigrams is an Accurate Predictor of Word Sense: This paper presents a corpus-based approach to word sense disambiguation where a decision tree assigns a sense to an ambiguous word based on the bigrams that occur nearby. This approach is evaluated using the sense-tagged corpora from the 1998 SENSEVAL word sense disambiguation exercise. It is more accurate than the average results reported for 30 of 36 words, and is more accurate than the best results for 19 of 36 words.<|reference_end|> | arxiv | @article{pedersen2001a,
title={A Decision Tree of Bigrams is an Accurate Predictor of Word Sense},
author={Ted Pedersen},
journal={arXiv preprint arXiv:cs/0103026},
year={2001},
archivePrefix={arXiv},
eprint={cs/0103026},
primaryClass={cs.CL}
} | pedersen2001a |
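
A hedged toy version of the idea, assuming scikit-learn is available: represent each context by the bigrams occurring in it and let a decision tree choose the sense. The four "bank" contexts and both sense labels are invented; the paper's actual features, window size, and SENSEVAL data differ.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.tree import DecisionTreeClassifier

contexts = [
    "deposited money in the savings account at the bank",
    "the bank raised interest rates on loans",
    "fished from the muddy bank of the river",
    "sat on the grassy bank beside the stream",
]
senses = ["finance", "finance", "river", "river"]

# Binary bigram features: does each two-word sequence occur near the target?
bigrams = CountVectorizer(ngram_range=(2, 2), binary=True)
X = bigrams.fit_transform(contexts)

tree = DecisionTreeClassifier(random_state=0).fit(X, senses)
test = bigrams.transform(["opened an account at the local bank"])
print(tree.predict(test))  # the sense chosen from the test context's bigrams
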
arxiv-669892 | cs/0104001 | Maintaining Dynamic Matrices for Fully Dynamic Transitive Closure | <|reference_start|>Maintaining Dynamic Matrices for Fully Dynamic Transitive Closure: In this paper we introduce a general framework for casting fully dynamic transitive closure into the problem of reevaluating polynomials over matrices. With this technique, we improve the best known bounds for fully dynamic transitive closure. In particular, we devise a deterministic algorithm for general directed graphs that achieves $O(n^2)$ amortized time for updates, while preserving unit worst-case cost for queries. In case of deletions only, our algorithm performs updates faster, in $O(n)$ amortized time. Our matrix-based approach yields an algorithm for directed acyclic graphs that breaks through the $O(n^2)$ barrier on the single-operation complexity of fully dynamic transitive closure. We can answer queries in $O(n^\epsilon)$ time and perform updates in $O(n^{\omega(1,\epsilon,1)-\epsilon}+n^{1+\epsilon})$ time, for any $\epsilon\in[0,1]$, where $\omega(1,\epsilon,1)$ is the exponent of the multiplication of an $n\times n^{\epsilon}$ matrix by an $n^{\epsilon}\times n$ matrix. The current best bounds on $\omega(1,\epsilon,1)$ imply an $O(n^{0.58})$ query time and an $O(n^{1.58})$ update time. Our subquadratic algorithm is randomized, and has one-sided error.<|reference_end|> | arxiv | @article{demetrescu2001mantaining,
title={Maintaining Dynamic Matrices for Fully Dynamic Transitive Closure},
author={Camil Demetrescu and Giuseppe F. Italiano},
journal={arXiv preprint arXiv:cs/0104001},
year={2001},
number={TR-DIS-03-01},
archivePrefix={arXiv},
eprint={cs/0104001},
primaryClass={cs.DS cs.DM}
} | demetrescu2001mantaining |
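
As background for the matrix formulation, the sketch below computes a static transitive closure by repeated Boolean "squaring" of the adjacency matrix with NumPy. The paper's contribution is dynamizing this kind of polynomial-over-matrices computation under edge insertions and deletions, which this static sketch does not attempt.

import numpy as np

def transitive_closure(adj):
    # adj: n x n 0/1 adjacency matrix. Returns the reachability matrix.
    n = adj.shape[0]
    reach = (adj + np.eye(n, dtype=int)) > 0   # every vertex reaches itself
    # Repeated squaring: after k rounds, all paths of length up to 2^k are
    # captured, so ceil(log2(n)) rounds suffice.
    for _ in range(max(1, (n - 1).bit_length())):
        reach = (reach.astype(int) @ reach.astype(int)) > 0
    return reach

A = np.array([[0, 1, 0],
              [0, 0, 1],
              [0, 0, 0]])
print(transitive_closure(A).astype(int))  # 0 reaches 1 and 2; 1 reaches 2
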
arxiv-669893 | cs/0104002 | Replica Selection in the Globus Data Grid | <|reference_start|>Replica Selection in the Globus Data Grid: The Globus Data Grid architecture provides a scalable infrastructure for the management of storage resources and data that are distributed across Grid environments. These services are designed to support a variety of scientific applications, ranging from high-energy physics to computational genomics, that require access to large amounts of data (terabytes or even petabytes) with varied quality of service requirements. By layering on a set of core services, such as data transport, security, and replica cataloging, one can construct various higher-level services. In this paper, we discuss the design and implementation of a high-level replica selection service that uses information regarding replica location and user preferences to guide selection from among storage replica alternatives. We first present a basic replica selection service design, then show how dynamic information collected using Globus information service capabilities concerning storage system properties can help improve and optimize the selection process. We demonstrate the use of Condor's ClassAds resource description and matchmaking mechanism as an efficient tool for representing and matching storage resource capabilities and policies against application requirements.<|reference_end|> | arxiv | @article{vazhkudai2001replica,
title={Replica Selection in the Globus Data Grid},
author={Sudharshan Vazhkudai, Steven Tuecke, and Ian Foster},
journal={arXiv preprint arXiv:cs/0104002},
year={2001},
number={ANL/MCS-P869-0201},
archivePrefix={arXiv},
eprint={cs/0104002},
primaryClass={cs.DC}
} | vazhkudai2001replica |
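
A minimal imitation of the ClassAds matchmaking step in plain Python: each replica advertises attributes, the request supplies a requirements predicate and a rank expression, and the highest-ranked feasible replica wins. All attribute names and numbers are invented, and real ClassAds form a richer declarative language, not Python lambdas.

# Invented replica advertisements (in ClassAds these would be attribute lists).
replicas = [
    {"site": "anl",  "bandwidth_mbps": 80,  "free_gb": 500, "online": True},
    {"site": "isi",  "bandwidth_mbps": 200, "free_gb": 50,  "online": True},
    {"site": "cern", "bandwidth_mbps": 300, "free_gb": 900, "online": False},
]

# The application's request: hard requirements plus a preference (rank).
request = {
    "requirements": lambda r: r["online"] and r["free_gb"] >= 100,
    "rank":         lambda r: r["bandwidth_mbps"],
}

def match(request, replicas):
    # Keep replicas satisfying the requirements, then pick the best-ranked one.
    feasible = [r for r in replicas if request["requirements"](r)]
    return max(feasible, key=request["rank"], default=None)

print(match(request, replicas)["site"])  # 'anl': only online replica with room
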
arxiv-669894 | cs/0104003 | Chain Programs for Writing Deterministic Metainterpreters | <|reference_start|>Chain Programs for Writing Deterministic Metainterpreters: Many metainterpreters found in the logic programming literature are nondeterministic in the sense that the selection of program clauses is not determined. Examples are the familiar "demo" and "vanilla" metainterpreters. For some applications this nondeterminism is convenient. In some cases, however, a deterministic metainterpreter, having an explicit selection of clauses, is needed. Such cases include (1) conversion of OR parallelism into AND parallelism for "committed-choice" processors, (2) logic-based, imperative-language implementation of search strategies, and (3) simulation of bounded-resource reasoning. Deterministic metainterpreters are difficult to write because the programmer must be concerned about the set of unifiers of the children of a node in the derivation tree. We argue that it is both possible and advantageous to write these metainterpreters by reasoning in terms of object programs converted into a syntactically restricted form that we call "chain" form, where we can forget about unification, except for unit clauses. We give two transformations converting logic programs into chain form, one for "moded" programs (implicit in two existing exhaustive-traversal methods for committed-choice execution), and one for arbitrary definite programs. As illustrations of our approach we show examples of the three applications mentioned above.<|reference_end|> | arxiv | @article{rosenblueth2001chain,
title={Chain Programs for Writing Deterministic Metainterpreters},
author={David A. Rosenblueth},
journal={arXiv preprint arXiv:cs/0104003},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104003},
primaryClass={cs.LO cs.PL}
} | rosenblueth2001chain |
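
To illustrate what "explicit selection of clauses" means, here is a deterministic metainterpreter sketch in Python for a propositional Horn program: the clauses for each goal are tried strictly in listed order, so no nondeterministic choice is left open. The tiny program is invented, and real chain-form programs handle variables and unification, which this propositional toy sidesteps.

# head -> list of alternative clause bodies; a fact has an empty body.
PROGRAM = {
    "grandparent": [["parent", "parent_of_parent"]],
    "parent": [[]],
    "parent_of_parent": [[]],
}

def solve(goal):
    # Try each clause for `goal` in textual order -- an explicit, fixed
    # selection rule rather than the nondeterministic choice of a
    # "vanilla" metainterpreter.
    for body in PROGRAM.get(goal, []):
        if all(solve(sub) for sub in body):
            return True
    return False

print(solve("grandparent"))  # True
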
arxiv-669895 | cs/0104004 | Secure Counting: counting members of a subset without revealing their identities | <|reference_start|>Secure Counting: counting members of a subset without revealing their identities: Suppose there is a group of N people, some of whom possess a specific property. For example, their wealth is above or below a threshold, they voted for a particular candidate, they have a certain disease, etc. The group wants to find out how many of its members possess the property -- without revealing the identities, unless of course it turns out that all members do or do not have the attribute of interest. However, in all other cases the counting algorithm should guarantee that nobody can find out if a particular individual possesses the property unless all the other N-1 members of the group collude. The present article describes a method to solve the confidential counting problem with only 3*N-2 pairwise communications, or 2*N broadcasts (the last N-1 pairwise communications are merely to announce the result). The counting algorithm does not require any trusted third parties. All communications between parties involved can be conducted in public without compromising the security of counting.<|reference_end|> | arxiv | @article{kiselyov2001secure,
title={Secure Counting: counting members of a subset without revealing their
identities},
author={Oleg Kiselyov},
journal={arXiv preprint arXiv:cs/0104004},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104004},
primaryClass={cs.CR}
} | kiselyov2001secure |
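
For intuition, here is one classic additive-masking ring protocol with O(N) messages, simulated in Python: party 0 adds a random mask, each party adds its private bit to the running sum it receives, and party 0 unmasks the total. This generic textbook scheme is shown only for flavor -- the paper's protocol, its exact 3*N-2 message count, and its stronger N-1-collusion guarantee differ, and the naive ring below already leaks a party's bit to its two colluding neighbors.

import random

def ring_count(private_bits):
    n = len(private_bits)
    mask = random.randrange(10**9)       # party 0's secret mask
    running = mask + private_bits[0]     # party 0 starts the ring
    for i in range(1, n):                # each party sees only a masked sum
        running += private_bits[i]
    return running - mask                # party 0 unmasks and announces

votes = [1, 0, 1, 1, 0]                  # private 0/1 attributes
print(ring_count(votes))                 # 3, with no bit revealed in transit
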
arxiv-669896 | cs/0104005 | Bootstrapping Structure using Similarity | <|reference_start|>Bootstrapping Structure using Similarity: In this paper a new similarity-based learning algorithm, inspired by string edit-distance (Wagner and Fischer, 1974), is applied to the problem of bootstrapping structure from scratch. The algorithm takes a corpus of unannotated sentences as input and returns a corpus of bracketed sentences. The method works on pairs of unstructured sentences or sentences partially bracketed by the algorithm that have one or more words in common. It finds parts of sentences that are interchangeable (i.e. the parts of the sentences that are different in both sentences). These parts are taken as possible constituents of the same type. While this corresponds to the basic bootstrapping step of the algorithm, further structure may be learned from comparison with other (similar) sentences. We used this method for bootstrapping structure from the flat sentences of the Penn Treebank ATIS corpus, and compared the resulting structured sentences to the structured sentences in the ATIS corpus. Similarly, the algorithm was tested on the OVIS corpus. We obtained 86.04 % non-crossing brackets precision on the ATIS corpus and 89.39 % non-crossing brackets precision on the OVIS corpus.<|reference_end|> | arxiv | @article{van zaanen2001bootstrapping,
title={Bootstrapping Structure using Similarity},
author={Menno van Zaanen},
journal={Computational Linguistics in the Netherlands 1999 - Selected
Papers from the Tenth CLIN Meeting, pages 235-245},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104005},
primaryClass={cs.LG cs.CL}
} | van zaanen2001bootstrapping |
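
A sketch of the alignment step in Python, with the standard library's difflib.SequenceMatcher standing in for the Wagner-Fischer edit-distance alignment: shared word stretches are the "same" parts, and the stretches between them are hypothesized to be interchangeable constituents of the same type. The example sentences are ATIS-flavored inventions, and the system's scoring and selection-learning phases are not shown.

from difflib import SequenceMatcher

def interchangeable_parts(sent1, sent2):
    w1, w2 = sent1.split(), sent2.split()
    sm = SequenceMatcher(a=w1, b=w2, autojunk=False)
    pairs = []
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        if tag != "equal":  # differing spans are candidate constituents
            pairs.append((" ".join(w1[i1:i2]), " ".join(w2[j1:j2])))
    return pairs

print(interchangeable_parts(
    "show me flights from boston to dallas",
    "show me flights from atlanta to denver"))
# [('boston', 'atlanta'), ('dallas', 'denver')]
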
arxiv-669897 | cs/0104006 | ABL: Alignment-Based Learning | <|reference_start|>ABL: Alignment-Based Learning: This paper introduces a new type of grammar learning algorithm, inspired by string edit distance (Wagner and Fischer, 1974). The algorithm takes a corpus of flat sentences as input and returns a corpus of labelled, bracketed sentences. The method works on pairs of unstructured sentences that have one or more words in common. When two sentences are divided into parts that are the same in both sentences and parts that are different, this information is used to find parts that are interchangeable. These parts are taken as possible constituents of the same type. After this alignment learning step, the selection learning step selects the most probable constituents from all possible constituents. This method was used to bootstrap structure on the ATIS corpus (Marcus et al., 1993) and on the OVIS (Openbaar Vervoer Informatie Systeem (OVIS) stands for Public Transport Information System.) corpus (Bonnema et al., 1997). While the results are encouraging (we obtained up to 89.25 % non-crossing brackets precision), this paper will point out some of the shortcomings of our approach and will suggest possible solutions.<|reference_end|> | arxiv | @article{van zaanen2001abl:,
title={ABL: Alignment-Based Learning},
author={Menno van Zaanen},
journal={Proceedings of the 18th International Conference on Computational
Linguistics (COLING); Saarbrucken, Germany. pages 961-967},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104006},
primaryClass={cs.LG cs.CL}
} | van zaanen2001abl: |
arxiv-669898 | cs/0104007 | Bootstrapping Syntax and Recursion using Alignment-Based Learning | <|reference_start|>Bootstrapping Syntax and Recursion using Alignment-Based Learning: This paper introduces a new type of unsupervised learning algorithm, based on the alignment of sentences and Harris's (1951) notion of interchangeability. The algorithm is applied to an untagged, unstructured corpus of natural language sentences, resulting in a labelled, bracketed version of the corpus. Firstly, the algorithm aligns all sentences in the corpus in pairs, resulting in a partition of the sentences consisting of parts of the sentences that are similar in both sentences and parts that are dissimilar. This information is used to find (possibly overlapping) constituents. Next, the algorithm selects (non-overlapping) constituents. Several instances of the algorithm are applied to the ATIS corpus (Marcus et al., 1993) and the OVIS (Openbaar Vervoer Informatie Systeem (OVIS) stands for Public Transport Information System.) corpus (Bonnema et al., 1997). Apart from the promising numerical results, the most striking result is that even the simplest algorithm based on alignment learns recursion.<|reference_end|> | arxiv | @article{van zaanen2001bootstrapping,
title={Bootstrapping Syntax and Recursion using Alignment-Based Learning},
author={Menno van Zaanen},
journal={Proceedings of the Seventeenth International Conference on Machine
Learning. pages 1063-1070},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104007},
primaryClass={cs.LG cs.CL}
} | van zaanen2001bootstrapping |
arxiv-669899 | cs/0104008 | Event Indexing Systems for Efficient Selection and Analysis of HERA Data | <|reference_start|>Event Indexing Systems for Efficient Selection and Analysis of HERA Data: The design and implementation of two software systems introduced to improve the efficiency of offline analysis of event data taken with the ZEUS Detector at the HERA electron-proton collider at DESY are presented. Two different approaches were taken, one using a set of event directories and the other using a tag database based on a commercial object-oriented database management system. These are described and compared. Both systems provide quick direct access to individual collision events in a sequential data store of several terabytes, and they both considerably improve the event analysis efficiency. In particular, the tag database provides a very flexible selection mechanism and can dramatically reduce the computing time needed to extract small subsamples from the total event sample. Gains as large as a factor of 20 have been obtained.<|reference_end|> | arxiv | @article{bauerdick2001event,
title={Event Indexing Systems for Efficient Selection and Analysis of HERA Data},
author={L.A.T. Bauerdick, Adrian Fox-Murphy, Tobias Haas, Stefan Stonjek,
Enrico Tassi},
journal={Comput.Phys.Commun. 137 (2001) 236-246},
year={2001},
doi={10.1016/S0010-4655(01)00162-X},
number={DESY 01-045},
archivePrefix={arXiv},
eprint={cs/0104008},
primaryClass={cs.DB cs.IR}
} | bauerdick2001event |
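
A toy tag database in Python's built-in sqlite3, standing in for the commercial object database: one small indexed row per event carries selection tags plus the byte offset of the full record in the sequential store, so a tag query returns just the offsets to read back. The schema, tag names, and numbers are all invented.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("""CREATE TABLE tags (
    run INTEGER, event INTEGER, njets INTEGER, et_miss REAL,
    offset INTEGER)""")
conn.execute("CREATE INDEX idx_sel ON tags (njets, et_miss)")
conn.executemany("INSERT INTO tags VALUES (?, ?, ?, ?, ?)", [
    (1201, 1, 2, 14.5,      0),
    (1201, 2, 0,  3.1,  81920),
    (1202, 1, 3, 42.0, 163840),
])

# Select the small subsample via the tags, then seek directly into the
# sequential event store instead of scanning all of it.
offsets = [row[0] for row in conn.execute(
    "SELECT offset FROM tags WHERE njets >= 2 AND et_miss > 10")]
print(offsets)  # [0, 163840] -> read only these events from tape/disk
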
arxiv-669900 | cs/0104009 | Evaluating Recommendation Algorithms by Graph Analysis | <|reference_start|>Evaluating Recommendation Algorithms by Graph Analysis: We present a novel framework for evaluating recommendation algorithms in terms of the `jumps' that they make to connect people to artifacts. This approach emphasizes reachability via an algorithm within the implicit graph structure underlying a recommender dataset, and serves as a complement to evaluation in terms of predictive accuracy. The framework allows us to consider questions relating algorithmic parameters to properties of the datasets. For instance, given a particular algorithm `jump,' what is the average path length from a person to an artifact? Or, what choices of minimum ratings and jumps maintain a connected graph? We illustrate the approach with a common jump called the `hammock' using movie recommender datasets.<|reference_end|> | arxiv | @article{mirza2001evaluating,
title={Evaluating Recommendation Algorithms by Graph Analysis},
author={Batul J. Mirza, Benjamin J. Keller, and Naren Ramakrishnan},
journal={arXiv preprint arXiv:cs/0104009},
year={2001},
archivePrefix={arXiv},
eprint={cs/0104009},
primaryClass={cs.IR cs.DM cs.DS}
} | mirza2001evaluating |
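
A minimal Python sketch of the graph view, with invented ratings: people and artifacts form a bipartite graph whose edges are ratings, and breadth-first search measures how many hops separate a person from each artifact -- the kind of reachability question the framework asks of a "jump". Hammock jumps themselves are not implemented here.

from collections import deque

ratings = {          # person -> artifacts rated (invented data)
    "alice": {"m1", "m2"},
    "bob":   {"m2", "m3"},
    "carol": {"m3", "m4"},
}

graph = {}
for person, movies in ratings.items():
    for m in movies:  # undirected person-artifact edges
        graph.setdefault(person, set()).add(m)
        graph.setdefault(m, set()).add(person)

def distances_from(node):
    dist, frontier = {node: 0}, deque([node])
    while frontier:
        u = frontier.popleft()
        for v in graph[u]:
            if v not in dist:
                dist[v] = dist[u] + 1
                frontier.append(v)
    return dist

d = distances_from("alice")
movies = [n for n in graph if n.startswith("m")]
print(sum(d[m] for m in movies) / len(movies))  # average person->artifact hops
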