Dataset columns (string lengths given as min to max over the dataset):

  corpus_id      string, 7 to 12 characters
  paper_id       string, 9 to 16 characters
  title          string, 1 to 261 characters
  abstract       string, 70 to 4.02k characters
  source         string, 1 distinct value ("arxiv")
  bibtex         string, 208 to 20.9k characters
  citation_key   string, 6 to 100 characters
arxiv-671701
cs/0402048
Transformation Rules for Locally Stratified Constraint Logic Programs
<|reference_start|>Transformation Rules for Locally Stratified Constraint Logic Programs: We propose a set of transformation rules for constraint logic programs with negation. We assume that every program is locally stratified and, thus, it has a unique perfect model. We give sufficient conditions which ensure that the proposed set of transformation rules preserves the perfect model of the programs. Our rules extend in some respects the rules for logic programs and constraint logic programs already considered in the literature and, in particular, they include a rule for unfolding a clause with respect to a negative literal.<|reference_end|>
arxiv
@article{fioravanti2004transformation, title={Transformation Rules for Locally Stratified Constraint Logic Programs}, author={Fabio Fioravanti, Alberto Pettorossi, Maurizio Proietti}, journal={arXiv preprint arXiv:cs/0402048}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402048}, primaryClass={cs.PL cs.LO} }
fioravanti2004transformation
arxiv-671702
cs/0402049
An architecture for massive parallelization of the compact genetic algorithm
<|reference_start|>An architecture for massive parallelization of the compact genetic algorithm: This paper presents an architecture which is suitable for a massive parallelization of the compact genetic algorithm. The resulting scheme has three major advantages. First, it has low synchronization costs. Second, it is fault tolerant, and third, it is scalable. The paper argues that the benefits that can be obtained with the proposed approach are potentially greater than those obtained with traditional parallel genetic algorithms. In addition, the ideas suggested in the paper may also be relevant towards parallelizing more complex probabilistic model building genetic algorithms.<|reference_end|>
arxiv
@article{lobo2004an, title={An architecture for massive parallelization of the compact genetic algorithm}, author={Fernando G. Lobo, Claudio F. Lima, Hugo Martires}, journal={arXiv preprint arXiv:cs/0402049}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402049}, primaryClass={cs.NE} }
lobo2004an
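What makes the compact GA amenable to the massive parallelization described above is that a single probability vector stands in for an entire population, so the state held (and exchanged) by each node is tiny. For reference, a minimal sketch of the standard sequential cGA update; the parallel architecture itself is not shown, and the population size, evaluation budget, and OneMax fitness below are illustrative choices:

```python
import random

def compact_ga(n_bits, pop_size, evals, fitness):
    """Sequential compact GA: a probability vector replaces the population."""
    p = [0.5] * n_bits  # p[i] = probability that bit i is 1

    def sample():
        return [1 if random.random() < pi else 0 for pi in p]

    for _ in range(evals):
        a, b = sample(), sample()
        winner, loser = (a, b) if fitness(a) >= fitness(b) else (b, a)
        for i in range(n_bits):  # shift p toward the winner by 1/pop_size
            if winner[i] != loser[i]:
                step = 1.0 / pop_size
                p[i] += step if winner[i] == 1 else -step
                p[i] = min(1.0, max(0.0, p[i]))
    return p

# OneMax (count of 1-bits): p should drift toward all ones.
print(compact_ga(n_bits=20, pop_size=50, evals=2000, fitness=sum))
```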
arxiv-671703
cs/0402050
A philosophical essay on life and its connections with genetic algorithms
<|reference_start|>A philosophical essay on life and its connections with genetic algorithms: This paper makes a number of connections between life and various facets of genetic and evolutionary algorithms research. Specifically, it addresses the topics of adaptation, multiobjective optimization, decision making, deception, and search operators, among others. It argues that human life, from birth to death, is an adaptive or dynamic optimization problem where people are continuously searching for happiness. More important, the paper speculates that genetic algorithms can be used as a source of inspiration for helping people make decisions in their everyday life.<|reference_end|>
arxiv
@article{lobo2004a, title={A philosophical essay on life and its connections with genetic algorithms}, author={Fernando G. Lobo}, journal={arXiv preprint arXiv:cs/0402050}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402050}, primaryClass={cs.NE} }
lobo2004a
arxiv-671704
cs/0402051
Nested Intervals Tree Encoding with Continued Fractions
<|reference_start|>Nested Intervals Tree Encoding with Continued Fractions: We introduce a new variation of Tree Encoding with Nested Intervals, find connections with Materialized Path, and suggest a method for moving parts of the hierarchy.<|reference_end|>
arxiv
@article{tropashko2004nested, title={Nested Intervals Tree Encoding with Continued Fractions}, author={Vadim Tropashko}, journal={arXiv preprint arXiv:cs/0402051}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402051}, primaryClass={cs.DB} }
tropashko2004nested
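The abstract does not spell out the encoding, but the link it draws between materialized paths and continued fractions can be made concrete: a materialized path such as 1.2.3 corresponds to the continued fraction [1; 2, 3], and hence to a unique rational. A minimal sketch of that correspondence, with the caveat that the paper's actual nested-interval encoding may differ in detail:

```python
from fractions import Fraction

def path_to_rational(path):
    """Map a materialized path like [1, 2, 3] to the rational whose
    continued-fraction expansion is [a1; a2, a3, ...]."""
    x = Fraction(path[-1])
    for a in reversed(path[:-1]):
        x = a + 1 / x
    return x

def rational_to_path(x):
    """Invert the mapping by unwinding the continued fraction."""
    path = []
    while True:
        a, rest = divmod(x.numerator, x.denominator)
        path.append(a)
        if rest == 0:
            return path
        x = Fraction(x.denominator, rest)

p = [1, 2, 3]
r = path_to_rational(p)            # 1 + 1/(2 + 1/3) = 10/7
assert rational_to_path(r) == p    # round-trips exactly
```

Node labels must be positive integers for the round-trip to be exact, which matches the usual 1-based sibling numbering in materialized paths.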
arxiv-671705
cs/0402052
Continued fractions and RSA with small secret exponent
<|reference_start|>Continued fractions and RSA with small secret exponent: Extending the classical result of Legendre, we describe all solutions of the inequality |x - a/b| < c/b^2 in terms of the convergents of the continued fraction expansion of x. Namely, we show that a/b = (rp_{m+1} +- sp_m) / (rq_{m+1} +- sq_m) for some nonnegative integers m,r,s such that rs < 2c. As an application of this result, we describe a modification of the Verheul and van Tilborg variant of Wiener's attack on the RSA cryptosystem with small secret exponent.<|reference_end|>
arxiv
@article{dujella2004continued, title={Continued fractions and RSA with small secret exponent}, author={Andrej Dujella}, journal={Tatra Mt. Math. Publ. 29 (2004), 101-112.}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402052}, primaryClass={cs.CR math.NT} }
dujella2004continued
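To make the parametrization above concrete: every solution a/b of |x - a/b| < c/b^2 is built from two consecutive convergents of x and multipliers r, s with rs < 2c. A sketch that enumerates this candidate family from a finite continued-fraction expansion (the theorem guarantees every solution appears among the candidates, not the converse; `max_den` is an illustrative cutoff):

```python
from fractions import Fraction

def convergents(cf):
    """Yield convergents p_m/q_m of the continued fraction [a0; a1, a2, ...]."""
    p0, q0, p1, q1 = 1, 0, cf[0], 1
    yield p1, q1
    for a in cf[1:]:
        p0, q0, p1, q1 = p1, q1, a * p1 + p0, a * q1 + q0
        yield p1, q1

def candidate_fractions(cf, c, max_den):
    """Enumerate (r*p_{m+1} +- s*p_m) / (r*q_{m+1} +- s*q_m) over
    nonnegative r, s with r*s < 2*c, as in the theorem."""
    conv = list(convergents(cf))
    out = set()
    for (pm, qm), (pm1, qm1) in zip(conv, conv[1:]):
        for r in range(1, int(2 * c) + 1):
            for s in range(int(2 * c / r) + 1):
                if r * s >= 2 * c:
                    continue
                for sg in (1, -1):
                    num, den = r * pm1 + sg * s * pm, r * qm1 + sg * s * qm
                    if 0 < den <= max_den:
                        out.add(Fraction(num, den))
    return sorted(out)

# x = [0; 2, 2, 2, ...] approximates sqrt(2) - 1.
print(candidate_fractions([0, 2, 2, 2, 2, 2], c=2, max_den=50))
```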
arxiv-671706
cs/0402053
The Complexity of Modified Instances
<|reference_start|>The Complexity of Modified Instances: In this paper we study the complexity of solving a problem when a solution of a similar instance is known. This problem is relevant whenever instances may change from time to time, and known solutions may not remain valid after the change. We consider two scenarios: in the first one, what is known is only a solution of the problem before the change; in the second case, we assume that some additional information, found during the search for this solution, is also known. In the first setting, the techniques from the theory of NP-completeness suffice to show complexity results. In the second case, negative results can only be proved using the techniques of compilability, and are often related to the size of considered changes.<|reference_end|>
arxiv
@article{liberatore2004the, title={The Complexity of Modified Instances}, author={Paolo Liberatore}, journal={arXiv preprint arXiv:cs/0402053}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402053}, primaryClass={cs.CC cs.AI} }
liberatore2004the
arxiv-671707
cs/0402054
On the Security of the Yi-Tan-Siew Chaos-Based Cipher
<|reference_start|>On the Security of the Yi-Tan-Siew Chaos-Based Cipher: This paper presents a comprehensive analysis of the security of the Yi-Tan-Siew chaotic cipher proposed in [IEEE TCAS-I 49(12):1826-1829 (2002)]. A differential chosen-plaintext attack and a differential chosen-ciphertext attack are suggested to break the sub-key $K$, under the assumption that the time stamp can be altered by the attacker, which is reasonable in such attacks. Also, some security problems with the sub-keys $\alpha$ and $\beta$ are clarified, from both theoretical and experimental points of view. Further analysis shows that the security of this cipher is independent of the use of the chaotic tent map, once the sub-key $K$ is removed via the proposed differential chosen-plaintext attack.<|reference_end|>
arxiv
@article{li2004on, title={On the Security of the Yi-Tan-Siew Chaos-Based Cipher}, author={Shujun Li, Guanrong Chen and Xuanqin Mou}, journal={IEEE Trans. CAS-II, vol. 51, no. 12, pp. 665-669, 2004}, year={2004}, doi={10.1109/TCSII.2004.838657}, archivePrefix={arXiv}, eprint={cs/0402054}, primaryClass={cs.CR cs.PF nlin.CD} }
li2004on
arxiv-671708
cs/0402055
Lexical Base as a Compressed Language Model of the World (on the material of the Ukrainian language)
<|reference_start|>Lexical Base as a Compressed Language Model of the World (on the material of the Ukrainian language): The article verifies that the list of words selected by formal statistical methods (frequency and functional genre unrestrictedness) is not a conglomerate of unrelated words. It forms a system of interrelated items and can be called the "lexical base of language". This selected list of words covers all spheres of human activity. To verify this statement, the invariant synoptical scheme common to ideographic dictionaries of different languages was determined.<|reference_end|>
arxiv
@article{buk2004lexical, title={Lexical Base as a Compressed Language Model of the World (on the material of the Ukrainian language)}, author={Solomiya Buk}, journal={Psychology of Language and Communication. 2009, vol. 13, no. 2, pp. 35-44}, year={2004}, doi={10.2478/v10057-009-0008-3}, archivePrefix={arXiv}, eprint={cs/0402055}, primaryClass={cs.CL} }
buk2004lexical
arxiv-671709
cs/0402056
Digital Signal Transmission with Chaotic Encryption: Design and Evaluation of a FPGA Realization
<|reference_start|>Digital Signal Transmission with Chaotic Encryption: Design and Evaluation of a FPGA Realization: A discrete-time discrete-value pseudo-chaotic encoder/decoder system is presented. The pseudo-chaotic module is a 3D discrete version of the well-known Lorenz dynamical system. Scaling and biasing transformations as well as natural-number arithmetic are employed in order to simplify realizations on a small-size Field Programmable Gate Array (FPGA). The encryption ability is improved by using only the least significant byte of one of the pseudo-chaotic state variables as the key to encrypt the plain text. The key is periodically perturbed by another chaotic state variable. The statistical properties of the pseudo-chaotic cipher are compared with those of other pseudo-random generators available in the literature. As an example of the applicability of the technique, a full-duplex communication system is designed and constructed using FPGAs as the technological framework.<|reference_end|>
arxiv
@article{gonzalez2004digital, title={Digital Signal Transmission with Chaotic Encryption: Design and Evaluation of a FPGA Realization}, author={C.M. Gonzalez, H.A. Larrondo, C.A. Gayoso, L.J. Arnone, E.I. Boemo}, journal={arXiv preprint arXiv:cs/0402056}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402056}, primaryClass={cs.CR} }
gonzalez2004digital
arxiv-671710
cs/0402057
Integrating Defeasible Argumentation and Machine Learning Techniques
<|reference_start|>Integrating Defeasible Argumentation and Machine Learning Techniques: The field of machine learning (ML) is concerned with the question of how to construct algorithms that automatically improve with experience. In recent years many successful ML applications have been developed, such as data-mining programs, information-filtering systems, etc. Although ML algorithms allow the detection and extraction of interesting patterns of data for several kinds of problems, most of these algorithms are based on quantitative reasoning, as they rely on training data in order to infer so-called target functions. In recent years defeasible argumentation has proven to be a sound setting for formalizing common-sense qualitative reasoning. This approach can be combined with other inference techniques, such as those provided by machine learning theory. In this paper we outline different alternatives for combining defeasible argumentation and machine learning techniques. We suggest how different aspects of a generic argument-based framework can be integrated with other ML-based approaches.<|reference_end|>
arxiv
@article{gomez2004integrating, title={Integrating Defeasible Argumentation and Machine Learning Techniques}, author={Sergio Alejandro Gomez and Carlos Ivan Chesñevar}, journal={Procs. WICC 2003, pp. 787-791, Tandil, Argentina, May 2003}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402057}, primaryClass={cs.AI} }
gomez2004integrating
arxiv-671711
cs/0402058
A Tribute to Alain Colmerauer
<|reference_start|>A Tribute to Alain Colmerauer: The paper describes the contributions of Alain Colmerauer to the areas of logic programs (LP) and constraint logic programs (CLP).<|reference_end|>
arxiv
@article{cohen2004a, title={A Tribute to Alain Colmerauer}, author={Jacques Cohen}, journal={arXiv preprint arXiv:cs/0402058}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402058}, primaryClass={cs.PL} }
cohen2004a
arxiv-671712
cs/0402059
Light types for polynomial time computation in lambda-calculus
<|reference_start|>Light types for polynomial time computation in lambda-calculus: We propose a new type system for lambda-calculus ensuring that well-typed programs can be executed in polynomial time: Dual light affine logic (DLAL). DLAL has a simple type language with a linear and an intuitionistic type arrow, and one modality. It corresponds to a fragment of Light affine logic (LAL). We show that, contrary to LAL, DLAL ensures good properties on lambda-terms: subject reduction is satisfied and a well-typed term admits a polynomial bound on reduction by any strategy. We establish that, like LAL, DLAL can represent all polytime functions. Finally we give a type inference procedure for propositional DLAL.<|reference_end|>
arxiv
@article{baillot2004light, title={Light types for polynomial time computation in lambda-calculus}, author={Patrick Baillot, Kazushige Terui}, journal={arXiv preprint arXiv:cs/0402059}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402059}, primaryClass={cs.LO} }
baillot2004light
arxiv-671713
cs/0402060
Pseudorandom number generation by p-adic ergodic transformations: an addendum
<|reference_start|>Pseudorandom number generation by p-adic ergodic transformations: an addendum: The paper studies counter-dependent pseudorandom number generators based on $m$-variate ($m>1$) ergodic mappings of the space of 2-adic integers $\mathbb{Z}_2$. The sequence of internal states of these generators is defined by the recurrence law $\mathbf x_{i+1}= H^B_i(\mathbf x_i)\bmod{2^n}$, whereas their output sequence is $\mathbf z_{i}=F^B_i(\mathbf x_i)\bmod{2^n}$; here $\mathbf x_j, \mathbf z_j$ are $m$-dimensional vectors over $\mathbb{Z}_2$. It is shown how the results obtained for the univariate case can be extended to the multivariate case.<|reference_end|>
arxiv
@article{anashin2004pseudorandom, title={Pseudorandom number generation by p-adic ergodic transformations: an addendum}, author={Vladimir Anashin}, journal={"Applied Algebraic Dynamics", volume 49 of de Gruyter Expositions in Mathematics, 2009, 269-304}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402060}, primaryClass={cs.CR} }
anashin2004pseudorandom
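The generators above have the shape x_{i+1} = H_i(x_i) mod 2^n, z_i = F_i(x_i) mod 2^n, with the update allowed to depend on the step counter. A toy univariate illustration of that shape, built on the Klimov-Shamir single-cycle T-function x -> x + (x^2 | 5) mod 2^n; the particular counter-dependence and output truncation below are illustrative only and carry none of the paper's ergodicity guarantees:

```python
N = 32
MASK = (1 << N) - 1

def t_function(x):
    """Klimov-Shamir T-function x -> x + (x^2 | 5) mod 2^N, a standard
    example of a single-cycle (ergodic) map on N-bit words."""
    return (x + ((x * x) | 5)) & MASK

def generate(seed, k):
    """Counter-dependent shape: the update H_i varies with the step i,
    and the output function F publishes only part of the state."""
    x, out = seed & MASK, []
    for i in range(k):
        x = (t_function(x) + i) & MASK  # H_i: illustrative counter mixing
        out.append(x >> (N // 2))       # F: top half of the state
    return out

print(generate(0xDEADBEEF, 5))
```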
arxiv-671714
cs/0402061
A Correlation-Based Distance
<|reference_start|>A Correlation-Based Distance: In this short technical report, we define on the sample space R^D a distance between data points which depends on their correlation. We also derive an expression for the center of mass of a set of points with respect to this distance.<|reference_end|>
arxiv
@article{falcone2004a, title={A Correlation-Based Distance}, author={Jean-Luc Falcone and Paul Albuquerque}, journal={arXiv preprint arXiv:cs/0402061}, year={2004}, archivePrefix={arXiv}, eprint={cs/0402061}, primaryClass={cs.IR} }
falcone2004a
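The report's exact formula is not quoted in this abstract; one common way to turn correlation into a distance, used here purely as an assumed illustrative choice, is d(x, y) = sqrt(1 - rho(x, y)), which vanishes for perfectly correlated points and grows as correlation drops:

```python
import numpy as np

def corr_distance(x, y):
    """A correlation-based distance: d = sqrt(1 - rho). Illustrative
    choice; the report's own definition may differ."""
    rho = np.corrcoef(x, y)[0, 1]
    return float(np.sqrt(1.0 - rho))

x = np.array([1.0, 2.0, 3.0, 4.0])
print(corr_distance(x, 2 * x + 1))  # perfectly correlated -> 0.0
print(corr_distance(x, -x))         # anti-correlated -> sqrt(2)
```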
arxiv-671715
cs/0403001
Evolving a Stigmergic Self-Organized Data-Mining
<|reference_start|>Evolving a Stigmergic Self-Organized Data-Mining: Self-organizing complex systems are typically composed of a large number of frequently similar components or events. A pattern at the global level of such a system emerges solely from numerous interactions among its lower-level components. Moreover, the rules specifying interactions among the system's components are executed using only local information, without reference to the global pattern, which, as in many real-world problems, is not easily accessible or even possible to find. Stigmergy, a kind of indirect communication and learning mediated by the environment and found in social insects, is a well-known example of self-organization. It provides not only vital clues for understanding how components can interact to produce a complex pattern, but can also pinpoint simple biological non-linear rules and methods for achieving improved artificial intelligent adaptive categorization systems, which are critical for data mining. In the present work we intend to show that a new type of data mining can be designed based on stigmergic paradigms, taking advantage of several natural features of this phenomenon. By hybridizing bio-inspired swarm intelligence with evolutionary computation we seek an entirely distributed, adaptive, collective and cooperative self-organized data mining. As a real-world, real-time test bed for our proposal, World-Wide-Web mining is used. To that end, Web usage data was collected from Monash University's Web site (Australia), which receives over 7 million hits every week. Results are compared with those of other recent systems, showing that the presented system is highly promising.<|reference_end|>
arxiv
@article{ramos2004evolving, title={Evolving a Stigmergic Self-Organized Data-Mining}, author={Vitorino Ramos, Ajith Abraham}, journal={arXiv preprint arXiv:cs/0403001}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403001}, primaryClass={cs.AI cs.IR} }
ramos2004evolving
arxiv-671716
cs/0403002
Epistemic Foundation of Stable Model Semantics
<|reference_start|>Epistemic Foundation of Stable Model Semantics: Stable model semantics has become a very popular approach for the management of negation in logic programming. This approach relies mainly on the closed world assumption to complete the available knowledge, and its formulation has its basis in the so-called Gelfond-Lifschitz transformation. The primary goal of this work is to present an epistemic-based characterization of stable model semantics, as an alternative to the Gelfond-Lifschitz transformation. In particular, we show that stable model semantics can be defined entirely as an extension of the Kripke-Kleene semantics. Indeed, we show that the closed world assumption can be seen as an additional source of `falsehood' to be added cumulatively to the Kripke-Kleene semantics. Our approach is purely algebraic and can abstract from the particular formalism of choice, as it is based on monotone operators (under the knowledge order) over bilattices only.<|reference_end|>
arxiv
@article{loyer2004epistemic, title={Epistemic Foundation of Stable Model Semantics}, author={Y. Loyer and U. Straccia}, journal={arXiv preprint arXiv:cs/0403002}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403002}, primaryClass={cs.AI} }
loyer2004epistemic
arxiv-671717
cs/0403003
Genetic Algorithms and Quantum Computation
<|reference_start|>Genetic Algorithms and Quantum Computation: Recently, researchers have applied genetic algorithms (GAs) to address some problems in quantum computation. There has also been some work on the design of genetic algorithms based on quantum-theoretical concepts and techniques. So-called Quantum Evolutionary Programming has two major sub-areas: Quantum Inspired Genetic Algorithms (QIGAs) and Quantum Genetic Algorithms (QGAs). The former adopts qubit chromosomes as representations and employs quantum gates in the search for the best solution. The latter tries to solve a key question in this field: what will GAs look like when implemented on quantum hardware? As we shall see, there is no complete answer to this question. An important point for QGAs is to build a quantum algorithm that takes advantage of both GA and quantum-computing parallelism as well as the true randomness provided by quantum computers. In the first part of this paper we present a survey of the main works combining GAs and quantum computing, including our own work in this area. We then review some basic concepts in quantum computation and GAs and emphasize their inherent parallelism. Next, we review the application of GAs to learning quantum operators and circuit design. Then, quantum evolutionary programming is considered. Finally, we present our current research in this field and some perspectives.<|reference_end|>
arxiv
@article{giraldi2004genetic, title={Genetic Algorithms and Quantum Computation}, author={Gilson A. Giraldi, Renato Portugal, Ricardo N. Thess}, journal={arXiv preprint arXiv:cs/0403003}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403003}, primaryClass={cs.NE} }
giraldi2004genetic
arxiv-671718
cs/0403004
New Visualization of Surfaces in Parallel Coordinates - Eliminating Ambiguity and Some "Over-Plotting"
<|reference_start|>New Visualization of Surfaces in Parallel Coordinates - Eliminating Ambiguity and Some "Over-Plotting": A point $P \in \Real^n$ is represented in Parallel Coordinates by a polygonal line $\bar{P}$ (see \cite{Insel99a} for a recent survey). Earlier \cite{inselberg85plane}, a surface $\sigma$ was represented as the {\em envelope} of the polygonal lines representing its points. This is ambiguous in the sense that {\em different} surfaces can produce the {\em same} envelope. Here the ambiguity is eliminated by considering the surface $\sigma$ as the envelope of its {\em tangent planes} and, in turn, representing each of these planes by $n-1$ points \cite{Insel99a}. This, with some future extension, can yield a new and unambiguous representation, $\bar{\sigma}$, of the surface consisting of $n-1$ planar regions whose properties lead to the {\em recognition} of the surface's properties (i.e. developable, ruled, etc. \cite{hung92smooth}) and to {\em classification} criteria. It is further shown that the image (i.e. representation) of an algebraic surface of degree 2 in $\Real^n$ is a region whose boundary is also an algebraic curve of degree 2. This includes some {\em non-convex} surfaces which could not be treated with the previous ambiguous representation. An efficient construction algorithm for the representation of the quadratic surfaces (given by either an {\em explicit} or an {\em implicit} equation) is provided. The results obtained are suitable for applications, to be presented in a future paper, and in particular for the approximation of complex surfaces based on their {\em planar} images. An additional benefit is the elimination of the ``over-plotting'' problem, i.e. the ``bunching'' of polygonal lines which often obscures part of the parallel-coordinate display.<|reference_end|>
arxiv
@article{izhakian2004new, title={New Visualization of Surfaces in Parallel Coordinates - Eliminating Ambiguity and Some "Over-Plotting"}, author={Zur Izhakian}, journal={Journal of WSCG, 2004, volume 1-3, no 12, pp 183-191, ISSN 1213-6972}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403004}, primaryClass={cs.OH} }
izhakian2004new
arxiv-671719
cs/0403005
Algebraic Curves in Parallel Coordinates - Avoiding the "Over-Plotting" Problem
<|reference_start|>Algebraic Curves in Parallel Coordinates - Avoiding the "Over-Plotting" Problem: Until now the representation (i.e. plotting) of a curve in Parallel Coordinates has been constructed from the point $\leftrightarrow$ line duality. The result is a ``line-curve'' which is seen as the envelope of its tangents. Usually this gives an unclear image and is at the heart of the ``over-plotting'' problem, a barrier to the effective use of Parallel Coordinates. This problem is overcome by a transformation which provides directly the ``point-curve'' representation of a curve. Earlier this was applied to conics and their generalizations. Here the representation, also called dual, is extended to all planar algebraic curves. Specifically, it is shown that the dual of an algebraic curve of degree $n$ is an algebraic curve of degree at most $n(n - 1)$ in the absence of singular points. The result that conics map into conics follows as an easy special case. An algorithm, based on algebraic geometry using resultants and homogeneous polynomials, is obtained which constructs the dual image of the curve. This approach has potential generalizations to multi-dimensional algebraic surfaces and their approximation. The ``trade-off'' price for obtaining a {\em planar} representation of multidimensional algebraic curves and hyper-surfaces is then the higher degree of the image's boundary, which is also an algebraic curve in $\|$-coords.<|reference_end|>
arxiv
@article{izhakian2004algebraic, title={Algebraic Curves in Parallel Coordinates - Avoiding the "Over-Plotting" Problem}, author={Zur Izhakian}, journal={arXiv preprint arXiv:cs/0403005}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403005}, primaryClass={cs.OH} }
izhakian2004algebraic
arxiv-671720
cs/0403006
The role of behavior modifiers in representation development
<|reference_start|>The role of behavior modifiers in representation development: We address the problem of the development of representations and their relationship to the environment. We study a software agent which develops in a network a representation of its simple environment which captures and integrates the relationships between agent and environment through a closure mechanism. The inclusion of a variable behavior modifier allows better representation development. This can be confirmed with an internal description of the closure mechanism, and with an external description of the properties of the representation network.<|reference_end|>
arxiv
@article{b.2004the, title={The role of behavior modifiers in representation development}, author={Carlos R. de la Mora B., Carlos Gershenson, Angelica Garcia-Vega}, journal={arXiv preprint arXiv:cs/0403006}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403006}, primaryClass={cs.AI} }
b.2004the
arxiv-671721
cs/0403007
End-User Effects of Microreboots in Three-Tiered Internet Systems
<|reference_start|>End-User Effects of Microreboots in Three-Tiered Internet Systems: Microreboots restart fine-grained components of software systems "with a clean slate," and only take a fraction of the time needed for full system reboot. Microreboots provide an application-generic recovery technique for Internet services, which can be supported entirely in middleware and requires no changes to the applications or any a priori knowledge of application semantics. This paper investigates the effect of microreboots on end-users of an eBay-like online auction application; we find that microreboots are nearly as effective as full reboots, but are significantly less disruptive in terms of downtime and lost work. In our experiments, microreboots reduced the number of failed user requests by 65% and the perceived downtime by 78% compared to a server process restart. We also show how to replace user-visible transient failures with transparent call-retry, at the cost of a slight increase in end-user-visible latency during recovery. Due to their low cost, microreboots can be used aggressively, even when their necessity is less than certain, hence adding to the reduced recovery time a reduction in the fault detection time, which further improves availability.<|reference_end|>
arxiv
@article{candea2004end-user, title={End-User Effects of Microreboots in Three-Tiered Internet Systems}, author={George Candea and Armando Fox}, journal={arXiv preprint arXiv:cs/0403007}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403007}, primaryClass={cs.OS cs.AR cs.NI} }
candea2004end-user
arxiv-671722
cs/0403008
Polynomial-time computing over quadratic maps I: sampling in real algebraic sets
<|reference_start|>Polynomial-time computing over quadratic maps I: sampling in real algebraic sets: Given a quadratic map Q : K^n -> K^k defined over a computable subring D of a real closed field K, and a polynomial p(Y_1,...,Y_k) of degree d, we consider the zero set Z=Z(p(Q(X)),K^n) of the polynomial p(Q(X_1,...,X_n)). We present a procedure that computes, in (dn)^O(k) arithmetic operations in D, a set S of (real univariate representations of) sampling points in K^n that intersects nontrivially each connected component of Z. As soon as k=o(n), this is faster than the standard methods that all have exponential dependence on n in the complexity. In particular, our procedure is polynomial-time for constant k. In contrast, the best previously known procedure (due to A.Barvinok) is only capable of deciding in n^O(k^2) operations the nonemptiness (rather than constructing sampling points) of the set Z in the case of p(Y)=sum_i Y_i^2 and homogeneous Q. A by-product of our procedure is a bound (dn)^O(k) on the number of connected components of Z. The procedure consists of exact symbolic computations in D and outputs vectors of algebraic numbers. It involves extending K by infinitesimals and subsequent limit computation by a novel procedure that utilizes knowledge of an explicit isomorphism between real algebraic sets.<|reference_end|>
arxiv
@article{grigoriev2004polynomial-time, title={Polynomial-time computing over quadratic maps I: sampling in real algebraic sets}, author={Dima Grigoriev, Dmitrii V. Pasechnik}, journal={Computational Complexity 14(2005) 20-52}, year={2004}, doi={10.1007/s00037-005-0189-7}, archivePrefix={arXiv}, eprint={cs/0403008}, primaryClass={cs.SC cs.CG math.AG} }
grigoriev2004polynomial-time
arxiv-671723
cs/0403009
Demolishing Searle's Chinese Room
<|reference_start|>Demolishing Searle's Chinese Room: Searle's Chinese Room argument is refuted by showing that he has actually given two different versions of the room, which fail for different reasons. Hence, Searle does not achieve his stated goal of showing ``that a system could have input and output capabilities that duplicated those of a native Chinese speaker and still not understand Chinese''.<|reference_end|>
arxiv
@article{schmied2004demolishing, title={Demolishing Searle's Chinese Room}, author={Wolfram Schmied}, journal={arXiv preprint arXiv:cs/0403009}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403009}, primaryClass={cs.AI cs.GL} }
schmied2004demolishing
arxiv-671724
cs/0403010
Polymorphic lemmas and definitions in Lambda Prolog and Twelf
<|reference_start|>Polymorphic lemmas and definitions in Lambda Prolog and Twelf: Lambda Prolog is known to be well-suited for expressing and implementing logics and inference systems. We show that lemmas and definitions in such logics can be implemented with a great economy of expression. We encode a higher-order logic using an encoding that maps both terms and types of the object logic (higher-order logic) to terms of the metalanguage (Lambda Prolog). We discuss both the Terzo and Teyjus implementations of Lambda Prolog. We also encode the same logic in Twelf and compare the features of these two metalanguages for our purposes.<|reference_end|>
arxiv
@article{appel2004polymorphic, title={Polymorphic lemmas and definitions in Lambda Prolog and Twelf}, author={Andrew W. Appel and Amy P. Felty}, journal={Andrew W. Appel and Amy P. Felty, Polymorphic Lemmas and Definitions in Lambda Prolog and Twelf, Theory and Practice of Logic Programming, 4(1&2):1-39, January & March 2004}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403010}, primaryClass={cs.LO cs.PL} }
appel2004polymorphic
arxiv-671725
cs/0403011
Specialization of Functional Logic Programs Based on Needed Narrowing
<|reference_start|>Specialization of Functional Logic Programs Based on Needed Narrowing: Many functional logic languages are based on narrowing, a unification-based goal-solving mechanism which subsumes the reduction mechanism of functional languages and the resolution principle of logic languages. Needed narrowing is an optimal evaluation strategy which constitutes the basis of modern (narrowing-based) lazy functional logic languages. In this work, we present the fundamentals of partial evaluation in such languages. We provide correctness results for partial evaluation based on needed narrowing and show that the nice properties of this strategy are essential for the specialization process. In particular, the structure of the original program is preserved by partial evaluation and, thus, the same evaluation strategy can be applied for the execution of specialized programs. This is in contrast to other partial evaluation schemes for lazy functional logic programs which may change the program structure in a negative way. Recent proposals for the partial evaluation of declarative multi-paradigm programs use (some form of) needed narrowing to perform computations at partial evaluation time. Therefore, our results constitute the basis for the correctness of such partial evaluators.<|reference_end|>
arxiv
@article{alpuente2004specialization, title={Specialization of Functional Logic Programs Based on Needed Narrowing}, author={Maria Alpuente, Michael Hanus, Salvador Lucas, German Vidal}, journal={arXiv preprint arXiv:cs/0403011}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403011}, primaryClass={cs.PL} }
alpuente2004specialization
arxiv-671726
cs/0403012
Distributed Control by Lagrangian Steepest Descent
<|reference_start|>Distributed Control by Lagrangian Steepest Descent: Often adaptive, distributed control can be viewed as an iterated game between independent players. The coupling between the players' mixed strategies, arising as the system evolves from one instant to the next, is determined by the system designer. Information theory tells us that the most likely joint strategy of the players, given a value of the expectation of the overall control objective function, is the minimizer of a Lagrangian function of the joint strategy. So the goal of the system designer is to speed the evolution of the joint strategy to that Lagrangian-minimizing point, lower the expected value of the control objective function, and repeat. Here we elaborate the theory of algorithms that do this using local descent procedures, and that thereby achieve efficient, adaptive, distributed control.<|reference_end|>
arxiv
@article{wolpert2004distributed, title={Distributed Control by Lagrangian Steepest Descent}, author={David H. Wolpert and Stefan Bieniawski}, journal={arXiv preprint arXiv:cs/0403012}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403012}, primaryClass={cs.MA cs.GT nlin.AO} }
wolpert2004distributed
arxiv-671727
cs/0403013
Predictable Software -- A Shortcut to Dependable Computing ?
<|reference_start|>Predictable Software -- A Shortcut to Dependable Computing ?: Many dependability techniques expect certain behaviors from the underlying subsystems and fail in chaotic ways if these expectations are not met. Under expected circumstances, however, software tends to work quite well. This paper suggests that, instead of fixing elusive bugs or rewriting software, we improve the predictability of conditions faced by our programs. This approach might be a cheaper and faster way to improve dependability of software. After identifying some of the common triggers of unpredictability, the paper describes three engineering principles that hold promise in combating unpredictability, suggests a way to benchmark predictability, and outlines a brief research agenda.<|reference_end|>
arxiv
@article{candea2004predictable, title={Predictable Software -- A Shortcut to Dependable Computing ?}, author={George Candea}, journal={WIP Session, USENIX Technical Conference, Boston, MA, June 2004}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403013}, primaryClass={cs.OS cs.DC} }
candea2004predictable
arxiv-671728
cs/0403014
Search Efficiency in Indexing Structures for Similarity Searching
<|reference_start|>Search Efficiency in Indexing Structures for Similarity Searching: Similarity searching finds application in a wide variety of domains including multilingual databases, computational biology, pattern recognition and text retrieval. Similarity is measured in terms of a distance function, such as edit distance in general metric spaces, which is expensive to compute. Indexing techniques can be used to reduce the number of distance computations. We present an analysis of various existing similarity indexing structures for this purpose. The performance obtained using the index structures studied was found to be unsatisfactory. We propose an indexing technique that combines the features of clustering with the M-tree (MTB), and the results indicate that this gives better performance.<|reference_end|>
arxiv
@article{motwani2004search, title={Search Efficiency in Indexing Structures for Similarity Searching}, author={Girish Motwani, Sandhya G. Nair}, journal={arXiv preprint arXiv:cs/0403014}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403014}, primaryClass={cs.DB} }
motwani2004search
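The expensive distance that such index structures try to avoid recomputing is edit distance. For reference, the standard O(|a| * |b|) dynamic-programming computation, kept to two rows of the table:

```python
def edit_distance(a, b):
    """Levenshtein edit distance between strings a and b."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

assert edit_distance("kitten", "sitting") == 3
```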
arxiv-671729
cs/0403015
Belle Computing System
<|reference_start|>Belle Computing System: We describe the present status of the computing system in the Belle experiment at the KEKB $e^+e^-$ asymmetric-energy collider. So far, we have logged more than 160 fb$^{-1}$ of data, corresponding to the world's largest data sample of 170M $B\bar{B}$ pairs at the $\Upsilon(4S)$ energy region. A large amount of event data has to be processed to produce an analysis event sample in a timely fashion. In addition, Monte Carlo events have to be created to control systematic errors accurately. This requires stable and efficient usage of computing resources. Here we review our computing model and then describe how we efficiently carry out DST and MC production in our system.<|reference_end|>
arxiv
@article{adachi2004belle, title={Belle Computing System}, author={Ichiro Adachi, Taisuke Hibino, Luc Hinz, Ryosuke Itoh, Nobu Katayama, Shohei Nishida, Frederic Ronga, Toshifumi Tsukamoto and Masahiko Yokoyama}, journal={Nucl.Instrum.Meth. A534 (2004) 53-58}, year={2004}, doi={10.1016/j.nima.2004.07.058}, archivePrefix={arXiv}, eprint={cs/0403015}, primaryClass={cs.DC} }
adachi2004belle
arxiv-671730
cs/0403016
A Comparative Study of Arithmetic Constraints on Integer Intervals
<|reference_start|>A Comparative Study of Arithmetic Constraints on Integer Intervals: We propose here a number of approaches to implement constraint propagation for arithmetic constraints on integer intervals. To this end we introduce integer interval arithmetic. Each approach is explained using appropriate proof rules that reduce the variable domains. We compare these approaches using a set of benchmarks.<|reference_end|>
arxiv
@article{apt2004a, title={A Comparative Study of Arithmetic Constraints on Integer Intervals}, author={Krzysztof R. Apt and Peter Zoeteweij}, journal={arXiv preprint arXiv:cs/0403016}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403016}, primaryClass={cs.PL cs.AI} }
apt2004a
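Integer interval arithmetic evaluates arithmetic operations on whole variable domains at once. A minimal sketch of the forward operations on closed integer intervals (the paper's proof rules also need inverse operations, such as integer interval division, to actually narrow domains; those are not shown):

```python
class IntInterval:
    """Closed integer interval [lo, hi]."""
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def __add__(self, other):
        return IntInterval(self.lo + other.lo, self.hi + other.hi)

    def __mul__(self, other):
        # The product interval is spanned by the endpoint products.
        prods = [a * b for a in (self.lo, self.hi)
                       for b in (other.lo, other.hi)]
        return IntInterval(min(prods), max(prods))

    def __repr__(self):
        return f"[{self.lo}, {self.hi}]"

x, y = IntInterval(-2, 3), IntInterval(4, 5)
print(x + y, x * y)  # [2, 8] [-10, 15]
```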
arxiv-671731
cs/0403017
Extending the SDSS Batch Query System to the National Virtual Observatory Grid
<|reference_start|>Extending the SDSS Batch Query System to the National Virtual Observatory Grid: The Sloan Digital Sky Survey science database is approaching 2TB. While the vast majority of queries normally execute in seconds or minutes, this interactive execution time can be disproportionately increased by a small fraction of queries that take hours or days to run; either because they require non-index scans of the largest tables or because they request very large result sets. In response to this, we added a multi-queue job submission and tracking system. The transfer of very large result sets from queries over the network is another serious problem. Statistics suggested that much of this data transfer is unnecessary; users would prefer to store results locally in order to allow further cross matching and filtering. To allow local analysis, we implemented a system that gives users their own personal database (MyDB) at the portal site. Users may transfer data to their MyDB, and then perform further analysis before extracting it to their own machine. We intend to extend the MyDB and asynchronous query ideas to multiple NVO nodes. This implies development, in a distributed manner, of several features, which have been demonstrated for a single node in the SDSS Batch Query System (CasJobs). The generalization of asynchronous queries necessitates some form of MyDB storage as well as workflow tracking services on each node and coordination strategies among nodes.<|reference_end|>
arxiv
@article{nieto-santisteban2004extending, title={Extending the SDSS Batch Query System to the National Virtual Observatory Grid}, author={Maria A. Nieto-Santisteban, William O'Mullane, Jim Gray, Nolan Li, Tamas Budavari, Alexander S. Szalay, Aniruddha R. Thakar}, journal={arXiv preprint arXiv:cs/0403017}, year={2004}, number={MSR-TR-2004-12}, archivePrefix={arXiv}, eprint={cs/0403017}, primaryClass={cs.DB} }
nieto-santisteban2004extending
arxiv-671732
cs/0403018
The World Wide Telescope: An Archetype for Online Science
<|reference_start|>The World Wide Telescope: An Archetype for Online Science: Most scientific data will never be directly examined by scientists; rather it will be put into online databases where it will be analyzed and summarized by computer programs. Scientists increasingly see their instruments through online scientific archives and analysis tools, rather than examining the raw data. Today this analysis is primarily driven by scientists asking queries, but scientific archives are becoming active databases that self-organize and recognize interesting and anomalous facts as data arrives. In some fields, data from many different archives can be cross-correlated to produce new insights. Astronomy presents an excellent example of these trends; and, federating Astronomy archives presents interesting challenges for computer scientists.<|reference_end|>
arxiv
@article{gray2004the, title={The World Wide Telescope: An Archetype for Online Science}, author={Jim Gray, Alexander S. Szalay}, journal={CACM, V. 45.11, pp. 50-54, Nov. 2002}, year={2004}, number={MSR-TR-2002-75}, archivePrefix={arXiv}, eprint={cs/0403018}, primaryClass={cs.DB} }
gray2004the
arxiv-671733
cs/0403019
Distributed Computing Economics
<|reference_start|>Distributed Computing Economics: Computing economics are changing. Today there is rough price parity between (1) one database access, (2) ten bytes of network traffic, (3) 100,000 instructions, (4) 10 bytes of disk storage, and (5) a megabyte of disk bandwidth. This has implications for how one structures Internet-scale distributed computing: one puts computing as close to the data as possible in order to avoid expensive network traffic.<|reference_end|>
arxiv
@article{gray2004distributed, title={Distributed Computing Economics}, author={Jim Gray}, journal={arXiv preprint arXiv:cs/0403019}, year={2004}, number={MSR-TR-2003-24}, archivePrefix={arXiv}, eprint={cs/0403019}, primaryClass={cs.NI cs.DC} }
gray2004distributed
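The quoted parity implies roughly 100,000 / 10 = 10,000 instructions per byte of network traffic: a computation is worth shipping away from its data only when it performs substantially more work per byte than that. A back-of-envelope check of the break-even, using only the numbers in the abstract:

```python
# Price parity from the abstract: 10 bytes of network traffic costs about
# as much as 100,000 instructions, i.e. ~10,000 instructions per byte.
INSTR_PER_NET_BYTE = 100_000 / 10

def placement(instructions, bytes_moved):
    """Ship the data only if the computation is expensive enough per byte
    to outweigh the network cost of moving its input."""
    if instructions / bytes_moved < INSTR_PER_NET_BYTE:
        return "compute near the data"
    return "shipping the data can pay off"

print(placement(1_000, 1_000))    # I/O-bound scan: keep it local
print(placement(10**9, 1_000))    # CPU-bound job on little data: move it
```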
arxiv-671734
cs/0403020
The Sloan Digital Sky Survey Science Archive: Migrating a Multi-Terabyte Astronomical Archive from Object to Relational DBMS
<|reference_start|>The Sloan Digital Sky Survey Science Archive: Migrating a Multi-Terabyte Astronomical Archive from Object to Relational DBMS: The Sloan Digital Sky Survey Science Archive is the first in a series of multi-Terabyte digital archives in Astronomy and other data-intensive sciences. To facilitate data mining in the SDSS archive, we adapted a commercial database engine and built specialized tools on top of it. Originally we chose an object-oriented database management system due to its data organization capabilities, platform independence, query performance and conceptual fit to the data. However, after using the object database for the first couple of years of the project, it soon began to fall short in terms of its query support and data mining performance. This was as much due to the inability of the database vendor to respond to our demands for features and bug fixes as it was due to their failure to keep up with the rapid improvements in hardware performance, particularly faster RAID disk systems. In the end, we were forced to abandon the object database and migrate our data to a relational database. We describe below the technical issues that we faced with the object database and how and why we migrated to relational technology.<|reference_end|>
arxiv
@article{thakar2004the, title={The Sloan Digital Sky Survey Science Archive: Migrating a Multi-Terabyte Astronomical Archive from Object to Relational DBMS}, author={Aniruddha R. Thakar, Alexander S. Szalay, Peter Z. Kunszt, Jim Gray}, journal={Comput.Sci.Eng. 5 (2003) 16-29}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403020}, primaryClass={cs.DB} }
thakar2004the
arxiv-671735
cs/0403021
A Quick Look at SATA Disk Performance
<|reference_start|>A Quick Look at SATA Disk Performance: We have been investigating the use of low-cost, commodity components for multi-terabyte SQL Server databases. Dubbed storage bricks, these servers are white-box PCs containing the largest ATA drives, value-priced AMD or Intel processors, and inexpensive ECC memory. One issue has been the wiring mess, air flow problems, length restrictions, and connector failures created by seven or more parallel ATA (PATA) ribbon cables and drives in a tower or 3U rack-mount chassis. Large capacity Serial ATA (SATA) drives have recently become widely available for the PC environment at a reasonable price. In addition to being faster, the SATA connectors seem more reliable, have a more reasonable length restriction (1m) and allow better airflow. We tested two drive brands along with two RAID controllers to evaluate SATA drive performance and reliability. This paper documents our results so far.<|reference_end|>
arxiv
@article{barclay2004a, title={A Quick Look at SATA Disk Performance}, author={Tom Barclay, Wyman Chong, Jim Gray}, journal={arXiv preprint arXiv:cs/0403021}, year={2004}, number={MSR-TR-2003-70, Oct. 2003}, archivePrefix={arXiv}, eprint={cs/0403021}, primaryClass={cs.DB cs.PF} }
barclay2004a
arxiv-671736
cs/0403022
Fast Multipoint-Evaluation of Bivariate Polynomials
<|reference_start|>Fast Multipoint-Evaluation of Bivariate Polynomials: We generalize univariate multipoint evaluation of polynomials of degree n at sublinear amortized cost per point. More precisely, it is shown how to evaluate a bivariate polynomial p of maximum degree less than n, specified by its n^2 coefficients, simultaneously at n^2 given points using a total of O(n^{2.667}) arithmetic operations. In terms of the input size N being quadratic in n, this amounts to an amortized cost of O(N^{0.334}) per point.<|reference_end|>
arxiv
@article{nüsken2004fast, title={Fast Multipoint-Evaluation of Bivariate Polynomials}, author={Michael Nüsken, Martin Ziegler}, journal={arXiv preprint arXiv:cs/0403022}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403022}, primaryClass={cs.DS} }
nüsken2004fast
arxiv-671737
cs/0403023
Secure Transmission of Sensitive data using multiple channels
<|reference_start|>Secure Transmission of Sensitive data using multiple channels: A new scheme for transmitting sensitive data is proposed. The scheme depends on partitioning the output of a block encryption module, using the Chinese Remainder Theorem, among a set of channels. The purpose of using the Chinese Remainder Theorem is to hide the cipher text in order to increase the difficulty of attacking the cipher. The theory, implementation, and security of this scheme are described in this paper.<|reference_end|>
arxiv
@article{belal2004secure, title={Secure Transmission of Sensitive data using multiple channels}, author={Ahmed A. Belal, Abdelhamid S. Abdelhamid}, journal={arXiv preprint arXiv:cs/0403023}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403023}, primaryClass={cs.CR} }
belal2004secure
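The partitioning step can be illustrated directly: treat an encrypted block as an integer, send its residues modulo pairwise-coprime moduli over separate channels, and recombine with the CRT. A minimal sketch under assumed parameters (the three moduli below, the largest primes under 2^31, are illustrative; the paper's scheme additionally specifies the block-encryption stage and its security analysis):

```python
from math import prod

def split(block, moduli):
    """One residue per channel; the moduli must be pairwise coprime and
    their product must exceed the block value."""
    assert block < prod(moduli)
    return [block % m for m in moduli]

def combine(residues, moduli):
    """Recover the block with the Chinese Remainder Theorem."""
    M = prod(moduli)
    x = 0
    for r, m in zip(residues, moduli):
        Mi = M // m
        x += r * Mi * pow(Mi, -1, m)  # modular inverse (Python 3.8+)
    return x % M

moduli = [2**31 - 1, 2**31 - 19, 2**31 - 61]  # pairwise coprime (all prime)
block = 0x0123456789ABCDEF
assert combine(split(block, moduli), moduli) == block
```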
arxiv-671738
cs/0403024
Uniform Proofs of Order Independence for Various Strategy Elimination Procedures
<|reference_start|>Uniform Proofs of Order Independence for Various Strategy Elimination Procedures: We provide elementary and uniform proofs of order independence for various strategy elimination procedures for finite strategic games, both for dominance by pure and by mixed strategies. The proofs follow the same pattern and focus on the structural properties of the dominance relations. They rely on Newman's Lemma, established in 1942, and on related results about abstract reduction systems.<|reference_end|>
arxiv
@article{apt2004uniform, title={Uniform Proofs of Order Independence for Various Strategy Elimination Procedures}, author={Krzysztof R. Apt}, journal={Contributions to Theoretical Economics, Vol. 4: No. 1, Article 5, 2004, http://www.bepress.com/bejte/contributions/vol4/iss1/art5}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403024}, primaryClass={cs.GT cs.LO} }
apt2004uniform
arxiv-671739
cs/0403025
Distribution of Mutual Information from Complete and Incomplete Data
<|reference_start|>Distribution of Mutual Information from Complete and Incomplete Data: Mutual information is widely used, in a descriptive way, to measure the stochastic dependence of categorical random variables. In order to address questions such as the reliability of the descriptive value, one must consider sample-to-population inferential approaches. This paper deals with the posterior distribution of mutual information, as obtained in a Bayesian framework by a second-order Dirichlet prior distribution. The exact analytical expression for the mean, and analytical approximations for the variance, skewness and kurtosis are derived. These approximations have a guaranteed accuracy level of the order O(1/n^3), where n is the sample size. Leading order approximations for the mean and the variance are derived in the case of incomplete samples. The derived analytical expressions allow the distribution of mutual information to be approximated reliably and quickly. In fact, the derived expressions can be computed with the same order of complexity needed for descriptive mutual information. This makes the distribution of mutual information become a concrete alternative to descriptive mutual information in many applications which would benefit from moving to the inductive side. Some of these prospective applications are discussed, and one of them, namely feature selection, is shown to perform significantly better when inductive mutual information is used.<|reference_end|>
arxiv
@article{hutter2004distribution, title={Distribution of Mutual Information from Complete and Incomplete Data}, author={Marcus Hutter and Marco Zaffalon}, journal={Computational Statistics & Data Analysis, Vol.48, No.3, March 2005, pages 633--657}, year={2004}, number={IDSIA-11-02}, archivePrefix={arXiv}, eprint={cs/0403025}, primaryClass={cs.LG cs.AI cs.IT math.IT math.ST stat.TH} }
hutter2004distribution
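The descriptive quantity whose posterior the paper characterizes is the plug-in mutual information of a contingency table of counts, shown here for reference (in nats); the paper's contribution, the moments of this quantity under a second-order Dirichlet prior, is not reproduced here:

```python
import numpy as np

def mutual_information(counts):
    """Descriptive (plug-in) mutual information, in nats, of a
    two-way contingency table of counts."""
    pij = counts / counts.sum()
    pi = pij.sum(axis=1, keepdims=True)   # row marginals
    pj = pij.sum(axis=0, keepdims=True)   # column marginals
    nz = pij > 0                          # treat 0 * log(0) as 0
    return float((pij[nz] * np.log(pij[nz] / (pi @ pj)[nz])).sum())

table = np.array([[30.0, 10.0],
                  [ 5.0, 55.0]])
print(mutual_information(table))
```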
arxiv-671740
cs/0403026
What we should teach, but don't: Proposal for a cross pollinated HCI-SE curriculum
<|reference_start|>What we should teach, but don't: Proposal for a cross pollinated HCI-SE curriculum: Software engineering (SE) and usability engineering (UE), as disciplines, have reached substantial levels of maturity. Each of these two disciplines is now well represented with respect to most computer science (CS) curricula. But the two disciplines are practiced almost independently - missing opportunities to collaborate, coordinate and communicate about the overall design - and thereby contributing to system failures. Today, a confluence of several ingredients contributes to these failures: the increasing importance of the user interface (UI) component in the overall system, the independent maturation of the human computer interaction area, and the lack of a cohesive process model to integrate the UI experts' UE development efforts with those of SE. This in turn, we believe, is a result of a void in computing curricula: a lack of education and training regarding the importance of communication, collaboration and coordination between the SE and UE processes. In this paper we describe the current approach to teaching SE and UE and its shortcomings. We identify and analyze the barriers and issues involved in developing systems having substantial interactive components. We then propose four major themes of learning for a comprehensive computing curriculum integrating SE, UE, and system architectures in a project environment.<|reference_end|>
arxiv
@article{pyla2004what, title={What we should teach, but don't: Proposal for a cross pollinated HCI-SE curriculum}, author={Pardha S. Pyla, Manuel A. Perez-Quinones, James D. Arthur and H. Rex Hartson}, journal={arXiv preprint arXiv:cs/0403026}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403026}, primaryClass={cs.OH} }
pyla2004what
arxiv-671741
cs/0403027
An approach to membrane computing under inexactitude
<|reference_start|>An approach to membrane computing under inexactitude: In this paper we introduce a fuzzy version of symport/antiport membrane systems. Our fuzzy membrane systems handle possibly inexact copies of reactives and their rules are endowed with threshold functions that determine whether a rule can be applied or not to a given set of objects, depending of the degree of accuracy of these objects to the reactives specified in the rule. We prove that these fuzzy membrane systems generate exactly the recursively enumerable finite-valued fuzzy sets of natural numbers.<|reference_end|>
arxiv
@article{casasnovas2004an, title={An approach to membrane computing under inexactitude}, author={Jaume Casasnovas, Joe Miro, Manuel Moya, Francesc Rossello}, journal={arXiv preprint arXiv:cs/0403027}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403027}, primaryClass={cs.OH cs.NE} }
casasnovas2004an
arxiv-671742
cs/0403028
An Application of Rational Trees in a Logic Programming Interpreter for a Procedural Language
<|reference_start|>An Application of Rational Trees in a Logic Programming Interpreter for a Procedural Language: We describe here a simple application of rational trees to the implementation of an interpreter for a procedural language written in a logic programming language. This is possible in languages designed to support rational trees (such as Prolog II and its descendants), but also in traditional Prolog, whose data structures are nominally based on Herbrand terms but whose implementations often omit the occurs check needed to avoid the creation of infinite data structures. We provide code implementing two interpreters, one of which needs non-occurs-check unification, which makes it faster (and more economical). We provide experimental data supporting this, and we argue that rational trees are interesting enough to receive thorough support inside the language.<|reference_end|>
arxiv
@article{carro2004an, title={An Application of Rational Trees in a Logic Programming Interpreter for a Procedural Language}, author={Manuel Carro}, journal={arXiv preprint arXiv:cs/0403028}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403028}, primaryClass={cs.DS cs.LO} }
carro2004an
arxiv-671743
cs/0403029
Characterization of the Burst Stabilization Protocol for the RR/RR CICQ Switch
<|reference_start|>Characterization of the Burst Stabilization Protocol for the RR/RR CICQ Switch: Input buffered switches with Virtual Output Queueing (VOQ) can be unstable when presented with unbalanced loads. Existing scheduling algorithms, including iSLIP for Input Queued (IQ) switches and Round Robin (RR) for Combined Input and Crossbar Queued (CICQ) switches, exhibit instability for some schedulable loads. We investigate the use of a queue length threshold and bursting mechanism to achieve stability without requiring internal speed-up. An analytical model is developed to prove that the burst stabilization protocol achieves stability and to predict the minimum burst value needed as a function of offered load. The analytical model is shown to have very good agreement with simulation results. These results show the advantage of the RR/RR CICQ switch as a contender for the next generation of high-speed switches.<|reference_end|>
arxiv
@article{gunther2004characterization, title={Characterization of the Burst Stabilization Protocol for the RR/RR CICQ Switch}, author={Neil J. Gunther, Kenneth J. Christensen and Kenji Yoshigoe}, journal={arXiv preprint arXiv:cs/0403029}, year={2004}, doi={10.1109/LCN.2003.1243135}, archivePrefix={arXiv}, eprint={cs/0403029}, primaryClass={cs.NI cs.PF} }
gunther2004characterization
arxiv-671744
cs/0403030
Performance Evaluation of Packet-to-Cell Segmentation Schemes in Input Buffered Packet Switches
<|reference_start|>Performance Evaluation of Packet-to-Cell Segmentation Schemes in Input Buffered Packet Switches: Most input buffered packet switches internally segment variable-length packets into fixed-length cells. The last cell in a segmented packet will contain overhead bytes if the packet length is not evenly divisible by the cell length. Switch speed-up is used to compensate for this overhead. In this paper, we develop an analytical model of a single-server queue where an input stream of packets is segmented into cells for service. Analytical models are developed for M/M/1, M/H2/1, and M/E2/1 queues with a discretized (or quantized) service time. These models and simulation using real packet traces are used to evaluate the effect of speed-up on mean queue length. We propose and evaluate a new method of segmenting a packet trailer and subsequent packet header into a single cell. This cell merging method reduces the required speed-up. No changes to switch-matrix scheduling algorithms are needed. Simulation with a packet trace shows a reduction in the needed speed-up for an iSLIP scheduled input buffered switch.<|reference_end|>
arxiv
@article{christensen2004performance, title={Performance Evaluation of Packet-to-Cell Segmentation Schemes in Input Buffered Packet Switches}, author={K. J. Christensen, K. Yoshigoe, A. Roginsky and N. J. Gunther}, journal={arXiv preprint arXiv:cs/0403030}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403030}, primaryClass={cs.NI cs.PF} }
christensen2004performance
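To see why packing cells more tightly lowers the required speed-up, the following back-of-the-envelope sketch (hypothetical 64-byte cells; not the paper's queueing model) counts the cells needed with per-packet padding versus an idealised merged stream in which a packet trailer and the next packet's header may share a cell.

```python
import math

def cells_per_packet(packet_lengths, cell_size=64):
    """Conventional segmentation: each packet is padded out to a whole
    number of fixed-size cells, so padding overhead is paid per packet."""
    return sum(math.ceil(p / cell_size) for p in packet_lengths)

def cells_merged(packet_lengths, cell_size=64):
    """Idealised cell merging: trailers and subsequent headers share
    cells, leaving at most one partially filled cell in the stream."""
    return math.ceil(sum(packet_lengths) / cell_size)

pkts = [40, 1500, 576]  # packet sizes in bytes
print(cells_per_packet(pkts), cells_merged(pkts))  # the ratio ~ speed-up saved
```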
arxiv-671745
cs/0403031
Concept of E-machine: How does a "dynamical" brain learn to process "symbolic" information? Part I
<|reference_start|>Concept of E-machine: How does a "dynamical" brain learn to process "symbolic" information? Part I: The human brain has many remarkable information processing characteristics that deeply puzzle scientists and engineers. Among the most important and the most intriguing of these characteristics are the brain's broad universality as a learning system and its mysterious ability to dynamically change (reconfigure) its behavior depending on a combinatorial number of different contexts. This paper discusses a class of hypothetically brain-like dynamically reconfigurable associative learning systems that shed light on the possible nature of these properties of the brain. The systems are arranged on the general principle referred to as the concept of E-machine. The paper addresses the following questions: 1. How can "dynamical" neural networks function as universal programmable "symbolic" machines? 2. What kind of a universal programmable symbolic machine can form arbitrarily complex software in the process of programming similar to the process of biological associative learning? 3. How can a universal learning machine dynamically reconfigure its software depending on a combinatorial number of possible contexts?<|reference_end|>
arxiv
@article{eliashberg2004concept, title={Concept of E-machine: How does a "dynamical" brain learn to process "symbolic" information? Part I}, author={Victor Eliashberg}, journal={arXiv preprint arXiv:cs/0403031}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403031}, primaryClass={cs.AI cs.LG} }
eliashberg2004concept
arxiv-671746
cs/0403032
Where Fail-Safe Default Logics Fail
<|reference_start|>Where Fail-Safe Default Logics Fail: Reiter's original definition of default logic allows for the application of a default that contradicts a previously applied one. We call this condition failure. The possibility of generating failures has in the past been considered a semantical problem, and variants have been proposed to solve it. We show that it is instead a computational feature that is needed to encode some domains into default logic.<|reference_end|>
arxiv
@article{liberatore2004where, title={Where Fail-Safe Default Logics Fail}, author={Paolo Liberatore}, journal={arXiv preprint arXiv:cs/0403032}, year={2004}, doi={10.1145/1227839.1227842}, archivePrefix={arXiv}, eprint={cs/0403032}, primaryClass={cs.AI cs.LO} }
liberatore2004where
arxiv-671747
cs/0403033
Integrating design synthesis and assembly of structured objects in a visual design language
<|reference_start|>Integrating design synthesis and assembly of structured objects in a visual design language: Computer Aided Design systems provide tools for building and manipulating models of solid objects. Some also provide access to programming languages so that parametrised designs can be expressed. There is a sharp distinction, therefore, between building models, a concrete graphical editing activity, and programming, an abstract, textual, algorithm-construction activity. The recently proposed Language for Structured Design (LSD) was motivated by a desire to combine the design and programming activities in one language. LSD achieves this by extending a visual logic programming language to incorporate the notions of solids and operations on solids. Here we investigate another aspect of the LSD approach; namely, that by using visual logic programming as the engine to drive the parametrised assembly of objects, we also gain the powerful symbolic problem-solving capability that is the forte of logic programming languages. This allows the designer/programmer to work at a higher level, giving declarative specifications of a design in order to obtain the design descriptions. Hence LSD integrates problem solving, design synthesis, and prototype assembly in a single homogeneous programming/design environment. We demonstrate this specification-to-final-assembly capability using the masterkeying problem for designing systems of locks and keys.<|reference_end|>
arxiv
@article{banyasad2004integrating, title={Integrating design synthesis and assembly of structured objects in a visual design language}, author={Omid Banyasad, Philip T. Cox}, journal={arXiv preprint arXiv:cs/0403033}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403033}, primaryClass={cs.LO cs.PL} }
banyasad2004integrating
arxiv-671748
cs/0403034
Phantom Types and Subtyping
<|reference_start|>Phantom Types and Subtyping: We investigate a technique from the literature, called the phantom-types technique, that uses parametric polymorphism, type constraints, and unification of polymorphic types to model a subtyping hierarchy. Hindley-Milner type systems, such as the one found in Standard ML, can be used to enforce the subtyping relation, at least for first-order values. We show that this technique can be used to encode any finite subtyping hierarchy (including hierarchies arising from multiple interface inheritance). We formally demonstrate the suitability of the phantom-types technique for capturing first-order subtyping by exhibiting a type-preserving translation from a simple calculus with bounded polymorphism to a calculus embodying the type system of SML.<|reference_end|>
arxiv
@article{fluet2004phantom, title={Phantom Types and Subtyping}, author={Matthew Fluet, Riccardo Pucella}, journal={arXiv preprint arXiv:cs/0403034}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403034}, primaryClass={cs.PL} }
fluet2004phantom
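The paper's encoding is given in Standard ML; purely as an illustration of the phantom-types idea, here is a rough Python analogue using the typing module, where the type parameter tags a value without ever appearing in its representation. The class and function names are invented for the example.

```python
from typing import Generic, TypeVar

class Widget: ...
class Button(Widget): ...  # a tiny subtyping hierarchy to encode

T_co = TypeVar("T_co", covariant=True)

class Handle(Generic[T_co]):
    """Phantom-type sketch: T_co occurs in no field; it only tags the
    handle so that a static checker can enforce the hierarchy."""
    def __init__(self, raw: int) -> None:
        self.raw = raw

def use_widget(h: Handle[Widget]) -> int:
    return h.raw

btn: Handle[Button] = Handle(42)
use_widget(btn)  # fine for a checker: Handle[Button] <: Handle[Widget]
```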
arxiv-671749
cs/0403035
Web pages search engine based on DNS
<|reference_start|>Web pages search engine based on DNS: Search engines are the main means of access to the largest information source in the world, the Internet. The Internet is now changing every aspect of our lives, and information retrieval may be its most important service. For the common user, however, Internet search services are still far from expectations: too many unrelated search results, outdated information, etc. To solve these problems, a new system, a web pages search engine based on DNS, is proposed. The original idea, detailed content and implementation of this system are all introduced in this paper.<|reference_end|>
arxiv
@article{liang2004web, title={Web pages search engine based on DNS}, author={Wang Liang, Guo Yi-Ping, Fang Ming}, journal={arXiv preprint arXiv:cs/0403035}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403035}, primaryClass={cs.NI cs.IR} }
liang2004web
arxiv-671750
cs/0403036
Domain resource integration system
<|reference_start|>Domain resource integration system: The Domain Resource Integrated System (DRIS) is introduced in this paper. DRIS is a hierarchical, distributed Internet information retrieval system. This system addresses bottleneck problems of current web search systems, such as long update intervals and poor coverage. DRIS is intended to build the information retrieval infrastructure of the Internet, not a commercial search engine. The protocol series of DRIS is also detailed in this paper.<|reference_end|>
arxiv
@article{liang2004domain, title={Domain resource integration system}, author={Wang Liang, Guo Yi-Ping, Fang Ming}, journal={arXiv preprint arXiv:cs/0403036}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403036}, primaryClass={cs.NI cs.DL} }
liang2004domain
arxiv-671751
cs/0403037
Schedulers and Redundancy for a Class of Constraint Propagation Rules
<|reference_start|>Schedulers and Redundancy for a Class of Constraint Propagation Rules: We study here schedulers for a class of rules that naturally arise in the context of rule-based constraint programming. We systematically derive a scheduler for them from a generic iteration algorithm of [Apt 2000]. We apply this study to so-called membership rules of [Apt and Monfroy 2001]. This leads to an implementation that yields a considerably better performance for these rules than their execution as standard CHR rules. Finally, we show how redundant rules can be identified and how appropriately reduced sets of rules can be computed.<|reference_end|>
arxiv
@article{brand2004schedulers, title={Schedulers and Redundancy for a Class of Constraint Propagation Rules}, author={Sebastian Brand and Krzysztof R. Apt}, journal={arXiv preprint arXiv:cs/0403037}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403037}, primaryClass={cs.DS cs.PL} }
brand2004schedulers
arxiv-671752
cs/0403038
Tournament versus Fitness Uniform Selection
<|reference_start|>Tournament versus Fitness Uniform Selection: In evolutionary algorithms a critical parameter that must be tuned is that of selection pressure. If it is set too low then the rate of convergence towards the optimum is likely to be slow. Alternatively, if the selection pressure is set too high, the system is likely to become stuck in a local optimum due to a loss of diversity in the population. The recent Fitness Uniform Selection Scheme (FUSS) is a conceptually simple but somewhat radical approach to addressing this problem - rather than biasing the selection towards higher fitness, FUSS biases selection towards sparsely populated fitness levels. In this paper we compare the relative performance of FUSS with the well known tournament selection scheme on a range of problems.<|reference_end|>
arxiv
@article{legg2004tournament, title={Tournament versus Fitness Uniform Selection}, author={Shane Legg and Marcus Hutter and Akshat Kumar}, journal={Proc. 2004 Congress on Evolutionary Computation (CEC-2004), pages 2144--2151}, year={2004}, number={IDSIA-04-04}, archivePrefix={arXiv}, eprint={cs/0403038}, primaryClass={cs.LG cs.AI} }
legg2004tournament
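For readers unfamiliar with the two schemes being compared, a minimal sketch of both (our own formulation, not the paper's code) is:

```python
import random

def fuss_select(population, fitness):
    """Fitness Uniform Selection sketch: draw a target fitness uniformly
    between the current minimum and maximum, then return the individual
    whose fitness is closest to it; sparsely populated fitness levels
    are thereby favoured."""
    fits = [fitness(ind) for ind in population]
    target = random.uniform(min(fits), max(fits))
    return min(zip(population, fits), key=lambda pf: abs(pf[1] - target))[0]

def tournament_select(population, fitness, k=2):
    """Standard k-way tournament selection, the baseline scheme."""
    return max(random.sample(population, k), key=fitness)

pop = [0, 1, 1, 1, 1, 7]  # toy genomes; fitness is the value itself
print(fuss_select(pop, lambda g: g), tournament_select(pop, lambda g: g))
```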
arxiv-671753
cs/0403039
A Flexible Rule Compiler for Speech Synthesis
<|reference_start|>A Flexible Rule Compiler for Speech Synthesis: We present a flexible rule compiler developed for a text-to-speech (TTS) system. The compiler converts a set of rules into a finite-state transducer (FST). The input and output of the FST are subject to parameterization, so that the system can be applied to strings and sequences of feature-structures. The resulting transducer is guaranteed to realize a function (as opposed to a relation), and therefore can be implemented as a deterministic device (either a deterministic FST or a bimachine).<|reference_end|>
arxiv
@article{skut2004a, title={A Flexible Rule Compiler for Speech Synthesis}, author={Wojciech Skut, Stefan Ulrich, Kathrine Hammervold}, journal={In: Klopotek, Mieczyslaw A.; Wierzchon, Slawomir T.; Trojanowski, Krzysztof (Eds.): "Intelligent Information Processing and Web Mining - Proceedings of the International IIS:IIPWM?04 Conference"; Springer Verlag, 2004}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403039}, primaryClass={cs.CL cs.AI} }
skut2004a
arxiv-671754
cs/0403040
Generating connected acyclic digraphs uniformly at random
<|reference_start|>Generating connected acyclic digraphs uniformly at random: We describe a simple algorithm based on a Markov chain process to generate simply connected acyclic directed graphs over a fixed set of vertices. This algorithm is an extension of a previous one, designed to generate acyclic digraphs that are not necessarily connected.<|reference_end|>
arxiv
@article{melancon2004generating, title={Generating connected acyclic digraphs uniformly at random}, author={Guy Melancon, Fabrice Philippe}, journal={arXiv preprint arXiv:cs/0403040}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403040}, primaryClass={cs.DM cs.DS} }
melancon2004generating
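A sketch of the kind of Markov chain the abstract refers to, in Python: arcs are toggled at random and additions that would close a directed cycle are rejected. This version samples plain DAGs only; the connectedness constraint of the paper's extension is omitted here.

```python
import random

def creates_cycle(adj, i, j):
    """Would adding arc i -> j close a directed cycle?  True iff a
    directed path j -> ... -> i already exists (simple DFS)."""
    stack, seen = [j], set()
    while stack:
        v = stack.pop()
        if v == i:
            return True
        if v not in seen:
            seen.add(v)
            stack.extend(adj[v])
    return False

def random_dag(n, steps=20000):
    """Markov-chain sampler sketch: pick a random ordered pair, delete
    the arc if present, otherwise add it unless that closes a cycle."""
    adj = {v: set() for v in range(n)}
    for _ in range(steps):
        i, j = random.sample(range(n), 2)
        if j in adj[i]:
            adj[i].remove(j)
        elif not creates_cycle(adj, i, j):
            adj[i].add(j)
    return adj

print(random_dag(5))
```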
arxiv-671755
cs/0403041
A Theory of Computation Based on Quantum Logic (I)
<|reference_start|>A Theory of Computation Based on Quantum Logic (I): The (meta)logic underlying classical theory of computation is Boolean (two-valued) logic. Quantum logic was proposed by Birkhoff and von Neumann as a logic of quantum mechanics more than sixty years ago. The major difference between Boolean logic and quantum logic is that the latter does not enjoy distributivity in general. The rapid development of quantum computation in recent years stimulates us to establish a theory of computation based on quantum logic. The present paper is the first step toward such a new theory and it focuses on the simplest models of computation, namely finite automata. It is found that the universal validity of many properties of automata depends heavily upon the distributivity of the underlying logic. This indicates that these properties do not universally hold in the realm of quantum logic. On the other hand, we show that a local validity of these properties can be recovered by imposing a certain commutativity on the (atomic) statements about the automata under consideration. This reveals an essential difference between the classical theory of computation and the computation theory based on quantum logic.<|reference_end|>
arxiv
@article{ying2004a, title={A Theory of Computation Based on Quantum Logic (I)}, author={Mingsheng Ying}, journal={Theoretical Computer Science 344(2-3): 134-207 (2005)}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403041}, primaryClass={cs.LO} }
ying2004a
arxiv-671756
cs/0403042
Protecting Public-Access Sites Against Distributed Denial-of-Service Attacks
<|reference_start|>Protecting Public-Access Sites Against Distributed Denial-of-Service Attacks: A distributed denial-of-service (DDoS) attack can flood a victim site with malicious traffic, causing service disruption or even complete failure. Public-access sites like amazon or ebay are particularly vulnerable to such attacks, because they have no way of a priori blocking unauthorized traffic. We present Active Internet Traffic Filtering (AITF), a mechanism that protects public-access sites from highly distributed attacks by causing undesired traffic to be blocked as close as possible to its sources. We identify filters as a scarce resource and show that AITF protects a significant amount of the victim's bandwidth, while requiring from each participating router a number of filters that can be accommodated by today's routers. AITF is incrementally deployable, because it offers a substantial benefit even to the first sites that deploy it.<|reference_end|>
arxiv
@article{argyraki2004protecting, title={Protecting Public-Access Sites Against Distributed Denial-of-Service Attacks}, author={Katerina J. Argyraki, David R. Cheriton}, journal={Updated versions in Proc. USENIX Annual Technical Conference, April 2005, and IEEE/ACM Transactions on Networking, 17(4):1284-1297, August 2009}, year={2004}, doi={10.1109/TNET.2008.2007431}, archivePrefix={arXiv}, eprint={cs/0403042}, primaryClass={cs.NI} }
argyraki2004protecting
arxiv-671757
cs/0403043
Stream cipher based on quasigroup string transformations in $Z_p^*$
<|reference_start|>Stream cipher based on quasigroup string transformations in $Z_p^*$: In this paper we design a stream cipher that uses the algebraic structure of the multiplicative group $Z_p^*$ (where p is a big prime number used in the ElGamal algorithm), by defining a quasigroup of order $p-1$ and by doing quasigroup string transformations. The cryptographical strength of the proposed stream cipher is based on the fact that breaking it would be at least as hard as solving systems of multivariate polynomial equations modulo a big prime number $p$, which is an NP-hard problem for which there are no known fast randomized or deterministic algorithms. Unlike known ciphers that work in $Z_p^*$ for big prime numbers $p$, the speed of this stream cipher in both the encryption and decryption phases is comparable with that of the fastest symmetric-key stream ciphers.<|reference_end|>
arxiv
@article{gligoroski2004stream, title={Stream cipher based on quasigroup string transformations in $Z_p^*$}, author={Danilo Gligoroski}, journal={arXiv preprint arXiv:cs/0403043}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403043}, primaryClass={cs.CR} }
gligoroski2004stream
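The elementary quasigroup string transformation underlying such ciphers is short enough to sketch. In this hedged toy version, multiplication modulo a small prime stands in for the order p-1 quasigroup of the paper, and "leader" denotes the initial element; parameter names are ours.

```python
def e_transform(op, leader, message):
    """Elementary quasigroup string transformation:
    b[0] = l * a[0], b[i] = b[i-1] * a[i]."""
    out, prev = [], leader
    for a in message:
        prev = op(prev, a)
        out.append(prev)
    return out

# Toy quasigroup: multiplication mod p on {1, ..., p-1} (i.e. Z_p^*),
# which is a quasigroup of order p - 1, here with a tiny p.
p = 11
op = lambda x, y: (x * y) % p
print(e_transform(op, leader=3, message=[2, 7, 5, 9]))
```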
arxiv-671758
cs/0403044
Scalable Probabilistic Models for 802.11 Protocol Verification
<|reference_start|>Scalable Probabilistic Models for 80211 Protocol Verification: The IEEE 802.11 protocol is a popular standard for wireless local area networks. Its medium access control layer (MAC) is a carrier sense multiple access with collision avoidance (CSMA/CA) design and includes an exponential backoff mechanism that makes it a possible target for probabilistic model checking. In this work, we identify ways to increase the scope of application of probabilistic model checking to the 802.11 MAC. Current techniques do not scale to networks of even moderate size. To work around this problem, we identify properties of the protocol that can be used to simplify the models and make verification feasible. Using these observations, we directly optimize the probabilistic timed automata models while preserving probabilistic reachability measures. We substantiate our claims of significant reduction by our results from using the probabilistic model checker PRISM.<|reference_end|>
arxiv
@article{roy2004scalable, title={Scalable Probabilistic Models for 802.11 Protocol Verification}, author={Amitabha Roy and K. Gopinath}, journal={arXiv preprint arXiv:cs/0403044}, year={2004}, archivePrefix={arXiv}, eprint={cs/0403044}, primaryClass={cs.LO cs.NI} }
roy2004scalable
arxiv-671759
cs/0404001
On the Practicality of Intrinsic Reconfiguration As a Fault Recovery Method in Analog Systems
<|reference_start|>On the Practicality of Intrinsic Reconfiguration As a Fault Recovery Method in Analog Systems: Evolvable hardware combines the powerful search capability of evolutionary algorithms with the flexibility of reprogrammable devices, thereby providing a natural framework for reconfiguration. This framework has generated an interest in using evolvable hardware for fault-tolerant systems because reconfiguration can effectively deal with hardware faults whenever it is impossible to provide spares. But systems cannot tolerate faults indefinitely, which means reconfiguration does have a deadline. The focus of previous evolvable hardware research relating to fault-tolerance has been primarily restricted to restoring functionality, with no real consideration of time constraints. In this paper we are concerned with evolvable hardware performing reconfiguration under deadline constraints. In particular, we investigate reconfigurable hardware that undergoes intrinsic evolution. We show that fault recovery done by intrinsic reconfiguration has some restrictions, which designers cannot ignore.<|reference_end|>
arxiv
@article{greenwood2004on, title={On the Practicality of Intrinsic Reconfiguration As a Fault Recovery Method in Analog Systems}, author={Garrison W. Greenwood}, journal={arXiv preprint arXiv:cs/0404001}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404001}, primaryClass={cs.PF cs.NE} }
greenwood2004on
arxiv-671760
cs/0404002
Mathematical Analysis of Multi-Agent Systems
<|reference_start|>Mathematical Analysis of Multi-Agent Systems: We review existing approaches to mathematical modeling and analysis of multi-agent systems in which complex collective behavior arises out of local interactions between many simple agents. Though the behavior of an individual agent can be considered to be stochastic and unpredictable, the collective behavior of such systems can have a simple probabilistic description. We show that a class of mathematical models that describe the dynamics of collective behavior of multi-agent systems can be written down from the details of the individual agent controller. The models are valid for Markov or memoryless agents, in which each agent's future state depends only on its present state and not on any of the past states. We illustrate the approach by analyzing in detail applications from the robotics domain: collaboration and foraging in groups of robots.<|reference_end|>
arxiv
@article{lerman2004mathematical, title={Mathematical Analysis of Multi-Agent Systems}, author={Kristina Lerman, Aram Galstyan, Tad Hogg}, journal={arXiv preprint arXiv:cs/0404002}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404002}, primaryClass={cs.RO cs.MA} }
lerman2004mathematical
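As an illustration of the methodology (with made-up transition rates, not those of any model in the paper), a mean-field rate equation for the fraction of robots in a "searching" state can be integrated directly:

```python
def searching_fraction(beta=0.2, gamma=0.1, n0=1.0, dt=0.01, steps=5000):
    """Euler integration of dn/dt = -beta*n + gamma*(1 - n): robots leave
    the searching state at rate beta and re-enter it at rate gamma."""
    n = n0
    for _ in range(steps):
        n += dt * (-beta * n + gamma * (1.0 - n))
    return n  # converges to the fixed point gamma / (beta + gamma)

print(searching_fraction())  # ~ 0.3333
```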
arxiv-671761
cs/0404003
Enhancing the expressive power of the U-Datalog language
<|reference_start|>Enhancing the expressive power of the U-Datalog language: U-Datalog has been developed with the aim of providing a set-oriented logical update language, guaranteeing update parallelism in the context of a Datalog-like language. In U-Datalog, updates are expressed by introducing constraints (+p(X), to denote insertion, and -p(X), to denote deletion) inside Datalog rules. A U-Datalog program can be interpreted as a CLP program. In this framework, a set of updates (constraints) is satisfiable if it does not represent an inconsistent theory, that is, it does not require the insertion and the deletion of the same fact. This approach resembles a very simple form of negation. However, on the other hand, U-Datalog does not provide any mechanism to explicitly deal with negative information, resulting in a language with limited expressive power. In this paper, we provide a semantics, based on stratification, handling the use of negated atoms in U-Datalog programs, and we show which problems arise in defining a compositional semantics.<|reference_end|>
arxiv
@article{bertino2004enhancing, title={Enhancing the expressive power of the U-Datalog language}, author={Elisa Bertino, Barbara Catania, Roberta Gori}, journal={Theory and Practice of Logic Programming, vol. 1, no. 1, 2001}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404003}, primaryClass={cs.DB} }
bertino2004enhancing
arxiv-671762
cs/0404004
Dealing With Curious Players in Secure Networks
<|reference_start|>Dealing With Curious Players in Secure Networks: In secure communications networks there are a great number of user behavioural problems that need to be dealt with. Curious players pose a very real and serious threat to the integrity of such a network. By traversing a network a Curious player could uncover secret information, which that user has no need to know, by simply posing as a loyalty check. Loyalty checks are done simply to gauge the integrity of the network with respect to players who act in a malicious manner. We propose a method that can deal with Curious players trying to obtain "Need to Know" information, using a combined fault-tolerant, cryptographic and game-theoretic approach.<|reference_end|>
arxiv
@article{wagner2004dealing, title={Dealing With Curious Players in Secure Networks}, author={Liam Wagner}, journal={arXiv preprint arXiv:cs/0404004}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404004}, primaryClass={cs.CR cs.GT cs.MA} }
wagner2004dealing
arxiv-671763
cs/0404005
Government mandated blocking of foreign Web content
<|reference_start|>Government mandated blocking of foreign Web content: Blocking of foreign Web content by Internet access providers has been a hot topic for the last 18 months in Germany. Since fall 2001 the state of North-Rhine-Westphalia very actively tries to mandate such blocking. This paper will take a technical view on the problems imposed by the blocking orders and blocking content at access or network provider level in general. It will also give some empirical data on the effects of the blocking orders to help in the legal assessment of the orders.<|reference_end|>
arxiv
@article{dornseif2004government, title={Government mandated blocking of foreign Web content}, author={Maximillian Dornseif}, journal={In: Jan von Knop, Wilhelm Haverkamp, Eike Jessen (Editors) Security, E-Learning, E-Services: Proceedings of the 17. DFN-Arbeitstagung ueber Kommunikationsnetze, Duesseldorf 2003, ISBN 3-88579-373-3; Series: Lecture Notes in Informatics ISSN 1617-5468; Pages 617-648}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404005}, primaryClass={cs.CY cs.NI} }
dornseif2004government
arxiv-671764
cs/0404006
Delimited continuations in natural language: quantification and polarity sensitivity
<|reference_start|>Delimited continuations in natural language: quantification and polarity sensitivity: Making a linguistic theory is like making a programming language: one typically devises a type system to delineate the acceptable utterances and a denotational semantics to explain observations on their behavior. Via this connection, the programming language concept of delimited continuations can help analyze natural language phenomena such as quantification and polarity sensitivity. Using a logical metalanguage whose syntax includes control operators and whose semantics involves evaluation order, these analyses can be expressed in direct style rather than continuation-passing style, and these phenomena can be thought of as computational side effects.<|reference_end|>
arxiv
@article{shan2004delimited, title={Delimited continuations in natural language: quantification and polarity sensitivity}, author={Chung-chieh Shan}, journal={In CW'04: Proceedings of the 4th ACM SIGPLAN workshop on continuations, ed. Hayo Thielecke, 55-64. Technical report CSR-04-1, School of Computer Science, University of Birmingham (2004)}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404006}, primaryClass={cs.CL cs.PL} }
shan2004delimited
arxiv-671765
cs/0404007
Polarity sensitivity and evaluation order in type-logical grammar
<|reference_start|>Polarity sensitivity and evaluation order in type-logical grammar: We present a novel, type-logical analysis of_polarity sensitivity_: how negative polarity items (like "any" and "ever") or positive ones (like "some") are licensed or prohibited. It takes not just scopal relations but also linear order into account, using the programming-language notions of delimited continuations and evaluation order, respectively. It thus achieves greater empirical coverage than previous proposals.<|reference_end|>
arxiv
@article{shan2004polarity, title={Polarity sensitivity and evaluation order in type-logical grammar}, author={Chung-chieh Shan}, journal={Proceedings of the 2004 Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404007}, primaryClass={cs.CL} }
shan2004polarity
arxiv-671766
cs/0404008
Efficient dot product over word-size finite fields
<|reference_start|>Efficient dot product over word-size finite fields: We want to achieve efficiency for the exact computation of the dot product of two vectors over word-size finite fields. We therefore compare the practical behaviors of a wide range of implementation techniques using different representations. The techniques used include floating point representations, discrete logarithms, tabulations, Montgomery reduction, delayed modulus.<|reference_end|>
arxiv
@article{dumas2004efficient, title={Efficient dot product over word-size finite fields}, author={Jean-Guillaume Dumas}, journal={arXiv preprint arXiv:cs/0404008}, year={2004}, number={IMAG - LMC RR n 1064 - I}, archivePrefix={arXiv}, eprint={cs/0404008}, primaryClass={cs.SC} }
dumas2004efficient
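One of the techniques named in the abstract, delayed modulus, is easy to sketch: accumulate raw products and reduce only when the accumulator approaches the word boundary. Python integers never overflow, so the 64-bit bound below merely mimics the word-size constraint being studied.

```python
def dot_delayed_mod(u, v, p):
    """Dot product over Z_p with delayed modular reduction."""
    BOUND = (1 << 64) - 1
    limit = BOUND - (p - 1) * (p - 1)  # room left for one more product
    acc = 0
    for a, b in zip(u, v):
        acc += (a % p) * (b % p)
        if acc > limit:  # reduce before the next addition could overflow
            acc %= p
    return acc % p

assert dot_delayed_mod([3, 5, 7], [2, 4, 6], 11) == (3*2 + 5*4 + 7*6) % 11
```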
arxiv-671767
cs/0404009
Tabular Parsing
<|reference_start|>Tabular Parsing: This is a tutorial on tabular parsing, on the basis of tabulation of nondeterministic push-down automata. Discussed are Earley's algorithm, the Cocke-Kasami-Younger algorithm, tabular LR parsing, the construction of parse trees, and further issues.<|reference_end|>
arxiv
@article{nederhof2004tabular, title={Tabular Parsing}, author={Mark-Jan Nederhof, Giorgio Satta}, journal={M.-J. Nederhof and G. Satta. Tabular Parsing. In C. Martin-Vide, V. Mitrana, and G. Paun, editors, Formal Languages and Applications, Studies in Fuzziness and Soft Computing 148, pages 529-549. Springer, 2004}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404009}, primaryClass={cs.CL} }
nederhof2004tabular
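Of the algorithms covered in the tutorial, the Cocke-Kasami-Younger algorithm is compact enough to sketch as a recognizer for grammars in Chomsky normal form (our own minimal rendering, not code from the tutorial):

```python
def cyk(word, grammar, start="S"):
    """CKY recognition: grammar maps each nonterminal to a list of
    right-hand sides, either ("A", "B") pairs or single-symbol terminals."""
    n = len(word)
    table = [[set() for _ in range(n + 1)] for _ in range(n)]
    for i, a in enumerate(word):                    # length-1 spans
        for lhs, rhss in grammar.items():
            if a in rhss:
                table[i][i + 1].add(lhs)
    for span in range(2, n + 1):                    # longer spans
        for i in range(n - span + 1):
            for k in range(i + 1, i + span):        # split point
                for lhs, rhss in grammar.items():
                    for rhs in rhss:
                        if (len(rhs) == 2 and rhs[0] in table[i][k]
                                and rhs[1] in table[k][i + span]):
                            table[i][i + span].add(lhs)
    return start in table[0][n]

g = {"S": [("A", "B")], "A": ["a"], "B": ["b"]}
assert cyk("ab", g) and not cyk("ba", g)
```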
arxiv-671768
cs/0404010
On the universality of rank distributions of website popularity
<|reference_start|>On the universality of rank distributions of website popularity: We present an extensive analysis of long-term statistics of the queries to websites using logs collected on several web caches in Russian academic networks and on US IRCache caches. We check the sensitivity of the statistics to several parameters: (1) duration of data collection, (2) geographical location of the cache server collecting data, and (3) the year of data collection. We propose a two-parameter modification of the Zipf law and interpret the parameters. We find that the rank distribution of websites is stable when approximated by the modified Zipf law. We suggest that website popularity may be a universal property of the Internet.<|reference_end|>
arxiv
@article{krashakov2004on, title={On the universality of rank distributions of website popularity}, author={Serge A. Krashakov, Anton B. Teslyuk, Lev N. Shchur}, journal={Computer Networks, 50, 1769-1780 (2006)}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404010}, primaryClass={cs.NI cond-mat.stat-mech} }
krashakov2004on
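For concreteness, one natural reading of a two-parameter modification of the Zipf law is the Zipf-Mandelbrot form below; whether this matches the paper's exact parameterisation is an assumption on our part.

```python
def modified_zipf(rank, A, b, alpha):
    """F(x) = A / (b + x)**alpha: A is fixed by normalisation, while
    (b, alpha) are the two parameters fitted against the rank data."""
    return A / (b + rank) ** alpha

# For b = 0 the plain Zipf law A / x**alpha is recovered.
assert modified_zipf(1, A=1.0, b=0.0, alpha=1.0) == 1.0
```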
arxiv-671769
cs/0404011
Parametric external predicates for the DLV System
<|reference_start|>Parametric external predicates for the DLV System: This document describes syntax, semantics and implementation guidelines for enriching the DLV system with the possibility of making external C function calls. This feature is realized by the introduction of parametric external predicates, whose extension is not specified through a logic program but implicitly computed through external code.<|reference_end|>
arxiv
@article{ianni2004parametric, title={Parametric external predicates for the DLV System}, author={G. Ianni, F. Calimeri, A. Pietramala, M.C. Santoro}, journal={arXiv preprint arXiv:cs/0404011}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404011}, primaryClass={cs.AI} }
ianni2004parametric
arxiv-671770
cs/0404012
Toward the Implementation of Functions in the DLV System (Preliminary Technical Report)
<|reference_start|>Toward the Implementation of Functions in the DLV System (Preliminary Technical Report): This document describes functions as they are treated in the DLV system. We first present the language and then specify the main implementation issues.<|reference_end|>
arxiv
@article{calimeri2004toward, title={Toward the Implementation of Functions in the DLV System (Preliminary Technical Report)}, author={Francesco Calimeri, Nicola Leone}, journal={arXiv preprint arXiv:cs/0404012}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404012}, primaryClass={cs.AI} }
calimeri2004toward
arxiv-671771
cs/0404013
Tycoon: A Distributed Market-based Resource Allocation System
<|reference_start|>Tycoon: A Distributed Market-based Resource Allocation System: P2P clusters like the Grid and PlanetLab enable in principle the same statistical multiplexing efficiency gains for computing as the Internet provides for networking. The key unsolved problem is resource allocation. Existing solutions are not economically efficient and require high latency to acquire resources. We designed and implemented Tycoon, a market based distributed resource allocation system based on an Auction Share scheduling algorithm. Preliminary results show that Tycoon achieves low latency and high fairness while providing incentives for truth-telling on the part of strategic users.<|reference_end|>
arxiv
@article{lai2004tycoon:, title={Tycoon: A Distributed Market-based Resource Allocation System}, author={Kevin Lai, Bernardo A. Huberman, and Leslie Fine}, journal={arXiv preprint arXiv:cs/0404013}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404013}, primaryClass={cs.DC cs.MA} }
lai2004tycoon:
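The market intuition behind such a system can be sketched in a few lines: in a proportional-share auction each bidder receives a resource fraction equal to its bid over the total. This is only the core idea; Tycoon's actual Auction Share scheduler adds credit accounting, latency handling and continuous re-auctioning.

```python
def proportional_share(bids, resource=1.0):
    """Split one resource among bidders in proportion to their bids."""
    total = sum(bids.values())
    return {user: resource * bid / total for user, bid in bids.items()}

print(proportional_share({"alice": 3.0, "bob": 1.0}))  # alice gets 0.75
```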
arxiv-671772
cs/0404014
A Modular and Fault-Tolerant Data Transport Framework
<|reference_start|>A Modular and Fault-Tolerant Data Transport Framework: The High Level Trigger (HLT) of the future ALICE heavy-ion experiment has to reduce its input data rate of up to 25 GB/s to at most 1.25 GB/s for output before the data is written to permanent storage. To cope with these data rates a large PC cluster system is being designed to scale to several thousand nodes, connected by a fast network. For the software that will run on these nodes a flexible data transport and distribution software framework, described in this thesis, has been developed. The framework consists of a set of separate components that can be connected via a common interface. This allows different configurations to be constructed for the HLT, which are even changeable at runtime. To ensure a fault-tolerant operation of the HLT, the framework includes a basic fail-over mechanism that allows whole nodes to be replaced after a failure. The mechanism will be further expanded in the future, utilizing the runtime reconnection feature of the framework's component interface. To connect cluster nodes a communication class library is used that abstracts from the actual network technology and protocol used to retain flexibility in the hardware choice. It already contains two working prototype versions for the TCP protocol as well as SCI network adapters. Extensions can be added to the library without modifications to other parts of the framework. Extensive tests and measurements have been performed with the framework. Their results as well as conclusions drawn from them are also presented in this thesis. Performance tests show very promising results for the system, indicating that it can fulfill ALICE's requirements concerning the data transport.<|reference_end|>
arxiv
@article{steinbeck2004a, title={A Modular and Fault-Tolerant Data Transport Framework}, author={Timm M. Steinbeck}, journal={arXiv preprint arXiv:cs/0404014}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404014}, primaryClass={cs.DC} }
steinbeck2004a
arxiv-671773
cs/0404015
The study of distributed computing algorithms by multithread applications
<|reference_start|>The study of distributed computing algorithms by multithread applications: The material in this note is used as an introduction to distributed algorithms in a four-year course on software and automatic control systems in the computer technology department of Komsomolsk-on-Amur State Technical University. All the program examples are written in Borland C/C++ 5.02 for Windows 95/98/2000/NT/XP, and hence can also be compiled and executed with Visual C/C++. We consider the following approaches to distributed computing: the conversion of recursive algorithms to multithread applications, a realization of the pairing algorithm, and the building of wave systems by Petri nets and object-oriented programming.<|reference_end|>
arxiv
@article{husainov2004the, title={The study of distributed computing algorithms by multithread applications}, author={Ahmet A. Husainov}, journal={arXiv preprint arXiv:cs/0404015}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404015}, primaryClass={cs.DC} }
husainov2004the
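The first approach listed, converting a recursive algorithm into a multithread application, can be sketched as follows (a Python analogue of the course's Borland C/C++ examples; the cutoff depth and the choice of Fibonacci are our own):

```python
import threading

def fib_threaded(n, depth=2):
    """Naive Fibonacci with the two recursive calls run in parallel
    threads down to a cutoff depth, then sequentially."""
    if n < 2:
        return n
    if depth == 0:
        return fib_threaded(n - 1, 0) + fib_threaded(n - 2, 0)
    results = {}

    def worker(key, m):
        results[key] = fib_threaded(m, depth - 1)

    threads = [threading.Thread(target=worker, args=(k, m))
               for k, m in (("a", n - 1), ("b", n - 2))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results["a"] + results["b"]

print(fib_threaded(15))  # 610
```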
arxiv-671774
cs/0404016
Parrondo's games with chaotic switching
<|reference_start|>Parrondo's games with chaotic switching: This paper investigates the different effects of chaotic switching on Parrondo's games, as compared to random and periodic switching. The rate of winning of Parrondo's games with chaotic switching depends on coefficient(s) defining the chaotic generator, initial conditions of the chaotic sequence and the proportion of Game A played. Maximum rate of winning can be obtained with all the above mentioned factors properly set, and this occurs when chaotic switching approaches periodic behavior.<|reference_end|>
arxiv
@article{tang2004parrondo's, title={Parrondo's games with chaotic switching}, author={T.W. Tang, A. Allison and D. Abbott}, journal={arXiv preprint arXiv:cs/0404016}, year={2004}, doi={10.1117/12.561307}, archivePrefix={arXiv}, eprint={cs/0404016}, primaryClass={cs.GT} }
tang2004parrondo's
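A simulation of the setup is brief enough to sketch. Below, the standard capital-dependent Game B is assumed, and the logistic map supplies the chaotic switching signal; the particular parameter values are illustrative only.

```python
import random

def parrondo_rate(gamma=0.5, eps=0.005, x0=0.37, rounds=200000):
    """Average rate of winning when the logistic map x -> 4x(1-x)
    selects Game A (x < gamma) or Game B (otherwise)."""
    capital, x = 0, x0
    for _ in range(rounds):
        x = 4.0 * x * (1.0 - x)          # chaotic switching signal
        if x < gamma:                    # Game A: nearly fair coin
            p = 0.5 - eps
        elif capital % 3 == 0:           # Game B, losing branch
            p = 0.1 - eps
        else:                            # Game B, winning branch
            p = 0.75 - eps
        capital += 1 if random.random() < p else -1
    return capital / rounds

print(parrondo_rate())
```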
arxiv-671775
cs/0404017
Exploring tradeoffs in pleiotropy and redundancy using evolutionary computing
<|reference_start|>Exploring tradeoffs in pleiotropy and redundancy using evolutionary computing: Evolutionary computation algorithms are increasingly being used to solve optimization problems as they have many advantages over traditional optimization algorithms. In this paper we use evolutionary computation to study the trade-off between pleiotropy and redundancy in a client-server based network. Pleiotropy is a term used to describe components that perform multiple tasks, while redundancy refers to multiple components performing the same task. Pleiotropy reduces cost but lacks robustness, while redundancy increases network reliability but is more costly; together, pleiotropy and redundancy build flexibility and robustness into systems. Therefore it is desirable to have a network that contains a balance between pleiotropy and redundancy. We explore how factors such as link failure probability, repair rates, and the size of the network influence the design choices, which we explore using genetic algorithms.<|reference_end|>
arxiv
@article{berryman2004exploring, title={Exploring tradeoffs in pleiotropy and redundancy using evolutionary computing}, author={Matthew J. Berryman, Wei-Li Khoo, Hiep Nguyen, Erin O'Neill, Andrew Allison and Derek Abbott}, journal={Proc. SPIE 5275, BioMEMS and Nanotechnology, Ed. Dan V. Nicolau, Perth, Australia, Dec. 2003, pp49-58}, year={2004}, doi={10.1117/12.548001}, archivePrefix={arXiv}, eprint={cs/0404017}, primaryClass={cs.NE cs.NI} }
berryman2004exploring
arxiv-671776
cs/0404018
NLML--a Markup Language to Describe the Unlimited English Grammar
<|reference_start|>NLML--a Markup Language to Describe the Unlimited English Grammar: In this paper we present NLML (Natural Language Markup Language), a markup language to describe the syntactic and semantic structure of any grammatically correct English expression. First, related work is analyzed to demonstrate the necessity of NLML: simple form, easy management and direct storage. Then the description of English grammar with NLML is introduced in detail at three levels: sentence (with different complexities, voices, moods, and tenses), clause (relative clause and noun clause) and phrase (noun phrase, verb phrase, prepositional phrase, adjective phrase, adverb phrase and predicate phrase). Finally, the application fields of NLML in NLP are shown with two typical examples: NLOJM (Natural Language Object Model in Java) and NLDB (Natural Language Database).<|reference_end|>
arxiv
@article{jia2004nlml--a, title={NLML--a Markup Language to Describe the Unlimited English Grammar}, author={Jiyou Jia}, journal={arXiv preprint arXiv:cs/0404018}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404018}, primaryClass={cs.CL cs.AI} }
jia2004nlml--a
arxiv-671777
cs/0404019
Optimizing genetic algorithm strategies for evolving networks
<|reference_start|>Optimizing genetic algorithm strategies for evolving networks: This paper explores the use of genetic algorithms for the design of networks, where the demands on the network fluctuate in time. For varying network constraints, we find the best network using the standard genetic algorithm operators such as inversion, mutation and crossover. We also examine how the choice of genetic algorithm operators affects the quality of the best network found. Such networks typically contain redundancy in servers, where several servers perform the same task and pleiotropy, where servers perform multiple tasks. We explore this trade-off between pleiotropy versus redundancy on the cost versus reliability as a measure of the quality of the network.<|reference_end|>
arxiv
@article{berryman2004optimizing, title={Optimizing genetic algorithm strategies for evolving networks}, author={Matthew J. Berryman, Andrew Allison and Derek Abbott}, journal={arXiv preprint arXiv:cs/0404019}, year={2004}, doi={10.1117/12.548122}, archivePrefix={arXiv}, eprint={cs/0404019}, primaryClass={cs.NE cs.NI} }
berryman2004optimizing
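As a reference point for the operators being varied, a bare-bones genetic algorithm with crossover, mutation and inversion over bit-string genomes looks like this (a generic sketch; the paper's network encoding and cost/reliability fitness are not reproduced):

```python
import random

def evolve(fitness, genome_len=16, pop_size=30, gens=60, p_mut=0.02, p_inv=0.1):
    """Minimal GA: truncation selection plus one-point crossover,
    bit-flip mutation and segment inversion."""
    pop = [[random.randint(0, 1) for _ in range(genome_len)]
           for _ in range(pop_size)]
    for _ in range(gens):
        pop.sort(key=fitness, reverse=True)
        parents = pop[:pop_size // 2]
        children = []
        while len(children) < pop_size - len(parents):
            a, b = random.sample(parents, 2)
            cut = random.randrange(1, genome_len)
            child = a[:cut] + b[cut:]                               # crossover
            child = [g ^ (random.random() < p_mut) for g in child]  # mutation
            if random.random() < p_inv:                             # inversion
                i, j = sorted(random.sample(range(genome_len), 2))
                child[i:j] = reversed(child[i:j])
            children.append(child)
        pop = parents + children
    return max(pop, key=fitness)

print(evolve(fitness=sum))  # toy fitness: maximise the number of 1-bits
```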
arxiv-671778
cs/0404020
A treatment of higher-order features in logic programming
<|reference_start|>A treatment of higher-order features in logic programming: The logic programming paradigm provides the basis for a new intensional view of higher-order notions. This view is realized primarily by employing the terms of a typed lambda calculus as representational devices and by using a richer form of unification for probing their structures. These additions have important meta-programming applications but they also pose non-trivial implementation problems. One issue concerns the machine representation of lambda terms suitable to their intended use: an adequate encoding must facilitate comparison operations over terms in addition to supporting the usual reduction computation. Another aspect relates to the treatment of a unification operation that has a branching character and that sometimes calls for the delaying of the solution of unification problems. A final issue concerns the execution of goals whose structures become apparent only in the course of computation. These various problems are exposed in this paper and solutions to them are described. A satisfactory representation for lambda terms is developed by exploiting the nameless notation of de Bruijn as well as explicit encodings of substitutions. Special mechanisms are molded into the structure of traditional Prolog implementations to support branching in unification and carrying of unification problems over other computation steps; a premium is placed in this context on exploiting determinism and on emulating usual first-order behaviour. An extended compilation model is presented that treats higher-order unification and also handles dynamically emergent goals. The ideas described here have been employed in the Teyjus implementation of the Lambda Prolog language, a fact that is used to obtain a preliminary assessment of their efficacy.<|reference_end|>
arxiv
@article{nadathur2004a, title={A treatment of higher-order features in logic programming}, author={Gopalan Nadathur}, journal={arXiv preprint arXiv:cs/0404020}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404020}, primaryClass={cs.PL} }
nadathur2004a
arxiv-671779
cs/0404021
Decidability and Universality in Symbolic Dynamical Systems
<|reference_start|>Decidability and Universality in Symbolic Dynamical Systems: Many different definitions of computational universality for various types of dynamical systems have flourished since Turing's work. We propose a general definition of universality that applies to arbitrary discrete time symbolic dynamical systems. Universality of a system is defined as undecidability of a model-checking problem. For Turing machines, counter machines and tag systems, our definition coincides with the classical one. It yields, however, a new definition for cellular automata and subshifts. Our definition is robust with respect to initial conditions, which is a desirable feature for physical realizability. We derive necessary conditions for undecidability and universality. For instance, a universal system must have a sensitive point and a proper subsystem. We conjecture that universal systems have an infinite number of subsystems. We also discuss the thesis according to which computation should occur at the `edge of chaos' and we exhibit a universal chaotic system.<|reference_end|>
arxiv
@article{delvenne2004decidability, title={Decidability and Universality in Symbolic Dynamical Systems}, author={Jean-Charles Delvenne, Petr Kurka and Vincent Blondel}, journal={arXiv preprint arXiv:cs/0404021}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404021}, primaryClass={cs.CC cs.LO} }
delvenne2004decidability
arxiv-671780
cs/0404022
An Algorithm for Transforming Color Images into Tactile Graphics
<|reference_start|>An Algorithm for Transforming Color Images into Tactile Graphics: This paper presents an algorithm that transforms color visual images, like photographs or paintings, into tactile graphics. In the algorithm, the edges of objects are detected and the colors of the objects are estimated. Then, the edges and the colors are encoded into lines and textures in the output tactile image. The design of the method is substantiated by various qualities of haptic recognition of images. Also, means of presenting the tactile images in printouts are discussed. Example translated images are shown.<|reference_end|>
arxiv
@article{rataj2004an, title={An Algorithm for Transforming Color Images into Tactile Graphics}, author={Artur Rataj}, journal={arXiv preprint arXiv:cs/0404022}, year={2004}, number={IITiS-20040408-1-1}, archivePrefix={arXiv}, eprint={cs/0404022}, primaryClass={cs.GR} }
rataj2004an
arxiv-671781
cs/0404023
Propositional computability logic I
<|reference_start|>Propositional computability logic I: In the same sense as classical logic is a formal theory of truth, the recently initiated approach called computability logic is a formal theory of computability. It understands (interactive) computational problems as games played by a machine against the environment, their computability as existence of a machine that always wins the game, logical operators as operations on computational problems, and validity of a logical formula as being a scheme of "always computable" problems. The present contribution gives a detailed exposition of a soundness and completeness proof for an axiomatization of one of the most basic fragments of computability logic. The logical vocabulary of this fragment contains operators for the so-called parallel and choice operations, and its atoms represent elementary problems, i.e. predicates in the standard sense. This article is self-contained as it explains all relevant concepts. While not technically necessary, however, familiarity with the foundational paper "Introduction to computability logic" [Annals of Pure and Applied Logic 123 (2003), pp.1-99] would greatly help the reader in understanding the philosophy, underlying motivations, potential and utility of computability logic -- the context that determines the value of the present results. An online introduction to the subject is available at http://www.cis.upenn.edu/~giorgi/cl.html and http://www.csc.villanova.edu/~japaridz/CL/gsoll.html .<|reference_end|>
arxiv
@article{japaridze2004propositional, title={Propositional computability logic I}, author={Giorgi Japaridze}, journal={ACM Transactions on Computational Logic 7 (2006), pp. 302-330}, year={2004}, doi={10.1145/1131313.1131318}, archivePrefix={arXiv}, eprint={cs/0404023}, primaryClass={cs.LO math.LO} }
japaridze2004propositional
arxiv-671782
cs/0404024
Computability Logic: a formal theory of interaction
<|reference_start|>Computability Logic: a formal theory of interaction: Computability logic is a formal theory of (interactive) computability in the same sense as classical logic is a formal theory of truth. This approach was initiated very recently in "Introduction to computability logic" (Annals of Pure and Applied Logic 123 (2003), pp.1-99). The present paper reintroduces computability logic in a more compact and less technical way. It is written in a semitutorial style with a general computer science, logic or mathematics audience in mind. An Internet source on the subject is available at http://www.cis.upenn.edu/~giorgi/cl.html, and additional material at http://www.csc.villanova.edu/~japaridz/CL/gsoll.html .<|reference_end|>
arxiv
@article{japaridze2004computability, title={Computability Logic: a formal theory of interaction}, author={Giorgi Japaridze}, journal={Interactive Computation: The New Paradigm. D.Goldin, S.Smolka and P.Wegner, eds. Springer Verlag, Berlin 2006, pp. 183-223}, year={2004}, doi={10.1007/3-540-34874-3_9}, archivePrefix={arXiv}, eprint={cs/0404024}, primaryClass={cs.LO cs.AI math.LO} }
japaridze2004computability
arxiv-671783
cs/0404025
Test Collections for Patent-to-Patent Retrieval and Patent Map Generation in NTCIR-4 Workshop
<|reference_start|>Test Collections for Patent-to-Patent Retrieval and Patent Map Generation in NTCIR-4 Workshop: This paper describes the Patent Retrieval Task in the Fourth NTCIR Workshop, and the test collections produced in this task. We perform the invalidity search task, in which each participant group searches a patent collection for the patents that can invalidate the demand in an existing claim. We also perform the automatic patent map generation task, in which the patents associated with a specific topic are organized in a multi-dimensional matrix.<|reference_end|>
arxiv
@article{fujii2004test, title={Test Collections for Patent-to-Patent Retrieval and Patent Map Generation in NTCIR-4 Workshop}, author={Atsushi Fujii, Makoto Iwayama, and Noriko Kando}, journal={Proceedings of the 4th International Conference on Language Resources and Evaluation (LREC-2004), pp.1643-1646, May. 2004.}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404025}, primaryClass={cs.CL} }
fujii2004test
arxiv-671784
cs/0404026
DAB Content Annotation and Receiver Hardware Control with XML
<|reference_start|>DAB Content Annotation and Receiver Hardware Control with XML: The Eureka-147 Digital Audio Broadcasting (DAB) standard defines the 'dynamic labels' data field for holding information about the transmission content. However, this information does not follow a well-defined structure since it is designed to carry text for direct output to displays, for human interpretation. This poses a problem when machine interpretation of DAB content information is desired. Extensible Markup Language (XML) was developed to allow for the well-defined, structured machine-to-machine exchange of data over computer networks. This article proposes a novel technique of machine-interpretable DAB content annotation and receiver hardware control, involving the utilisation of XML as metadata in the transmitted DAB frames.<|reference_end|>
arxiv
@article{nathan2004dab, title={DAB Content Annotation and Receiver Hardware Control with XML}, author={Darran Nathan, Eva Rosdiana, Chua Beng Koon}, journal={arXiv preprint arXiv:cs/0404026}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404026}, primaryClass={cs.GL cs.CL} }
nathan2004dab
arxiv-671785
cs/0404027
The Gridbus Toolkit for Service Oriented Grid and Utility Computing: An Overview and Status Report
<|reference_start|>The Gridbus Toolkit for Service Oriented Grid and Utility Computing: An Overview and Status Report: Grids aim at exploiting synergies that result from the cooperation of autonomous distributed entities. The synergies that result from grid cooperation include the sharing, exchange, selection, and aggregation of geographically distributed resources such as computers, databases, software, and scientific instruments for solving large-scale problems in science, engineering, and commerce. For this cooperation to be sustainable, participants need to have an economic incentive. Therefore, "incentive" mechanisms should be considered as one of the key design parameters of Grid architectures. In this article, we present an overview and status of an open source Grid toolkit, called Gridbus, whose architecture is fundamentally driven by the requirements of Grid economy. Gridbus technologies provide services for both computational and data grids that power the emerging eScience and eBusiness applications.<|reference_end|>
arxiv
@article{buyya2004the, title={The Gridbus Toolkit for Service Oriented Grid and Utility Computing: An Overview and Status Report}, author={Rajkumar Buyya and Srikumar Venugopal}, journal={arXiv preprint arXiv:cs/0404027}, year={2004}, number={Technical Report, GRIDS-TR-2004-2, Grid Computing and Distributed Systems Laboratory, University of Melbourne, Australia, April 2004}, archivePrefix={arXiv}, eprint={cs/0404027}, primaryClass={cs.DC} }
buyya2004the
arxiv-671786
cs/0404028
The Random Buffer Tree : A Randomized Technique for I/O-efficient Algorithms
<|reference_start|>The Random Buffer Tree : A Randomized Technique for I/O-efficient Algorithms: In this paper, we present a probabilistic self-balancing dictionary data structure for massive data sets, and prove expected amortized I/O-optimal bounds on the dictionary operations. We show how to use the structure as an I/O-optimal priority queue. The data structure, which we call as the random buffer tree, abstracts the properties of the random treap and the buffer tree and has the same expected I/O-bounds as the buffer tree.<|reference_end|>
arxiv
@article{dominic2004the, title={The Random Buffer Tree : A Randomized Technique for I/O-efficient Algorithms}, author={Saju Jude Dominic and G. Sajith}, journal={arXiv preprint arXiv:cs/0404028}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404028}, primaryClass={cs.DS} }
dominic2004the
arxiv-671787
cs/0404029
The Effect of Faults on Network Expansion
<|reference_start|>The Effect of Faults on Network Expansion: In this paper we study the problem of how resilient networks are to node faults. Specifically, we investigate the question of how many faults a network can sustain so that it still contains a large (i.e. linear-sized) connected component that still has approximately the same expansion as the original fault-free network. For this we apply a pruning technique which culls away parts of the faulty network which have poor expansion. This technique can be applied to both adversarial faults and to random faults. For adversarial faults we prove that for every network with expansion alpha, a large connected component with basically the same expansion as the original network exists for up to a constant times alpha n faults. This result is tight in the sense that every graph G of size n and uniform expansion alpha(.), i.e. G has an expansion of alpha(n) and every subgraph G' of size m of G has an expansion of O(alpha(m)), can be broken into sublinear components with omega(alpha(n) n) faults. For random faults we observe that the situation is significantly different, because in this case the expansion of a graph only gives a very weak bound on its resilience to random faults. More specifically, there are networks of uniform expansion O(sqrt{n}) that are resilient against a constant fault probability but there are also networks of uniform expansion Omega(1/log n) that are not resilient against a O(1/log n) fault probability. Thus, a different parameter is needed. For this we introduce the span of a graph which allows us to determine the maximum fault probability in a much better way than the expansion can. We use the span to show the first known results for the effect of random faults on the expansion of d-dimensional meshes.<|reference_end|>
arxiv
@article{bagchi2004the, title={The Effect of Faults on Network Expansion}, author={Amitabha Bagchi, Ankur Bhargava, Amitabh Chaudhary, David Eppstein, and Christian Scheideler}, journal={Theor. Comput. Syst. 39(6):903-928. November 2006}, year={2004}, doi={10.1007/s00224-006-1349-0}, archivePrefix={arXiv}, eprint={cs/0404029}, primaryClass={cs.DC cs.DM} }
bagchi2004the
arxiv-671788
cs/0404030
XML framework for concept description and knowledge representation
<|reference_start|>XML framework for concept description and knowledge representation: An XML framework for concept description is given, based upon the fact that the tree structure of XML implies the logical structure of concepts as defined by attributional calculus. In particular, the attribute-value representation is implementable in the XML framework. Since the attribute-value representation is an important way to represent knowledge in AI, the framework offers a further, simpler alternative to the powerful RDF technology.<|reference_end|>
arxiv
@article{de vries2004xml, title={XML framework for concept description and knowledge representation}, author={Andreas de Vries}, journal={arXiv preprint arXiv:cs/0404030}, year={2004}, doi={10.2370/255_205}, archivePrefix={arXiv}, eprint={cs/0404030}, primaryClass={cs.AI cs.LO} }
de vries2004xml
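A hypothetical example of such an attribute-value encoding (element and attribute names invented for illustration; the paper defines its own schema):

```python
import xml.etree.ElementTree as ET

# Encode a concept as a list of attribute-value pairs in XML.
concept = ET.Element("concept", name="penguin")
for attr, value in [("class", "bird"), ("can_fly", "no")]:
    ET.SubElement(concept, "attribute", name=attr, value=value)

print(ET.tostring(concept, encoding="unicode"))
```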
arxiv-671789
cs/0404031
Characterisations of Intersection Graphs by Vertex Orderings
<|reference_start|>Characterisations of Intersection Graphs by Vertex Orderings: Characterisations of interval graphs, comparability graphs, co-comparability graphs, permutation graphs, and split graphs in terms of linear orderings of the vertex set are presented. As an application, it is proved that interval graphs, co-comparability graphs, AT-free graphs, and split graphs have bandwidth bounded by their maximum degree.<|reference_end|>
arxiv
@article{wood2004characterisations, title={Characterisations of Intersection Graphs by Vertex Orderings}, author={David R. Wood}, journal={Australasian J. Combinatorics 34:261-268, 2006}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404031}, primaryClass={cs.DM} }
wood2004characterisations
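One well-known ordering characterisation of this kind states that a graph is an interval graph iff its vertices can be ordered v1 < ... < vn such that for all i < j < k, the edge (vi, vk) implies the edge (vi, vj). Whether this is the exact formulation used in the paper is not claimed here; the brute-force checker below is purely illustrative:

```python
# O(n^3) check of the interval-graph ordering condition described above.
from itertools import combinations

def is_interval_ordering(order, edges):
    E = {frozenset(e) for e in edges}
    for i, j, k in combinations(range(len(order)), 3):
        if frozenset((order[i], order[k])) in E and \
           frozenset((order[i], order[j])) not in E:
            return False
    return True

# The path a-b-c ordered by interval left endpoints satisfies the condition;
# the order a, c, b violates it.
print(is_interval_ordering("abc", [("a", "b"), ("b", "c")]))  # True
print(is_interval_ordering("acb", [("a", "b"), ("b", "c")]))  # False
```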
arxiv-671790
cs/0404032
When Do Differences Matter? On-Line Feature Extraction Through Cognitive Economy
<|reference_start|>When Do Differences Matter? On-Line Feature Extraction Through Cognitive Economy: For an intelligent agent to be truly autonomous, it must be able to adapt its representation to the requirements of its task as it interacts with the world. Most current approaches to on-line feature extraction are ad hoc; in contrast, this paper presents an algorithm that bases judgments of state compatibility and state-space abstraction on principled criteria derived from the psychological principle of cognitive economy. The algorithm incorporates an active form of Q-learning, and partitions continuous state-spaces by merging and splitting Voronoi regions. The experiments illustrate a new methodology for testing and comparing representations by means of learning curves. Results from the puck-on-a-hill task demonstrate the algorithm's ability to learn effective representations, superior to those produced by some other, well-known, methods.<|reference_end|>
arxiv
@article{finton2004when, title={When Do Differences Matter? On-Line Feature Extraction Through Cognitive Economy}, author={David J. Finton}, journal={arXiv preprint arXiv:cs/0404032}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404032}, primaryClass={cs.LG cs.AI cs.NE} }
finton2004when
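The paper's method incorporates an active form of Q-learning over adaptively merged and split Voronoi regions. As background only, this is the core tabular Q-learning update such a method builds on; the hyperparameters, region names, and actions below are illustrative, not the paper's:

```python
# Minimal tabular Q-learning update rule (background sketch only).
from collections import defaultdict

ALPHA, GAMMA = 0.1, 0.95          # learning rate and discount (illustrative)
Q = defaultdict(float)            # keyed by (state_region, action)

def q_update(state, action, reward, next_state, actions):
    best_next = max(Q[(next_state, a)] for a in actions)
    Q[(state, action)] += ALPHA * (reward + GAMMA * best_next - Q[(state, action)])

q_update("region_7", "push_left", -1.0, "region_3", ["push_left", "push_right"])
print(Q[("region_7", "push_left")])  # -0.1
```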
arxiv-671791
cs/0404033
The Persistent Buffer Tree : An I/O-efficient Index for Temporal Data
<|reference_start|>The Persistent Buffer Tree : An I/O-efficient Index for Temporal Data: In a variety of applications, we need to keep track of the development of a data set over time. For maintaining and querying this multi-version data I/O-efficiently, external-memory data structures are required. In this paper, we present a probabilistic self-balancing persistent data structure in external memory called the persistent buffer tree, which supports insertions, updates and deletions of data items at the present version and range queries for any version, past or present. The persistent buffer tree is I/O-optimal in the sense that the expected amortized I/O performance bounds are asymptotically the same as the deterministic amortized bounds of the (single-version) buffer tree in the worst case.<|reference_end|>
arxiv
@article{dominic2004the, title={The Persistent Buffer Tree : An I/O-efficient Index for Temporal Data}, author={Saju Jude Dominic and G. Sajith}, journal={arXiv preprint arXiv:cs/0404033}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404033}, primaryClass={cs.GL cs.DB} }
dominic2004the
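The persistent buffer tree itself is an external-memory structure and is not reproduced here. As a much simpler in-memory illustration of persistence, the path-copying sketch below keeps every version of a binary search tree queryable by copying only the search path on each insert:

```python
# Path-copying persistence on a plain (unbalanced) BST -- an illustration of
# the persistence concept only, not the paper's I/O-efficient algorithm.
class Node:
    __slots__ = ("key", "left", "right")
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right

def insert(root, key):
    if root is None:
        return Node(key)
    if key < root.key:                 # copy only the nodes on the search path
        return Node(root.key, insert(root.left, key), root.right)
    return Node(root.key, root.left, insert(root.right, key))

def contains(root, key):
    while root is not None:
        if key == root.key:
            return True
        root = root.left if key < root.key else root.right
    return False

versions = [None]                      # version 0 is the empty tree
for k in (5, 2, 8):
    versions.append(insert(versions[-1], k))
print(contains(versions[1], 2), contains(versions[3], 2))  # False True
```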
arxiv-671792
cs/0404034
Propagation by Selective Initialization and Its Application to Numerical Constraint Satisfaction Problems
<|reference_start|>Propagation by Selective Initialization and Its Application to Numerical Constraint Satisfaction Problems: Numerical analysis has no satisfactory method for the more realistic optimization models. However, with constraint programming one can compute a cover for the solution set to arbitrarily close approximation. Because the use of constraint propagation for composite arithmetic expressions is computationally expensive, consistency is computed with interval arithmetic. In this paper we present theorems that support selective initialization, a simple modification of constraint propagation that allows composite arithmetic expressions to be handled efficiently.<|reference_end|>
arxiv
@article{van emden2004propagation, title={Propagation by Selective Initialization and Its Application to Numerical Constraint Satisfaction Problems}, author={M.H. van Emden and B. Moa}, journal={arXiv preprint arXiv:cs/0404034}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404034}, primaryClass={cs.NA} }
van emden2004propagation
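As a minimal illustration of the interval arithmetic the abstract mentions (ignoring the outward rounding a real solver must apply to stay sound under floating point), evaluating operations on intervals yields intervals guaranteed to contain every pointwise result:

```python
# Toy interval arithmetic: each operation returns an enclosing interval.
# A production implementation would round lower bounds down and upper
# bounds up; that detail is omitted here.
def i_add(a, b):
    return (a[0] + b[0], a[1] + b[1])

def i_mul(a, b):
    products = [a[0] * b[0], a[0] * b[1], a[1] * b[0], a[1] * b[1]]
    return (min(products), max(products))

x, y = (-1.0, 2.0), (3.0, 4.0)
print(i_add(x, y))  # (2.0, 6.0)
print(i_mul(x, y))  # (-4.0, 8.0)
```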
arxiv-671793
cs/0404035
Elements for Response Time Statistics in ERP Transaction Systems
<|reference_start|>Elements for Response Time Statistics in ERP Transaction Systems: We present some measurements and ideas for response time statistics in ERP systems. It is shown that the response time distribution of a given transaction in a given system is generically a log-normal distribution or, in some situations, a sum of two or more log-normal distributions. We present some arguments for this form of the distribution based on heuristic rules for response times, and we show data from performance measurements in actual systems to support the log-normal form. Deviations from the log-normal form can often be traced back to performance problems in the system. Consequences for the interpretation of response time data and for service level agreements are discussed.<|reference_end|>
arxiv
@article{mielke2004elements, title={Elements for Response Time Statistics in ERP Transaction Systems}, author={Andreas Mielke}, journal={Performance Evaluation 64, 635-653 (2006)}, year={2004}, doi={10.1016/j.peva.2005.05.006}, archivePrefix={arXiv}, eprint={cs/0404035}, primaryClass={cs.PF} }
mielke2004elements
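A quick way to see the log-normal claim at work, on synthetic data rather than the paper's ERP measurements: if response times are log-normal, their logarithms are normal, so the log-scale mean and standard deviation recover the distribution's parameters:

```python
# Synthetic log-normal "response times" (the parameters are arbitrary).
import numpy as np

rng = np.random.default_rng(0)
resp = rng.lognormal(mean=-1.0, sigma=0.5, size=100_000)  # seconds, synthetic
logs = np.log(resp)
print(round(logs.mean(), 2), round(logs.std(), 2))  # close to -1.0 and 0.5
# For log-normal data the median equals exp(log-mean), unlike the mean,
# which is inflated by the heavy right tail.
print(round(np.exp(logs.mean()), 3), round(resp.mean(), 3))
```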
arxiv-671794
cs/0404036
Online Searching with an Autonomous Robot
<|reference_start|>Online Searching with an Autonomous Robot: We discuss online strategies for visibility-based searching for an object hidden behind a corner, using Kurt3D, a real autonomous mobile robot. This task is closely related to a number of well-studied problems. Our robot uses a three-dimensional laser scanner in a stop, scan, plan, go fashion to build a virtual three-dimensional environment. Besides planning trajectories and avoiding obstacles, Kurt3D is capable of identifying objects like a chair. We derive a practically useful and asymptotically optimal strategy that guarantees a competitive ratio of 2, which differs remarkably from the well-studied scenario in which there is no need to stop and survey the environment. Our strategy is used by Kurt3D and documented in a separate video.<|reference_end|>
arxiv
@article{fekete2004online, title={Online Searching with an Autonomous Robot}, author={Sandor P. Fekete and Rolf Klein and Andreas Nuechter}, journal={arXiv preprint arXiv:cs/0404036}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404036}, primaryClass={cs.RO cs.DS} }
fekete2004online
arxiv-671795
cs/0404037
Model-checking Driven Black-box Testing Algorithms for Systems with Unspecified Components
<|reference_start|>Model-checking Driven Black-box Testing Algorithms for Systems with Unspecified Components: Component-based software development has posed a serious challenge to system verification since externally-obtained components could be a new source of system failures. This issue cannot be completely solved by either model-checking or traditional software testing techniques alone, for several reasons: 1) externally obtained components are usually unspecified or only partially specified; 2) it is generally difficult to establish an adequacy criterion for testing a component; 3) components may be used to dynamically upgrade a system. This paper introduces a new approach (called {\em model-checking driven black-box testing}) that combines model-checking with traditional black-box software testing to tackle the problem in a complete, sound, and automatic way. The idea is to, with respect to some requirement (expressed in CTL or LTL) about the system, use model-checking techniques to derive a condition (expressed in communication graphs) for an unspecified component such that the system satisfies the requirement iff the condition is satisfied by the component, and which can be established by testing the component with test cases generated from the condition on-the-fly. In this paper, we present model-checking driven black-box testing algorithms to handle both CTL and LTL requirements. We also illustrate the idea through some examples.<|reference_end|>
arxiv
@article{xie2004model-checking, title={Model-checking Driven Black-box Testing Algorithms for Systems with Unspecified Components}, author={Gaoyan Xie and Zhe Dang}, journal={arXiv preprint arXiv:cs/0404037}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404037}, primaryClass={cs.SE cs.LO} }
xie2004model-checking
arxiv-671796
cs/0404038
2-Sat Sub-Clauses and the Hypernodal Structure of the 3-Sat Problem
<|reference_start|>2-Sat Sub-Clauses and the Hypernodal Structure of the 3-Sat Problem: Like simpler graphs, nested (hypernodal) graphs consist of two components: a set of nodes and a set of edges, where each edge connects a pair of nodes. In the hypernodal graph model, however, a node may contain other graphs, so that a node may be contained in a graph that it contains. The inherently recursive structure of the hypernodal graph model aptly characterizes both the structure and dynamics of the 3-sat problem, a broadly applicable, though intractable, computer science problem. In this paper I first discuss the structure of the 3-sat problem, analyzing the relation of 3-sat to 2-sat, a related, though tractable, problem. I then discuss sub-clauses and sub-clause thresholds and the transformation of sub-clauses into implication graphs, demonstrating how combinations of implication graphs are equivalent to hypernodal graphs. I conclude with a brief discussion of the use of hypernodal graphs to model the 3-sat problem, illustrating how hypernodal graphs model both the conditions for satisfiability and the process by which particular 3-sat assignments either succeed or fail.<|reference_end|>
arxiv
@article{powell20042-sat, title={2-Sat Sub-Clauses and the Hypernodal Structure of the 3-Sat Problem}, author={D. B. Powell}, journal={arXiv preprint arXiv:cs/0404038}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404038}, primaryClass={cs.CC cs.AI} }
powell20042-sat
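The transformation of 2-sat clauses into implication graphs mentioned in the abstract follows a standard textbook construction: a clause (a or b) contributes the implications not-a -> b and not-b -> a, and the formula is unsatisfiable exactly when some variable and its negation land in the same strongly connected component. A minimal sketch of that construction (not the paper's hypernodal machinery) follows:

```python
# Standard 2-SAT satisfiability check via SCCs of the implication graph
# (Kosaraju's algorithm). Variables are 1..n; a negative integer is a
# negated literal.
def lit(x):                          # literal -> node index
    return 2 * (abs(x) - 1) + (x < 0)

def two_sat(n_vars, clauses):
    n = 2 * n_vars
    g = [[] for _ in range(n)]       # implication graph
    rg = [[] for _ in range(n)]      # reversed graph
    for a, b in clauses:
        g[lit(-a)].append(lit(b)); rg[lit(b)].append(lit(-a))
        g[lit(-b)].append(lit(a)); rg[lit(a)].append(lit(-b))
    order, seen = [], [False] * n
    def dfs1(u):                     # first pass: post-order on g
        seen[u] = True
        for v in g[u]:
            if not seen[v]:
                dfs1(v)
        order.append(u)
    for u in range(n):
        if not seen[u]:
            dfs1(u)
    comp = [-1] * n
    def dfs2(u, c):                  # second pass: label SCCs on rg
        comp[u] = c
        for v in rg[u]:
            if comp[v] < 0:
                dfs2(v, c)
    c = 0
    for u in reversed(order):
        if comp[u] < 0:
            dfs2(u, c); c += 1
    # satisfiable iff no variable shares an SCC with its negation
    return all(comp[2 * i] != comp[2 * i + 1] for i in range(n_vars))

print(two_sat(2, [(1, 2), (-1, 2), (1, -2)]))  # True  (x1 = x2 = True works)
print(two_sat(1, [(1, 1), (-1, -1)]))          # False (forces x1 and not-x1)
```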
arxiv-671797
cs/0404039
Algorithms for Estimating Information Distance with Application to Bioinformatics and Linguistics
<|reference_start|>Algorithms for Estimating Information Distance with Application to Bioinformatics and Linguistics: After reviewing unnormalized and normalized information distances based on incomputable notions of Kolmogorov complexity, we discuss how Kolmogorov complexity can be approximated by data compression algorithms. We argue that optimal algorithms for data compression with side information can be successfully used to approximate the normalized distance. Next, we discuss an alternative information distance, which is based on relative entropy rate (also known as Kullback-Leibler divergence), and compression-based algorithms for its estimation. Based on available biological and linguistic data, we arrive at the unexpected conclusion that in Bioinformatics and Computational Linguistics this alternative distance is more relevant and important than the ones based on Kolmogorov complexity.<|reference_end|>
arxiv
@article{kaltchenko2004algorithms, title={Algorithms for Estimating Information Distance with Application to Bioinformatics and Linguistics}, author={Alexei Kaltchenko}, journal={arXiv preprint arXiv:cs/0404039}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404039}, primaryClass={cs.CC cs.CE q-bio.GN} }
kaltchenko2004algorithms
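One widely used compression-based approximation of the normalized information distance is the normalized compression distance, NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), where C gives a compressor's output length. The sketch below uses zlib as a stand-in compressor; this is not claimed to match the authors' experimental setup:

```python
# Normalized compression distance with zlib as the compressor.
import zlib

def C(x: bytes) -> int:
    return len(zlib.compress(x, 9))

def ncd(x: bytes, y: bytes) -> float:
    cx, cy, cxy = C(x), C(y), C(x + y)
    return (cxy - min(cx, cy)) / max(cx, cy)

s1 = b"the quick brown fox jumps over the lazy dog " * 20
s2 = b"colorless green ideas sleep furiously at dawn " * 20
print(round(ncd(s1, s1), 3))  # near 0: a string compresses well against itself
print(round(ncd(s1, s2), 3))  # larger: the two texts share little structure
```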
arxiv-671798
cs/0404040
Near Rationality and Competitive Equilibria in Networked Systems
<|reference_start|>Near Rationality and Competitive Equilibria in Networked Systems: A growing body of literature in networked systems research relies on game theory and mechanism design to model and address the potential lack of cooperation between self-interested users. Most game-theoretic models applied to system research only describe competitive equilibria in terms of pure Nash equilibria, that is, a situation where the strategy of each user is deterministic, and is her best response to the strategies of all the other users. However, the assumptions necessary for a pure Nash equilibrium to hold may be too stringent for practical systems. Using three case studies on computer security, TCP congestion control, and network formation, we outline the limits of game-theoretic models relying on Nash equilibria, and we argue that considering competitive equilibria of a more general form may help reconcile predictions from game-theoretic models with empirically observed behavior.<|reference_end|>
arxiv
@article{christin2004near, title={Near Rationality and Competitive Equilibria in Networked Systems}, author={Nicolas Christin, Jens Grossklags and John Chuang}, journal={arXiv preprint arXiv:cs/0404040}, year={2004}, number={p2pecon TR-2004-04-CGC}, archivePrefix={arXiv}, eprint={cs/0404040}, primaryClass={cs.GT cs.NI} }
christin2004near
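To make the notion of a pure Nash equilibrium concrete: a strategy pair is a pure equilibrium when neither player gains by deviating unilaterally. The brute-force check below uses textbook payoff matrices, not data from the paper, and also shows a game with no pure equilibrium at all, one reason pure-equilibrium models can be too stringent:

```python
# Enumerate pure Nash equilibria of a two-player game.
import itertools
import numpy as np

def pure_nash(A, B):                    # A: row-player payoffs, B: column-player
    eqs = []
    for i, j in itertools.product(range(A.shape[0]), range(A.shape[1])):
        if A[i, j] >= A[:, j].max() and B[i, j] >= B[i, :].max():
            eqs.append((i, j))
    return eqs

A = np.array([[-1, -3], [0, -2]])       # Prisoner's Dilemma: cooperate / defect
B = A.T
print(pure_nash(A, B))                  # [(1, 1)]: mutual defection
pennies = np.array([[1, -1], [-1, 1]])  # matching pennies
print(pure_nash(pennies, -pennies))     # []: no pure Nash equilibrium exists
```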
arxiv-671799
cs/0404041
NLOMJ--Natural Language Object Model in Java
<|reference_start|>NLOMJ--Natural Language Object Model in Java: In this paper we present NLOMJ--a natural language object model in Java, with English as the experimental language. This model describes the grammar elements of any permissible expression in a natural language and their complicated relations with each other via the concept of "Object" in OOP (Object-Oriented Programming). Directly mapped to the syntax and semantics of the natural language, it can be used in information retrieval as a linguistic method. Around the UML diagram of the NLOMJ, the important classes (Sentence, Clause and Phrase) and their subclasses are introduced and their syntactic and semantic meanings are explained.<|reference_end|>
arxiv
@article{jia2004nlomj--natural, title={NLOMJ--Natural Language Object Model in Java}, author={Jiyou Jia}, journal={arXiv preprint arXiv:cs/0404041}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404041}, primaryClass={cs.CL cs.PL} }
jia2004nlomj--natural
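A hypothetical toy rendering of the Sentence/Clause/Phrase hierarchy the abstract names, written in Python rather than the paper's Java, and with invented fields; the actual NLOMJ class design is not reproduced here:

```python
# Invented minimal class hierarchy inspired by the abstract's description.
class Phrase:
    def __init__(self, words):
        self.words = list(words)

class Clause:
    def __init__(self, subject: Phrase, predicate: Phrase):
        self.subject, self.predicate = subject, predicate

class Sentence:
    def __init__(self, clauses):
        self.clauses = list(clauses)
    def text(self):
        return " ".join(
            " ".join(c.subject.words + c.predicate.words) for c in self.clauses
        )

s = Sentence([Clause(Phrase(["the", "model"]), Phrase(["maps", "syntax"]))])
print(s.text())  # the model maps syntax
```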
arxiv-671800
cs/0404042
Extraction of topological features from communication network topological patterns using self-organizing feature maps
<|reference_start|>Extraction of topological features from communication network topological patterns using self-organizing feature maps: Different classes of communication network topologies and their representation in the form of the adjacency matrix and its eigenvalues are presented. A self-organizing feature map neural network is used to map different classes of communication network topological patterns. The neural network simulation results are reported.<|reference_end|>
arxiv
@article{ali2004extraction, title={Extraction of topological features from communication network topological patterns using self-organizing feature maps}, author={W. Ali, R.J. Mondragon and F. Alavi}, journal={arXiv preprint arXiv:cs/0404042}, year={2004}, archivePrefix={arXiv}, eprint={cs/0404042}, primaryClass={cs.NE cs.CV} }
ali2004extraction
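A minimal sketch of the feature-extraction step the abstract describes: represent each topology by its adjacency matrix and use the sorted eigenvalue spectrum as the pattern vector a self-organizing map would then cluster. Only the spectral features are computed here; the SOM itself is omitted:

```python
# Adjacency-matrix eigenvalue spectra as topology features (illustrative).
import numpy as np

def spectrum(adj):
    vals = np.linalg.eigvalsh(np.asarray(adj, dtype=float))  # symmetric matrix
    return np.sort(vals)[::-1]                               # descending order

ring4 = [[0, 1, 0, 1],
         [1, 0, 1, 0],
         [0, 1, 0, 1],
         [1, 0, 1, 0]]                 # 4-cycle
star4 = [[0, 1, 1, 1],
         [1, 0, 0, 0],
         [1, 0, 0, 0],
         [1, 0, 0, 0]]                 # star K(1,3)
print(spectrum(ring4))  # approx [ 2, 0, 0, -2]
print(spectrum(star4))  # approx [ 1.73, 0, 0, -1.73], i.e. +/- sqrt(3)
```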