corpus_id
stringlengths
7
12
paper_id
stringlengths
9
16
title
stringlengths
1
261
abstract
stringlengths
70
4.02k
source
stringclasses
1 value
bibtex
stringlengths
208
20.9k
citation_key
stringlengths
6
100
arxiv-673401
cs/0510030
A Near Maximum Likelihood Decoding Algorithm for MIMO Systems Based on Semi-Definite Programming
<|reference_start|>A Near Maximum Likelihood Decoding Algorithm for MIMO Systems Based on Semi-Definite Programming: In Multi-Input Multi-Output (MIMO) systems, Maximum-Likelihood (ML) decoding is equivalent to finding the closest lattice point in an N-dimensional complex space. In general, this problem is known to be NP hard. In this paper, we propose a quasi-maximum likelihood algorithm based on Semi-Definite Programming (SDP). We introduce several SDP relaxation models for MIMO systems, with increasing complexity. We use interior-point methods for solving the models and obtain a near-ML performance with polynomial computational complexity. Lattice basis reduction is applied to further reduce the computational complexity of solving these models. The proposed relaxation models are also used for soft output decoding in MIMO systems.<|reference_end|>
arxiv
@article{mobasher2005a, title={A Near Maximum Likelihood Decoding Algorithm for MIMO Systems Based on Semi-Definite Programming}, author={Amin Mobasher and Mahmoud Taherzadeh and Renata Sotirov and Amir K. Khandani}, journal={arXiv preprint arXiv:cs/0510030}, year={2005}, number={UW-E\&CE\#2005-12}, archivePrefix={arXiv}, eprint={cs/0510030}, primaryClass={cs.IT math.IT} }
mobasher2005a
arxiv-673402
cs/0510031
Computing Pure Nash Equilibria via Markov Random Fields
<|reference_start|>Computing Pure Nash Equilibria via Markov Random Fields: In this paper we present a novel generic mapping between Graphical Games and Markov Random Fields so that pure Nash equilibria in the former can be found by statistical inference on the latter. Thus, the problem of deciding whether a graphical game has a pure Nash equilibrium, a well-known intractable problem, can be attacked by well-established algorithms such as Belief Propagation, Junction Trees, Markov Chain Monte Carlo and Simulated Annealing. Large classes of graphical games become thus tractable, including all classes already known, but also new classes such as the games with O(log n) treewidth.<|reference_end|>
arxiv
@article{daskalakis2005computing, title={Computing Pure Nash Equilibria via Markov Random Fields}, author={Constantinos Daskalakis}, journal={arXiv preprint arXiv:cs/0510031}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510031}, primaryClass={cs.GT} }
daskalakis2005computing
arxiv-673403
cs/0510032
Polar Polytopes and Recovery of Sparse Representations
<|reference_start|>Polar Polytopes and Recovery of Sparse Representations: Suppose we have a signal y which we wish to represent using a linear combination of a number of basis atoms a_i, y=sum_i x_i a_i = Ax. The problem of finding the minimum L0 norm representation for y is a hard problem. The Basis Pursuit (BP) approach proposes to find the minimum L1 norm representation instead, which corresponds to a linear program (LP) that can be solved using modern LP techniques, and several recent authors have given conditions for the BP (minimum L1 norm) and sparse (minimum L0 solutions) representations to be identical. In this paper, we explore this sparse representation problem using the geometry of convex polytopes, as recently introduced into the field by Donoho. By considering the dual LP we find that the so-called polar polytope P of the centrally-symmetric polytope P whose vertices are the atom pairs +-a_i is particularly helpful in providing us with geometrical insight into optimality conditions given by Fuchs and Tropp for non-unit-norm atom sets. In exploring this geometry we are able to tighten some of these earlier results, showing for example that the Fuchs condition is both necessary and sufficient for L1-unique-optimality, and that there are situations where Orthogonal Matching Pursuit (OMP) can eventually find all L1-unique-optimal solutions with m nonzeros even if ERC fails for m, if allowed to run for more than m steps.<|reference_end|>
arxiv
@article{plumbley2005polar, title={Polar Polytopes and Recovery of Sparse Representations}, author={Mark D. Plumbley}, journal={On Polar Polytopes and the Recovery of Sparse Representations. IEEE Trans. Inf. Theory 53(9), 3188-3195, 2007}, year={2005}, doi={10.1109/TIT.2007.903129}, archivePrefix={arXiv}, eprint={cs/0510032}, primaryClass={cs.IT math.IT} }
plumbley2005polar
arxiv-673404
cs/0510033
Coding for the Optical Channel: the Ghost-Pulse Constraint
<|reference_start|>Coding for the Optical Channel: the Ghost-Pulse Constraint: We consider a number of constrained coding techniques that can be used to mitigate a nonlinear effect in the optical fiber channel that causes the formation of spurious pulses, called ``ghost pulses.'' Specifically, if $b_1 b_2 ... b_{n}$ is a sequence of bits sent across an optical channel, such that $b_k=b_l=b_m=1$ for some $k,l,m$ (not necessarily all distinct) but $b_{k+l-m} = 0$, then the ghost-pulse effect causes $b_{k+l-m}$ to change to 1, thereby creating an error. We design and analyze several coding schemes using binary and ternary sequences constrained so as to avoid patterns that give rise to ghost pulses. We also discuss the design of encoders and decoders for these coding schemes.<|reference_end|>
arxiv
@article{kashyap2005coding, title={Coding for the Optical Channel: the Ghost-Pulse Constraint}, author={Navin Kashyap, Paul H. Siegel and Alexander Vardy}, journal={arXiv preprint arXiv:cs/0510033}, year={2005}, doi={10.1109/TIT.2005.860409}, archivePrefix={arXiv}, eprint={cs/0510033}, primaryClass={cs.IT cs.DM math.IT} }
kashyap2005coding
arxiv-673405
cs/0510034
COMODI: On the Graphical User Interface
<|reference_start|>COMODI: On the Graphical User Interface: We propose a series of features for the graphical user interface (GUI) of the COmputational MOdule Integrator (COMODI) \cite{Synasc05a}\cite{COMODI}. In view of the special requirements that a COMODI type of framework for scientific computing imposes and inspiring from existing solutions that provide advanced graphical visual programming environments, we identify those elements and associated behaviors that will have to find their way into the first release of COMODI.<|reference_end|>
arxiv
@article{lázár2005comodi:, title={COMODI: On the Graphical User Interface}, author={Zsolt I. L{\'a}z{\'a}r and Andreea Fanea and Drago{\c{s}} Petra{\c{s}}cu and Vladiela Ciobotariu-Boer and Bazil P{\^a}rv}, journal={arXiv preprint arXiv:cs/0510034}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510034}, primaryClass={cs.HC cs.CE cs.MS} }
lázár2005comodi:
arxiv-673406
cs/0510035
Design and Performance Analysis of a New Class of Rate Compatible Serial Concatenated Convolutional Codes
<|reference_start|>Design and Performance Analysis of a New Class of Rate Compatible Serial Concatenated Convolutional Codes: In this paper, we provide a performance analysis of a new class of serial concatenated convolutional codes (SCCC) where the inner encoder can be punctured beyond the unitary rate. The puncturing of the inner encoder is not limited to inner coded bits, but extended to systematic bits. Moreover, it is split into two different puncturings, in correspondence with inner code systematic bits and parity bits. We derive the analytical upper bounds to the error probability of this particular code structure and address suitable design guidelines for the inner code puncturing patterns. We show that the percentile of systematic and parity bits to be deleted strongly depends on the SNR region of interest. In particular, to lower the error floor it is advantageous to put more puncturing on inner systematic bits. Furthermore, we show that puncturing of inner systematic bits should be interleaver dependent. Based on these considerations, we derive design guidelines to obtain well-performing rate-compatible SCCC families. Throughout the paper, the performance of the proposed codes are compared with analytical bounds, and with the performance of PCCC and SCCC proposed in the literature.<|reference_end|>
arxiv
@article{amat2005design, title={Design and Performance Analysis of a New Class of Rate Compatible Serial Concatenated Convolutional Codes}, author={Alexandre {Graell i Amat} and Guido Montorsi and Francesca Vatta}, journal={arXiv preprint arXiv:cs/0510035}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510035}, primaryClass={cs.IT math.IT} }
amat2005design
arxiv-673407
cs/0510036
Semantic Optimization Techniques for Preference Queries
<|reference_start|>Semantic Optimization Techniques for Preference Queries: Preference queries are relational algebra or SQL queries that contain occurrences of the winnow operator ("find the most preferred tuples in a given relation"). Such queries are parameterized by specific preference relations. Semantic optimization techniques make use of integrity constraints holding in the database. In the context of semantic optimization of preference queries, we identify two fundamental properties: containment of preference relations relative to integrity constraints and satisfaction of order axioms relative to integrity constraints. We show numerous applications of those notions to preference query evaluation and optimization. As integrity constraints, we consider constraint-generating dependencies, a class generalizing functional dependencies. We demonstrate that the problems of containment and satisfaction of order axioms can be captured as specific instances of constraint-generating dependency entailment. This makes it possible to formulate necessary and sufficient conditions for the applicability of our techniques as constraint validity problems. We characterize the computational complexity of such problems.<|reference_end|>
arxiv
@article{chomicki2005semantic, title={Semantic Optimization Techniques for Preference Queries}, author={Jan Chomicki}, journal={arXiv preprint arXiv:cs/0510036}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510036}, primaryClass={cs.DB cs.AI cs.LO} }
chomicki2005semantic
arxiv-673408
cs/0510037
Hi\'erarchisation des r\`egles d'association en fouille de textes
<|reference_start|>Hi\'erarchisation des r\`egles d'association en fouille de textes: Extraction of association rules is widely used as a data mining method. However, one of the limits of this approach comes from the large number of extracted rules and the difficulty for a human expert to deal with the totality of these rules. We propose to solve this problem by structuring the set of rules into a hierarchy. The expert can then explore the rules, moving from one rule to a more general one when going up the hierarchy, or to a more specific one when going down. Rules are structured at two levels. The global level aims at building a hierarchy from the set of rules extracted. Thus we define a first type of rule subsumption relying on Galois lattices. The second level consists in a local and more detailed analysis of each rule. It generates, for a given rule, a set of generalization rules structured into a local hierarchy. This leads to the definition of a second type of subsumption. This subsumption comes from inductive logic programming and integrates a terminological model.<|reference_end|>
arxiv
@article{bendaoud2005hi\'{e}rarchisation, title={Hi\'{e}rarchisation des r\`{e}gles d'association en fouille de textes}, author={Rokia Bendaoud (INRIA Lorraine - LORIA) and Yannick Toussaint (INRIA Lorraine - LORIA) and Amedeo Napoli (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0510037}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510037}, primaryClass={cs.IR cs.AI} }
bendaoud2005hi\'{e}rarchisation
arxiv-673409
cs/0510038
Learning Unions of $\omega(1)$-Dimensional Rectangles
<|reference_start|>Learning Unions of $\omega(1)$-Dimensional Rectangles: We consider the problem of learning unions of rectangles over the domain $[b]^n$, in the uniform distribution membership query learning setting, where both b and n are "large". We obtain poly$(n, \log b)$-time algorithms for the following classes: - poly$(n \log b)$-way Majority of $O(\frac{\log(n \log b)} {\log \log(n \log b)})$-dimensional rectangles. - Union of poly$(\log(n \log b))$ many $O(\frac{\log^2 (n \log b)} {(\log \log(n \log b) \log \log \log (n \log b))^2})$-dimensional rectangles. - poly$(n \log b)$-way Majority of poly$(n \log b)$-Or of disjoint $O(\frac{\log(n \log b)} {\log \log(n \log b)})$-dimensional rectangles. Our main algorithmic tool is an extension of Jackson's boosting- and Fourier-based Harmonic Sieve algorithm [Jackson 1997] to the domain $[b]^n$, building on work of [Akavia, Goldwasser, Safra 2003]. Other ingredients used to obtain the results stated above are techniques from exact learning [Beimel, Kushilevitz 1998] and ideas from recent work on learning augmented $AC^{0}$ circuits [Jackson, Klivans, Servedio 2002] and on representing Boolean functions as thresholds of parities [Klivans, Servedio 2001].<|reference_end|>
arxiv
@article{atici2005learning, title={Learning Unions of $\omega(1)$-Dimensional Rectangles}, author={Alp Atici and Rocco A. Servedio}, journal={Theoretical Computer Science, Vol. 405, No. 3, 209--222 (2008)}, year={2005}, doi={10.1016/j.tcs.2008.06.036}, archivePrefix={arXiv}, eprint={cs/0510038}, primaryClass={cs.LG} }
atici2005learning
arxiv-673410
cs/0510039
DyNoC: A Dynamic Infrastructure for Communication in Dynamically Reconfigurable Devices
<|reference_start|>DyNoC: A Dynamic Infrastructure for Communication in Dynamically Reconfigurable Devices: A new paradigm to support the communication among modules dynamically placed on a reconfigurable device at run-time is presented. Based on the network on chip (NoC) infrastructure, we developed a dynamic communication infrastructure as well as routing methodologies capable to handle routing in a NoC with obstacles created by dynamically placed components. We prove the unrestricted reachability of components and pins, the deadlock-freeness and we finally show the feasibility of our approach by means on real life example applications.<|reference_end|>
arxiv
@article{bobda2005dynoc:, title={DyNoC: A Dynamic Infrastructure for Communication in Dynamically Reconfigurable Devices}, author={Christophe Bobda and Ali Ahmadinia and Mateusz Majer and Juergen Teich and Sandor P. Fekete and Jan van der Veen}, journal={arXiv preprint arXiv:cs/0510039}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510039}, primaryClass={cs.AR} }
bobda2005dynoc:
arxiv-673411
cs/0510040
The "system of constraints"
<|reference_start|>The "system of constraints": This paper proposes that the mathematical relationship between an entropy distribution and its limit offers some new insight into system performance. This relationship is used to quantify variation among the entities of a system, where variation is defined as tolerance, option, specification or implementation variation among the entities of a system. Variation has a significant and increasing impact on communications system performance. This paper introduces means to identify, quantify and reduce such performance variations.<|reference_end|>
arxiv
@article{krechmer2005the, title={The "...system of constraints"}, author={Ken Krechmer}, journal={arXiv preprint arXiv:cs/0510040}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510040}, primaryClass={cs.IT math.IT} }
krechmer2005the
arxiv-673412
cs/0510041
Feynman graphs and related Hopf algebras
<|reference_start|>Feynman graphs and related Hopf algebras: In a recent series of communications we have shown that the reordering problem of bosons leads to certain combinatorial structures. These structures may be associated with a certain graphical description. In this paper, we show that there is a Hopf Algebra structure associated with this problem which is, in a certain sense, unique.<|reference_end|>
arxiv
@article{duchamp2005feynman, title={Feynman graphs and related Hopf algebras}, author={G{\'e}rard Henry Edmond Duchamp (LIPN) and Pawel Blasiak (LPTL) and Andrzej Horzela (LPTL) and Karol A. Penson (LPTL) and Allan I. Solomon (LPTL)}, journal={arXiv preprint arXiv:cs/0510041}, year={2005}, doi={10.1088/1742-6596/30/1/014}, archivePrefix={arXiv}, eprint={cs/0510041}, primaryClass={cs.SC cs.DM math-ph math.CO math.MP quant-ph} }
duchamp2005feynman
arxiv-673413
cs/0510042
Secure and \sl Practical Identity-Based Encryption
<|reference_start|>Secure and \sl Practical Identity-Based Encryption: In this paper, we present a variant of Waters' Identity-Based Encryption scheme with a much smaller public-key size (only a few kilobytes). We show that this variant is semantically secure against passive adversaries in the standard model.\smallskip In essence, the new scheme divides Waters' public key size by a factor $\ell$ at the cost of (negligibly) reducing security by $\ell$ bits. Therefore, our construction settles an open question asked by Waters and constitutes the first fully secure {\sl practical} Identity-Based Encryption scheme<|reference_end|>
arxiv
@article{naccache2005secure, title={Secure and {\sl Practical} Identity-Based Encryption}, author={David Naccache}, journal={arXiv preprint arXiv:cs/0510042}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510042}, primaryClass={cs.CR} }
naccache2005secure
arxiv-673414
cs/0510043
On Minimal Pseudo-Codewords of Tanner Graphs from Projective Planes
<|reference_start|>On Minimal Pseudo-Codewords of Tanner Graphs from Projective Planes: We would like to better understand the fundamental cone of Tanner graphs derived from finite projective planes. Towards this goal, we discuss bounds on the AWGNC and BSC pseudo-weight of minimal pseudo-codewords of such Tanner graphs, on one hand, and study the structure of minimal pseudo-codewords, on the other.<|reference_end|>
arxiv
@article{vontobel2005on, title={On Minimal Pseudo-Codewords of Tanner Graphs from Projective Planes}, author={Pascal O. Vontobel and Roxana Smarandache}, journal={Proc. 43rd Allerton Conference on Communications, Control, and Computing, Allerton House, Monticello, Illinois, USA, Sept. 28-30, 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510043}, primaryClass={cs.IT cs.DM math.IT} }
vontobel2005on
arxiv-673415
cs/0510044
Belief Propagation Based Multi--User Detection
<|reference_start|>Belief Propagation Based Multi--User Detection: We apply belief propagation (BP) to multi--user detection in a spread spectrum system, under the assumption of Gaussian symbols. We prove that BP is both convergent and allows to estimate the correct conditional expectation of the input symbols. It is therefore an optimal --minimum mean square error-- detection algorithm. This suggests the possibility of designing BP detection algorithms for more general systems. As a byproduct we rederive the Tse-Hanly formula for minimum mean square error without any recourse to random matrix theory.<|reference_end|>
arxiv
@article{montanari2005belief, title={Belief Propagation Based Multi--User Detection}, author={Andrea Montanari and Balaji Prabhakar and David Tse}, journal={arXiv preprint arXiv:cs/0510044}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510044}, primaryClass={cs.IT math.IT} }
montanari2005belief
arxiv-673416
cs/0510045
Why We Can Not Surpass Capacity: The Matching Condition
<|reference_start|>Why We Can Not Surpass Capacity: The Matching Condition: We show that iterative coding systems can not surpass capacity using only quantities which naturally appear in density evolution. Although the result in itself is trivial, the method which we apply shows that in order to achieve capacity the various components in an iterative coding system have to be perfectly matched. This generalizes the perfect matching condition which was previously known for the case of transmission over the binary erasure channel to the general class of binary-input memoryless output-symmetric channels. Potential applications of this perfect matching condition are the construction of capacity-achieving degree distributions and the determination of the number required iterations as a function of the multiplicative gap to capacity.<|reference_end|>
arxiv
@article{measson2005why, title={Why We Can Not Surpass Capacity: The Matching Condition}, author={Cyril Measson and Andrea Montanari and Rudiger Urbanke}, journal={arXiv preprint arXiv:cs/0510045}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510045}, primaryClass={cs.IT math.IT} }
measson2005why
arxiv-673417
cs/0510046
Defining a Comprehensive Threat Model for High Performance Computational Clusters
<|reference_start|>Defining a Comprehensive Threat Model for High Performance Computational Clusters: Over the past decade, high performance computational (HPC) clusters have become mainstream in academic and industrial settings as accessible means of computation. Throughout their proliferation, HPC security has been a secondary concern to performance. It is evident, however, that ensuring HPC security presents different challenges than the ones faced when dealing with traditional networks. To design suitable security measures for high performance computing, it is necessary to first realize the threats faced by such an environment. This task can be accomplished by the means of constructing a comprehensive threat model. To our knowledge, no such threat model exists with regards to Cluster Computing. In this paper, we explore the unique challenges of securing HPCs and propose a threat model based on the classical Confidentiality, Integrity and Availability security principles.<|reference_end|>
arxiv
@article{mogilevsky2005defining, title={Defining a Comprehensive Threat Model for High Performance Computational Clusters}, author={Dmitry Mogilevsky and Adam Lee and William Yurcik}, journal={arXiv preprint arXiv:cs/0510046}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510046}, primaryClass={cs.CR cs.DC} }
mogilevsky2005defining
arxiv-673418
cs/0510047
Geometrical relations between space time block code designs and complexity reduction
<|reference_start|>Geometrical relations between space time block code designs and complexity reduction: In this work, the geometric relation between space time block code design for the coherent channel and its non-coherent counterpart is exploited to get an analogue of the information theoretic inequality $I(X;S)\le I((X,H);S)$ in terms of diversity. It provides a lower bound on the performance of non-coherent codes when used in coherent scenarios. This leads in turn to a code design decomposition result splitting coherent code design into two complexity reduced sub tasks. Moreover a geometrical criterion for high performance space time code design is derived.<|reference_end|>
arxiv
@article{henkel2005geometrical, title={Geometrical relations between space time block code designs and complexity reduction}, author={Oliver Henkel}, journal={IEEE Trans. Inform. Theory, vol. 52, no. 12 (Dec 2006), 5324-5335}, year={2005}, doi={10.1109/TIT.2006.885457}, archivePrefix={arXiv}, eprint={cs/0510047}, primaryClass={cs.IT math.IT} }
henkel2005geometrical
arxiv-673419
cs/0510048
Deterministic boundary recognition and topology extraction for large sensor networks
<|reference_start|>Deterministic boundary recognition and topology extraction for large sensor networks: We present a new framework for the crucial challenge of self-organization of a large sensor network. The basic scenario can be described as follows: Given a large swarm of immobile sensor nodes that have been scattered in a polygonal region, such as a street network. Nodes have no knowledge of size or shape of the environment or the position of other nodes. Moreover, they have no way of measuring coordinates, geometric distances to other nodes, or their direction. Their only way of interacting with other nodes is to send or to receive messages from any node that is within communication range. The objective is to develop algorithms and protocols that allow self-organization of the swarm into large-scale structures that reflect the structure of the street network, setting the stage for global routing, tracking and guiding algorithms.<|reference_end|>
arxiv
@article{kroeller2005deterministic, title={Deterministic boundary recognition and topology extraction for large sensor networks}, author={Alexander Kroeller and Sandor P. Fekete and Dennis Pfisterer and Stefan Fischer}, journal={arXiv preprint arXiv:cs/0510048}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510048}, primaryClass={cs.DC cs.CG} }
kroeller2005deterministic
arxiv-673420
cs/0510049
Bounds on the Pseudo-Weight of Minimal Pseudo-Codewords of Projective Geometry Codes
<|reference_start|>Bounds on the Pseudo-Weight of Minimal Pseudo-Codewords of Projective Geometry Codes: In this paper we focus our attention on a family of finite geometry codes, called type-I projective geometry low-density parity-check (PG-LDPC) codes, that are constructed based on the projective planes PG{2,q). In particular, we study their minimal codewords and pseudo-codewords, as it is known that these vectors characterize completely the code performance under maximum-likelihood decoding and linear programming decoding, respectively. The main results of this paper consist of upper and lower bounds on the pseudo-weight of the minimal pseudo-codewords of type-I PG-LDPC codes.<|reference_end|>
arxiv
@article{smarandache2005bounds, title={Bounds on the Pseudo-Weight of Minimal Pseudo-Codewords of Projective Geometry Codes}, author={Roxana Smarandache and Marcel Wauer}, journal={arXiv preprint arXiv:cs/0510049}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510049}, primaryClass={cs.IT cs.DM math.IT} }
smarandache2005bounds
arxiv-673421
cs/0510050
Integration of the DOLCE top-level ontology into the OntoSpec methodology
<|reference_start|>Integration of the DOLCE top-level ontology into the OntoSpec methodology: This report describes a new version of the OntoSpec methodology for ontology building. Defined by the LaRIA Knowledge Engineering Team (University of Picardie Jules Verne, Amiens, France), OntoSpec aims at helping builders to model ontological knowledge (upstream of formal representation). The methodology relies on a set of rigorously-defined modelling primitives and principles. Its application leads to the elaboration of a semi-informal ontology, which is independent of knowledge representation languages. We recently enriched the OntoSpec methodology by endowing it with a new resource, the DOLCE top-level ontology defined at the LOA (IST-CNR, Trento, Italy). The goal of this integration is to provide modellers with additional help in structuring application ontologies, while maintaining independence vis-\`{a}-vis formal representation languages. In this report, we first provide an overview of the OntoSpec methodology's general principles and then describe the DOLCE re-engineering process. A complete version of DOLCE-OS (i.e. a specification of DOLCE in the semi-informal OntoSpec language) is presented in an appendix.<|reference_end|>
arxiv
@article{kassel2005integration, title={Integration of the DOLCE top-level ontology into the OntoSpec methodology}, author={Gilles Kassel (LaRIA)}, journal={arXiv preprint arXiv:cs/0510050}, year={2005}, number={LRR-2005-08}, archivePrefix={arXiv}, eprint={cs/0510050}, primaryClass={cs.AI} }
kassel2005integration
arxiv-673422
cs/0510051
Numerical resolution of some BVP using Bernstein polynomials
<|reference_start|>Numerical resolution of some BVP using Bernstein polynomials: In this work we present a method, based on the use of Bernstein polynomials, for the numerical resolution of some boundary values problems. The computations have not need of particular approximations of derivatives, such as finite differences, or particular techniques, such as finite elements. Also, the method doesn't require the use of matrices, as in resolution of linear algebraic systems, nor the use of like-Newton algorithms, as in resolution of non linear sets of equations. An initial equation is resolved only once, then the method is based on iterated evaluations of appropriate polynomials.<|reference_end|>
arxiv
@article{argentini2005numerical, title={Numerical resolution of some BVP using Bernstein polynomials}, author={Gianluca Argentini}, journal={Posted since 2005-11-29 at Applied Mathematics E-Notes, http://www.math.nthu.edu.tw/~amen/}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510051}, primaryClass={cs.NA cs.MS math.CA physics.comp-ph} }
argentini2005numerical
arxiv-673423
cs/0510052
Remarks on "Toward Compact Interdomain Routing"
<|reference_start|>Remarks on "Toward Compact Interdomain Routing": This paper critically examines some propositions and arguments of cs.NI/0508021 regarding applicability of hierarchical routing and perspectives of compact routing. Arguments against the former are found to be inaccurate while the latter is found to be equivalent to well-known deployed solutions. Also, multiple (stacked) application of compact-routing solutions is found to be equivalent to hierarchical routing.<|reference_end|>
arxiv
@article{grishchenko2005remarks, title={Remarks on "Toward Compact Interdomain Routing"}, author={Victor S. Grishchenko}, journal={arXiv preprint arXiv:cs/0510052}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510052}, primaryClass={cs.NI} }
grishchenko2005remarks
arxiv-673424
cs/0510053
A pair of trees without a simultaneous geometric embedding in the plane
<|reference_start|>A pair of trees without a simultaneous geometric embedding in the plane: Any planar graph has a crossing-free straight-line drawing in the plane. A simultaneous geometric embedding of two n-vertex graphs is a straight-line drawing of both graphs on a common set of n points, such that the edges within each individual graph do not cross. We consider simultaneous embeddings of two labeled trees, with prescribed vertex correspondences, and present an instance of such a pair that cannot be embedded. Further we provide an example of a planar graph that cannot be embedded together with a path when vertex correspondences are given.<|reference_end|>
arxiv
@article{kutz2005a, title={A pair of trees without a simultaneous geometric embedding in the plane}, author={Martin Kutz}, journal={arXiv preprint arXiv:cs/0510053}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510053}, primaryClass={cs.CG} }
kutz2005a
arxiv-673425
cs/0510054
The Nature of Novelty Detection
<|reference_start|>The Nature of Novelty Detection: Sentence level novelty detection aims at reducing redundant sentences from a sentence list. In the task, sentences appearing later in the list with no new meanings are eliminated. Aiming at a better accuracy for detecting redundancy, this paper reveals the nature of the novelty detection task currently overlooked by the Novelty community $-$ Novelty as a combination of the partial overlap (PO, two sentences sharing common facts) and complete overlap (CO, the first sentence covers all the facts of the second sentence) relations. By formalizing novelty detection as a combination of the two relations between sentences, new viewpoints toward techniques dealing with Novelty are proposed. Among the methods discussed, the similarity, overlap, pool and language modeling approaches are commonly used. Furthermore, a novel approach, selected pool method is provided, which is immediate following the nature of the task. Experimental results obtained on all the three currently available novelty datasets showed that selected pool is significantly better or no worse than the current methods. Knowledge about the nature of the task also affects the evaluation methodologies. We propose new evaluation measures for Novelty according to the nature of the task, as well as possible directions for future study.<|reference_end|>
arxiv
@article{zhao2005the, title={The Nature of Novelty Detection}, author={Le Zhao, Min Zhang, Shaoping Ma}, journal={arXiv preprint arXiv:cs/0510054}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510054}, primaryClass={cs.IR cs.CL} }
zhao2005the
arxiv-673426
cs/0510055
Degrees of Freedom in Multiuser MIMO
<|reference_start|>Degrees of Freedom in Multiuser MIMO: We explore the available degrees of freedom for various multiuser MIMO communication scenarios such as the multiple access, broadcast, interference, relay, X and Z channels. For the two user MIMO interference channel, we find a general inner bound and a genie-aided outer bound that give us the exact number of degrees of freedom in many cases. We also study a share-and-transmit scheme for transmitter cooperation. For the share-and-transmit scheme, we show how the gains of transmitter cooperation are entirely offset by the cost of enabling that cooperation so that the available degrees of freedom are not increased.<|reference_end|>
arxiv
@article{jafar2005degrees, title={Degrees of Freedom in Multiuser MIMO}, author={Syed A. Jafar, Maralle J. Fakhereddin}, journal={arXiv preprint arXiv:cs/0510055}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510055}, primaryClass={cs.IT math.IT} }
jafar2005degrees
arxiv-673427
cs/0510056
First-Order Modeling and Stability Analysis of Illusory Contours
<|reference_start|>First-Order Modeling and Stability Analysis of Illusory Contours: In visual cognition, illusions help elucidate certain intriguing latent perceptual functions of the human vision system, and their proper mathematical modeling and computational simulation are therefore deeply beneficial to both biological and computer vision. Inspired by existent prior works, the current paper proposes a first-order energy-based model for analyzing and simulating illusory contours. The lower complexity of the proposed model facilitates rigorous mathematical analysis on the detailed geometric structures of illusory contours. After being asymptotically approximated by classical active contours, the proposed model is then robustly computed using the celebrated level-set method of Osher and Sethian (J. Comput. Phys., 79:12-49, 1988) with a natural supervising scheme. Potential cognitive implications of the mathematical results are addressed, and generic computational examples are demonstrated and discussed.<|reference_end|>
arxiv
@article{jung2005first-order, title={First-Order Modeling and Stability Analysis of Illusory Contours}, author={Yoon-Mo Jung and Jianhong Shen}, journal={arXiv preprint arXiv:cs/0510056}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510056}, primaryClass={cs.CV cs.AI} }
jung2005first-order
arxiv-673428
cs/0510057
Towards a diagrammatic modeling of the LinBox C++ linear algebra library
<|reference_start|>Towards a diagrammatic modeling of the LinBox C++ linear algebra library: We propose a new diagrammatic modeling language, DML. The paradigm used is that of the category theory and in particular of the pushout tool. We show that most of the object-oriented structures can be described with this tool and have many examples in C++, ranging from virtual inheritance and polymorphism to template genericity. With this powerful tool, we propose a quite simple description of the C++ LinBox library. This library has been designed for efficiency and genericity and therefore makes heavy usage of complex template and polymorphic mechanisms. By reverse engineering, we are able to describe in a simple manner the complex structure of archetypes in LinBox.<|reference_end|>
arxiv
@article{dumas2005towards, title={Towards a diagrammatic modeling of the LinBox C++ linear algebra library}, author={Jean-Guillaume Dumas (LMC - IMAG), Dominique Duval (LMC - IMAG)}, journal={arXiv preprint arXiv:cs/0510057}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510057}, primaryClass={cs.SC} }
dumas2005towards
arxiv-673429
cs/0510058
Precoding for 2x2 Doubly-Dispersive WSSUS Channels
<|reference_start|>Precoding for 2x2 Doubly-Dispersive WSSUS Channels: Optimal link adaption to the scattering function of wide sense stationary uncorrelated scattering (WSSUS) mobile communication channels is still an unsolved problem despite its importance for next-generation system design. In multicarrier transmission such link adaption is performed by pulse shaping which in turn is equivalent to precoding with respect to the second order channel statistics. In the present framework a translation of the precoder optimization problem into an optimization problem over trace class operators is used. This problem which is also well-known in the context of quantum information theory is unsolved in general due to its non-convex nature. However in very low dimension the problem formulation reveals an additional analytic structure which again admits the solution to the optimal precoder and multiplexing scheme. Hence, in this contribution the analytic solution of the problem for the 2x2 doubly--dispersive WSSUS channel is presented.<|reference_end|>
arxiv
@article{jung2005precoding, title={Precoding for 2x2 Doubly-Dispersive WSSUS Channels}, author={Peter Jung}, journal={arXiv preprint arXiv:cs/0510058}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510058}, primaryClass={cs.IT math.IT} }
jung2005precoding
arxiv-673430
cs/0510059
Cybercars : Past, Present and Future of the Technology
<|reference_start|>Cybercars : Past, Present and Future of the Technology: Automobile has become the dominant transport mode in the world in the last century. In order to meet a continuously growing demand for transport, one solution is to change the control approach for vehicle to full driving automation, which removes the driver from the control loop to improve efficiency and reduce accidents. Recent work shows that there are several realistic paths towards this deployment : driving assistance on passenger cars, automated commercial vehicles on dedicated infrastructures, and new forms of urban transport (car-sharing and cybercars). Cybercars have already been put into operation in Europe, and it seems that this approach could lead the way towards full automation on most urban, and later interurban infrastructures. The European project CyberCars has brought many improvements in the technology needed to operate cybercars over the last three years. A new, larger European project is now being prepared to carry this work further in order to meet more ambitious objectives in terms of safety and efficiency. This paper will present past and present technologies and will focus on the future developments.<|reference_end|>
arxiv
@article{parent2005cybercars, title={Cybercars : Past, Present and Future of the Technology}, author={Michel Parent (INRIA Rocquencourt), Arnaud De La Fortelle (INRIA Rocquencourt)}, journal={Dans ITS World Congress 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510059}, primaryClass={cs.RO} }
parent2005cybercars
arxiv-673431
cs/0510060
Optimal Transmit Covariance for Ergodic MIMO Channels
<|reference_start|>Optimal Transmit Covariance for Ergodic MIMO Channels: In this paper we consider the computation of channel capacity for ergodic multiple-input multiple-output channels with additive white Gaussian noise. Two scenarios are considered. Firstly, a time-varying channel is considered in which both the transmitter and the receiver have knowledge of the channel realization. The optimal transmission strategy is water-filling over space and time. It is shown that this may be achieved in a causal, indeed instantaneous fashion. In the second scenario, only the receiver has perfect knowledge of the channel realization, while the transmitter has knowledge of the channel gain probability law. In this case we determine an optimality condition on the input covariance for ergodic Gaussian vector channels with arbitrary channel distribution under the condition that the channel gains are independent of the transmit signal. Using this optimality condition, we find an iterative algorithm for numerical computation of optimal input covariance matrices. Applications to correlated Rayleigh and Ricean channels are given.<|reference_end|>
arxiv
@article{hanlen2005optimal, title={Optimal Transmit Covariance for Ergodic MIMO Channels}, author={Leif W Hanlen and Alex J Grant}, journal={arXiv preprint arXiv:cs/0510060}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510060}, primaryClass={cs.IT math.IT} }
hanlen2005optimal
arxiv-673432
cs/0510061
Nonmonotonic Trust Management for P2P Applications
<|reference_start|>Nonmonotonic Trust Management for P2P Applications: Community decisions about access control in virtual communities are non-monotonic in nature. This means that they cannot be expressed in current, monotonic trust management languages such as the family of Role Based Trust Management languages (RT). To solve this problem we propose RT-, which adds a restricted form of negation to the standard RT language, thus admitting a controlled form of non-monotonicity. The semantics of RT- is discussed and presented in terms of the well-founded semantics for Logic Programs. Finally we discuss how chain discovery can be accomplished for RT-.<|reference_end|>
arxiv
@article{czenko2005nonmonotonic, title={Nonmonotonic Trust Management for P2P Applications}, author={M. Czenko, H. Tran, J. Doumen, S. Etalle, P. Hartel, J. den Hartog}, journal={arXiv preprint arXiv:cs/0510061}, year={2005}, number={TR-CTIT-05-22}, archivePrefix={arXiv}, eprint={cs/0510061}, primaryClass={cs.LO} }
czenko2005nonmonotonic
arxiv-673433
cs/0510062
Using Interval Particle Filtering for Marker less 3D Human Motion Capture
<|reference_start|>Using Interval Particle Filtering for Marker less 3D Human Motion Capture: In this paper we present a new approach for marker less human motion capture from conventional camera feeds. The aim of our study is to recover 3D positions of key points of the body that can serve for gait analysis. Our approach is based on foreground segmentation, an articulated body model and particle filters. In order to be generic and simple no restrictive dynamic modelling was used. A new modified particle filtering algorithm was introduced. It is used efficiently to search the model configuration space. This new algorithm which we call Interval Particle Filtering reorganizes the configurations search space in an optimal deterministic way and proved to be efficient in tracking natural human movement. Results for human motion capture from a single camera are presented and compared to results obtained from a marker based system. The system proved to be able to track motion successfully even in partial occlusions.<|reference_end|>
arxiv
@article{saboune2005using, title={Using Interval Particle Filtering for Marker less 3D Human Motion Capture}, author={Jamal Saboune (INRIA Lorraine - LORIA), Fran{\c{c}}ois Charpillet (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0510062}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510062}, primaryClass={cs.AI} }
saboune2005using
arxiv-673434
cs/0510063
Markerless Human Motion Capture for Gait Analysis
<|reference_start|>Markerless Human Motion Capture for Gait Analysis: The aim of our study is to detect balance disorders and a tendency towards the falls in the elderly, knowing gait parameters. In this paper we present a new tool for gait analysis based on markerless human motion capture, from camera feeds. The system introduced here, recovers the 3D positions of several key points of the human body while walking. Foreground segmentation, an articulated body model and particle filtering are basic elements of our approach. No dynamic model is used thus this system can be described as generic and simple to implement. A modified particle filtering algorithm, which we call Interval Particle Filtering, is used to reorganise and search through the model's configurations search space in a deterministic optimal way. This algorithm was able to perform human movement tracking with success. Results from the treatment of a single cam feeds are shown and compared to results obtained using a marker based human motion capture system.<|reference_end|>
arxiv
@article{saboune2005markerless, title={Markerless Human Motion Capture for Gait Analysis}, author={Jamal Saboune (INRIA Lorraine - LORIA), Fran{\c{c}}ois Charpillet (INRIA Lorraine - LORIA)}, journal={arXiv preprint arXiv:cs/0510063}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510063}, primaryClass={cs.AI} }
saboune2005markerless
arxiv-673435
cs/0510064
Acyclic orientations with path constraints
<|reference_start|>Acyclic orientations with path constraints: Many well-known combinatorial optimization problems can be stated over the set of acyclic orientations of an undirected graph. For example, acyclic orientations with certain diameter constraints are closely related to the optimal solutions of the vertex coloring and frequency assignment problems. In this paper we introduce a linear programming formulation of acyclic orientations with path constraints, and discuss its use in the solution of the vertex coloring problem and some versions of the frequency assignment problem. A study of the polytope associated with the formulation is presented, including proofs of which constraints of the formulation are facet-defining and the introduction of new classes of valid inequalities.<|reference_end|>
arxiv
@article{figueiredo2005acyclic, title={Acyclic orientations with path constraints}, author={Rosa M. V. Figueiredo, Valmir C. Barbosa, Nelson Maculan, Cid C. Souza}, journal={RAIRO Operations Research 42 (2008), 455-467}, year={2005}, doi={10.1051/ro:2008028}, archivePrefix={arXiv}, eprint={cs/0510064}, primaryClass={cs.DM} }
figueiredo2005acyclic
arxiv-673436
cs/0510065
A new authentication protocol for revocable anonymity in ad-hoc networks
<|reference_start|>A new authentication protocol for revocable anonymity in ad-hoc networks: This paper describes a new protocol for authentication in ad-hoc networks. The protocol has been designed to meet specialized requirements of ad-hoc networks, such as lack of direct communication between nodes or requirements for revocable anonymity. At the same time, an ad-hoc authentication protocol must be resistant to spoofing, eavesdropping and playback, and man-in-the-middle attacks. The article analyzes existing authentication methods based on the Public Key Infrastructure, and finds that they have several drawbacks in ad-hoc networks. Therefore, a new authentication protocol, based on established cryptographic primitives (Merkle's puzzles and zero-knowledge proofs) is proposed. The protocol is studied for a model ad-hoc chat application that provides private conversations.<|reference_end|>
arxiv
@article{wierzbicki2005a, title={A new authentication protocol for revocable anonymity in ad-hoc networks}, author={Adam Wierzbicki, Aneta Zwierko and Zbigniew Kotulski}, journal={arXiv preprint arXiv:cs/0510065}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510065}, primaryClass={cs.CR cs.DC cs.NI} }
wierzbicki2005a
arxiv-673437
cs/0510066
The monadic second-order logic of graphs XVI: Canonical graph decompositions
<|reference_start|>The monadic second-order logic of graphs XVI : Canonical graph<br> decompositions: This article establishes that the split decomposition of graphs introduced by Cunnigham, is definable in Monadic Second-Order Logic.This result is actually an instance of a more general result covering canonical graph decompositions like the modular decomposition and the Tutte decomposition of 2-connected graphs into 3-connected components. As an application, we prove that the set of graphs having the same cycle matroid as a given 2-connected graph can be defined from this graph by Monadic Second-Order formulas.<|reference_end|>
arxiv
@article{courcelle2005the, title={The monadic second-order logic of graphs XVI: Canonical graph decompositions}, author={Bruno Courcelle}, journal={Logical Methods in Computer Science, Volume 2, Issue 2 (March 23, 2006) lmcs:2250}, year={2005}, doi={10.2168/LMCS-2(2:2)2006}, archivePrefix={arXiv}, eprint={cs/0510066}, primaryClass={cs.LO} }
courcelle2005the
arxiv-673438
cs/0510067
On the Spread of Random Interleaver
<|reference_start|>On the Spread of Random Interleaver: For a given blocklength we determine the number of interleavers which have spread equal to two. Using this, we find out the probability that a randomly chosen interleaver has spread two. We show that as blocklength increases, this probability increases but very quickly converges to the value $1-e^{-2} \approx 0.8647$. Subsequently, we determine a lower bound on the probability of an interleaver having spread at least $s$. We show that this lower bound converges to the value $e^{-2(s-2)^{2}}$, as the blocklength increases.<|reference_end|>
arxiv
@article{mazumdar2005on, title={On the Spread of Random Interleaver}, author={Arya Mazumdar, Adrish Banerjee and A K Chaturvedi}, journal={IEEE International Symposium on Information Theory 2005}, year={2005}, doi={10.1109/ISIT.2005.1523372}, archivePrefix={arXiv}, eprint={cs/0510067}, primaryClass={cs.IT math.IT} }
mazumdar2005on
arxiv-673439
cs/0510068
Ultra Wideband Impulse Radio Systems with Multiple Pulse Types
<|reference_start|>Ultra Wideband Impulse Radio Systems with Multiple Pulse Types: In an ultra wideband (UWB) impulse radio (IR) system, a number of pulses, each transmitted in an interval called a "frame", is employed to represent one information symbol. Conventionally, a single type of UWB pulse is used in all frames of all users. In this paper, IR systems with multiple types of UWB pulses are considered, where different types of pulses can be used in different frames by different users. Both stored-reference (SR) and transmitted-reference (TR) systems are considered. First, the spectral properties of a multi-pulse IR system with polarity randomization is investigated. It is shown that the average power spectral density is the average of the spectral contents of different pulse shapes. Then, approximate closed-form expressions for the bit error probability of a multi-pulse SR-IR system are derived for RAKE receivers in asynchronous multiuser environments. The effects of both inter-frame interference (IFI) and multiple-access interference (MAI) are analyzed. The theoretical and simulation results indicate that SR-IR systems that are more robust against IFI and MAI than a "conventional" SR-IR system can be designed with multiple types of ultra-wideband pulses. Finally, extensions to multi-pulse TR-IR systems are briefly described.<|reference_end|>
arxiv
@article{gezici2005ultra, title={Ultra Wideband Impulse Radio Systems with Multiple Pulse Types}, author={Sinan Gezici, Zafer Sahinoglu, Hisashi Kobayashi, and H. Vincent Poor}, journal={arXiv preprint arXiv:cs/0510068}, year={2005}, doi={10.1109/JSAC.2005.863880}, archivePrefix={arXiv}, eprint={cs/0510068}, primaryClass={cs.IT math.IT} }
gezici2005ultra
arxiv-673440
cs/0510069
Comparing Computational Power
<|reference_start|>Comparing Computational Power: It is common practice to compare the computational power of different models of computation. For example, the recursive functions are strictly more powerful than the primitive recursive functions, because the latter are a proper subset of the former (which includes Ackermann's function). Side-by-side with this "containment" method of measuring power, it is standard to use an approach based on "simulation". For example, one says that the (untyped) lambda calculus is as powerful--computationally speaking--as the partial recursive functions, because the lambda calculus can simulate all partial recursive functions by encoding the natural numbers as Church numerals. The problem is that unbridled use of these two ways of comparing power allows one to show that some computational models are strictly stronger than themselves! We argue that a better definition is that model A is strictly stronger than B if A can simulate B via some encoding, whereas B cannot simulate A under any encoding. We then show that the recursive functions are strictly stronger in this sense than the primitive recursive. We also prove that the recursive functions, partial recursive functions, and Turing machines are "complete", in the sense that no injective encoding can make them equivalent to any "hypercomputational" model.<|reference_end|>
arxiv
@article{boker2005comparing, title={Comparing Computational Power}, author={Udi Boker and Nachum Dershowitz}, journal={Logic Journal of the IGPL, vol. 14, no. 5, pp. 633-648, 2006}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510069}, primaryClass={cs.LO} }
boker2005comparing
arxiv-673441
cs/0510070
On Coding for Reliable Communication over Packet Networks
<|reference_start|>On Coding for Reliable Communication over Packet Networks: We present a capacity-achieving coding scheme for unicast or multicast over lossy packet networks. In the scheme, intermediate nodes perform additional coding yet do not decode nor even wait for a block of packets before sending out coded packets. Rather, whenever they have a transmission opportunity, they send out coded packets formed from random linear combinations of previously received packets. All coding and decoding operations have polynomial complexity. We show that the scheme is capacity-achieving as long as packets received on a link arrive according to a process that has an average rate. Thus, packet losses on a link may exhibit correlation in time or with losses on other links. In the special case of Poisson traffic with i.i.d. losses, we give error exponents that quantify the rate of decay of the probability of error with coding delay. Our analysis of the scheme shows that it is not only capacity-achieving, but that the propagation of packets carrying "innovative" information follows the propagation of jobs through a queueing network, and therefore fluid flow models yield good approximations. We consider networks with both lossy point-to-point and broadcast links, allowing us to model both wireline and wireless packet networks.<|reference_end|>
arxiv
@article{lun2005on, title={On Coding for Reliable Communication over Packet Networks}, author={Desmond S. Lun, Muriel Medard, Ralf Koetter, Michelle Effros}, journal={Physical Communication, vol. 1, no. 1, pp. 3-20, March 2008.}, year={2005}, doi={10.1016/j.phycom.2008.01.006}, archivePrefix={arXiv}, eprint={cs/0510070}, primaryClass={cs.IT cs.NI math.IT} }
lun2005on
arxiv-673442
cs/0510071
A Simple Cooperative Diversity Method Based on Network Path Selection
<|reference_start|>A Simple Cooperative Diversity Method Based on Network Path Selection: Cooperative diversity has been recently proposed as a way to form virtual antenna arrays that provide dramatic gains in slow fading wireless environments. However most of the proposed solutions require distributed space-time coding algorithms, the careful design of which is left for future investigation if there is more than one cooperative relay. We propose a novel scheme, that alleviates these problems and provides diversity gains on the order of the number of relays in the network. Our scheme first selects the best relay from a set of M available relays and then uses this best relay for cooperation between the source and the destination. We develop and analyze a distributed method to select the best relay that requires no topology information and is based on local measurements of the instantaneous channel conditions. This method also requires no explicit communication among the relays. The success (or failure) to select the best available path depends on the statistics of the wireless channel, and a methodology to evaluate performance for any kind of wireless channel statistics, is provided. Information theoretic analysis of outage probability shows that our scheme achieves the same diversity-multiplexing tradeoff as achieved by more complex protocols, where coordination and distributed space-time coding for M nodes is required, such as those proposed in [7]. The simplicity of the technique, allows for immediate implementation in existing radio hardware and its adoption could provide for improved flexibility, reliability and efficiency in future 4G wireless systems.<|reference_end|>
arxiv
@article{bletsas2005a, title={A Simple Cooperative Diversity Method Based on Network Path Selection}, author={Aggelos Bletsas, Ashish Khisti, David P. Reed, Andrew Lippman}, journal={arXiv preprint arXiv:cs/0510071}, year={2005}, doi={10.1109/JSAC.2005.862417}, archivePrefix={arXiv}, eprint={cs/0510071}, primaryClass={cs.IT math.IT} }
bletsas2005a
arxiv-673443
cs/0510072
On Interleaving Techniques for MIMO Channels and Limitations of Bit Interleaved Coded Modulation
<|reference_start|>On Interleaving Techniques for MIMO Channels and Limitations of Bit Interleaved Coded Modulation: It is shown that while the mutual information curves for coded modulation (CM) and bit interleaved coded modulation (BICM) overlap in the case of a single input single output channel, the same is not true in multiple input multiple output (MIMO) channels. A method for mitigating fading in the presence of multiple transmit antennas, named coordinate interleaving (CI), is presented as a generalization of component interleaving for a single transmit antenna. The extent of any advantages of CI over BICM, relative to CM, is analyzed from a mutual information perspective; the analysis is based on an equivalent parallel channel model for CI. Several expressions for mutual information in the presence of CI and multiple transmit and receive antennas are derived. Results show that CI gives higher mutual information compared to that of BICM if proper signal mappings are used. Effects like constellation rotation in the presence of CI are also considered and illustrated; it is shown that constellation rotation can increase the constrained capacity.<|reference_end|>
arxiv
@article{ionescu2005on, title={On Interleaving Techniques for MIMO Channels and Limitations of Bit Interleaved Coded Modulation}, author={Dumitru Mihai Ionescu, Dung N. Doan, Steven D. Gray}, journal={arXiv preprint arXiv:cs/0510072}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510072}, primaryClass={cs.IT math.IT} }
ionescu2005on
arxiv-673444
cs/0510073
Semantic Embedding of Petri Nets into Event-B
<|reference_start|>Semantic Embedding of Petri Nets into Event-B: We present an embedding of Petri nets into B abstract systems. The embedding is achieved by translating both the static structure (modelling aspect) and the evolution semantics of Petri nets. The static structure of a Petri-net is captured within a B abstract system through a graph structure. This abstract system is then included in another abstract system which captures the evolution semantics of Petri-nets. The evolution semantics results in some B events depending on the chosen policies: basic nets or high level Petri nets. The current embedding enables one to use conjointly Petri nets and Event-B in the same system development, but at different steps and for various analysis.<|reference_end|>
arxiv
@article{attiogbe2005semantic, title={Semantic Embedding of Petri Nets into Event-B}, author={Christian Attiogbe}, journal={arXiv preprint arXiv:cs/0510073}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510073}, primaryClass={cs.LO} }
attiogbe2005semantic
arxiv-673445
cs/0510074
Practical Datatype Specializations with Phantom Types and Recursion Schemes
<|reference_start|>Practical Datatype Specializations with Phantom Types and Recursion Schemes: Datatype specialization is a form of subtyping that captures program invariants on data structures that are expressed using the convenient and intuitive datatype notation. Of particular interest are structural invariants such as well-formedness. We investigate the use of phantom types for describing datatype specializations. We show that it is possible to express statically-checked specializations within the type system of Standard ML. We also show that this can be done in a way that does not lose useful programming facilities such as pattern matching in case expressions.<|reference_end|>
arxiv
@article{fluet2005practical, title={Practical Datatype Specializations with Phantom Types and Recursion Schemes}, author={Matthew Fluet, Riccardo Pucella}, journal={arXiv preprint arXiv:cs/0510074}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510074}, primaryClass={cs.PL} }
fluet2005practical
arxiv-673446
cs/0510075
On-Off Frequency-Shift-Keying for Wideband Fading Channels
<|reference_start|>On-Off Frequency-Shift-Keying for Wideband Fading Channels: M-ary On-Off Frequency-Shift-Keying (OOFSK) is a digital modulation format in which M-ary FSK signaling is overlaid on On/Off keying. This paper investigates the potential of this modulation format in the context of wideband fading channels. First it is assumed that the receiver uses energy detection for the reception of OOFSK signals. Capacity expressions are obtained for the cases in which the receiver has perfect and imperfect fading side information. Power efficiency is investigated when the transmitter is subject to a peak-to-average power ratio (PAR) limitation or a peak power limitation. It is shown that under a PAR limitation, it is extremely power inefficient to operate in the very low SNR regime. On the other hand, if there is only a peak power limitation, it is demonstrated that power efficiency improves as one operates with smaller SNR and vanishing duty factor. Also studied are the capacity improvements that accrue when the receiver can track phase shifts in the channel or if the received signal has a specular component. To take advantage of those features, the phase of the modulation is also allowed to carry information.<|reference_end|>
arxiv
@article{gursoy2005on-off, title={On-Off Frequency-Shift-Keying for Wideband Fading Channels}, author={Mustafa Cenk Gursoy, Sergio Verdu, H. Vincent Poor}, journal={arXiv preprint arXiv:cs/0510075}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510075}, primaryClass={cs.IT math.IT} }
gursoy2005on-off
arxiv-673447
cs/0510076
Applying Evolutionary Optimisation to Robot Obstacle Avoidance
<|reference_start|>Applying Evolutionary Optimisation to Robot Obstacle Avoidance: This paper presents an artificial evolution-based method for stereo image analysis and its application to real-time obstacle detection and avoidance for a mobile robot. It uses the Parisian approach, which consists here in splitting the representation of the robot's environment into a large number of simple primitives, the "flies", which are evolved following a biologically inspired scheme and give a fast, low-cost solution to the obstacle detection problem in mobile robotics.<|reference_end|>
arxiv
@article{pauplin2005applying, title={Applying Evolutionary Optimisation to Robot Obstacle Avoidance}, author={Olivier Pauplin (INRIA Rocquencourt), Jean Louchet (INRIA Rocquencourt), Evelyne Lutton (INRIA Rocquencourt), Michel Parent (INRIA Rocquencourt)}, journal={Dans ISCIIA 2004}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510076}, primaryClass={cs.AI cs.RO} }
pauplin2005applying
arxiv-673448
cs/0510077
Connection state overhead in a dynamic linear network
<|reference_start|>Connection state overhead in a dynamic linear network: We consider a dynamical linear network where nearest neighbours communicate via links whose states form binary (open/closed) valued independent and identically distributed Markov processes. Our main result is the tight information-theoretic lower bound on the network traffic required by the connection state overhead, or the information required for all nodes to know their connected neighbourhood. These results, and especially their possible generalisations to more realistic network models, could give us valuable understanding of the unavoidable protocol overheads in rapidly changing Ad hoc and sensor networks.<|reference_end|>
arxiv
@article{ajanki2005connection, title={Connection state overhead in a dynamic linear network}, author={Oskari Ajanki, Antti Knowles}, journal={arXiv preprint arXiv:cs/0510077}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510077}, primaryClass={cs.IT cs.NI math.IT} }
ajanki2005connection
arxiv-673449
cs/0510078
Vector Gaussian Multiple Description with Individual and Central Receivers
<|reference_start|>Vector Gaussian Multiple Description with Individual and Central Receivers: L multiple descriptions of a vector Gaussian source for individual and central receivers are investigated. The sum rate of the descriptions with covariance distortion measure constraints, in a positive semidefinite ordering, is exactly characterized. For two descriptions, the entire rate region is characterized. Jointly Gaussian descriptions are optimal in achieving the limiting rates. The key component of the solution is a novel information-theoretic inequality that is used to lower bound the achievable multiple description rates.<|reference_end|>
arxiv
@article{wang2005vector, title={Vector Gaussian Multiple Description with Individual and Central Receivers}, author={H. Wang and P. Viswanath}, journal={arXiv preprint arXiv:cs/0510078}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510078}, primaryClass={cs.IT math.IT} }
wang2005vector
arxiv-673450
cs/0510079
Evidence with Uncertain Likelihoods
<|reference_start|>Evidence with Uncertain Likelihoods: An agent often has a number of hypotheses, and must choose among them based on observations, or outcomes of experiments. Each of these observations can be viewed as providing evidence for or against various hypotheses. All the attempts to formalize this intuition up to now have assumed that associated with each hypothesis h there is a likelihood function \mu_h, which is a probability measure that intuitively describes how likely each observation is, conditional on h being the correct hypothesis. We consider an extension of this framework where there is uncertainty as to which of a number of likelihood functions is appropriate, and discuss how one formal approach to defining evidence, which views evidence as a function from priors to posteriors, can be generalized to accommodate this uncertainty.<|reference_end|>
arxiv
@article{halpern2005evidence, title={Evidence with Uncertain Likelihoods}, author={Joseph Y. Halpern, Riccardo Pucella}, journal={arXiv preprint arXiv:cs/0510079}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510079}, primaryClass={cs.AI} }
halpern2005evidence
arxiv-673451
cs/0510080
When Ignorance is Bliss
<|reference_start|>When Ignorance is Bliss: It is commonly-accepted wisdom that more information is better, and that information should never be ignored. Here we argue, using both a Bayesian and a non-Bayesian analysis, that in some situations you are better off ignoring information if your uncertainty is represented by a set of probability measures. These include situations in which the information is relevant for the prediction task at hand. In the non-Bayesian analysis, we show how ignoring information avoids dilation, the phenomenon that additional pieces of information sometimes lead to an increase in uncertainty. In the Bayesian analysis, we show that for small sample sizes and certain prediction tasks, the Bayesian posterior based on a noninformative prior yields worse predictions than simply ignoring the given information.<|reference_end|>
arxiv
@article{grunwald2005when, title={When Ignorance is Bliss}, author={Peter D. Grunwald and Joseph Y. Halpern}, journal={arXiv preprint arXiv:cs/0510080}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510080}, primaryClass={cs.AI cs.LG} }
grunwald2005when
arxiv-673452
cs/0510081
Virtual Environments for multiphysics code validation on Computing Grids
<|reference_start|>Virtual Environments for multiphysics code validation on Computing Grids: We advocate in this paper the use of grid-based infrastructures that are designed for seamless approaches to the numerical expert users, i.e., the multiphysics applications designers. It relies on sophisticated computing environments based on computing grids, connecting heterogeneous computing resources: mainframes, PC-clusters and workstations running multiphysics codes and utility software, e.g., visualization tools. The approach is based on concepts defined by the HEAVEN* consortium. HEAVEN is a European scientific consortium including industrial partners from the aerospace, telecommunication and software industries, as well as academic research institutes. Currently, the HEAVEN consortium works on a project that aims to create advanced services platforms. It is intended to enable "virtual private grids" supporting various environments for users manipulating a suitable high-level interface. This will become the basis for future generalized services allowing the integration of various services without the need to deploy specific grid infrastructures.<|reference_end|>
arxiv
@article{nguyen2005virtual, title={Virtual Environments for multiphysics code validation on Computing Grids}, author={Toan Nguyen (INRIA Sophia Antipolis / INRIA Rh{\^o}ne-Alpes), Lizhe Wang (INRIA Sophia Antipolis / INRIA Rh{\^o}ne-Alpes), Vittorio Selmin (ALENIA Aeronautica)}, journal={arXiv preprint arXiv:cs/0510081}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510081}, primaryClass={cs.DC} }
nguyen2005virtual
arxiv-673453
cs/0510082
Architectural Considerations for a Self-Configuring Routing Scheme for Spontaneous Networks
<|reference_start|>Architectural Considerations for a Self-Configuring Routing Scheme for Spontaneous Networks: Decoupling the permanent identifier of a node from the node's topology-dependent address is a promising approach toward completely scalable self-organizing networks. A group of proposals that have adopted such an approach use the same structure to: address nodes, perform routing, and implement location service. In this way, the consistency of the routing protocol relies on the coherent sharing of the addressing space among all nodes in the network. Such proposals use a logical tree-like structure where routes in this space correspond to routes in the physical level. The advantage of tree-like spaces is that it allows for simple address assignment and management. Nevertheless, it has low route selection flexibility, which results in low routing performance and poor resilience to failures. In this paper, we propose to increase the number of paths using incomplete hypercubes. The design of more complex structures, like multi-dimensional Cartesian spaces, improves the resilience and routing performance due to the flexibility in route selection. We present a framework for using hypercubes to implement indirect routing. This framework allows to give a solution adapted to the dynamics of the network, providing a proactive and reactive routing protocols, our major contributions. We show that, contrary to traditional approaches, our proposal supports more dynamic networks and is more robust to node failures.<|reference_end|>
arxiv
@article{alvarez-hamelin2005architectural, title={Architectural Considerations for a Self-Configuring Routing Scheme for Spontaneous Networks}, author={Jos{\'e} Ignacio Alvarez-Hamelin (LPT, FIUBA), Aline Carneiro Viana (IRISA / INRIA Rennes), Marcelo Dias De Amorim (LIP6)}, journal={arXiv preprint arXiv:cs/0510082}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510082}, primaryClass={cs.NI} }
alvarez-hamelin2005architectural
arxiv-673454
cs/0510083
Neuronal Spectral Analysis of EEG and Expert Knowledge Integration for Automatic Classification of Sleep Stages
<|reference_start|>Neuronal Spectral Analysis of EEG and Expert Knowledge Integration for Automatic Classification of Sleep Stages: Being able to analyze and interpret signal coming from electroencephalogram (EEG) recording can be of high interest for many applications including medical diagnosis and Brain-Computer Interfaces. Indeed, human experts are today able to extract from this signal many hints related to physiological as well as cognitive states of the recorded subject and it would be very interesting to perform such task automatically but today no completely automatic system exists. In previous studies, we have compared human expertise and automatic processing tools, including artificial neural networks (ANN), to better understand the competences of each and determine which are the difficult aspects to integrate in a fully automatic system. In this paper, we bring more elements to that study in reporting the main results of a practical experiment which was carried out in an hospital for sleep pathology study. An EEG recording was studied and labeled by a human expert and an ANN. We describe here the characteristics of the experiment, both human and neuronal procedure of analysis, compare their performances and point out the main limitations which arise from this study.<|reference_end|>
arxiv
@article{kerkeni2005neuronal, title={Neuronal Spectral Analysis of EEG and Expert Knowledge Integration for Automatic Classification of Sleep Stages}, author={Nizar Kerkeni (TIM), Frederic Alexandre (CORTEX), Mohamed Hedi Bedoui (TIM), Laurent Bougrain (CORTEX), Mohamed Dogui (SAHLOUL)}, journal={arXiv preprint arXiv:cs/0510083}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510083}, primaryClass={cs.AI} }
kerkeni2005neuronal
arxiv-673455
cs/0510084
R\'eflexions sur la question fr\'equentielle en traitement du signal
<|reference_start|>R\'eflexions sur la question fr\'equentielle en traitement du signal: New definitions are suggested for frequencies which may be instantaneous or not. The Heisenberg-Gabor inequality and the Shannon sampling theorem are briefly discussed.<|reference_end|>
arxiv
@article{fliess2005reflexions, title={R\'{e}flexions sur la question fr\'{e}quentielle en traitement du signal}, author={Michel Fliess}, journal={arXiv preprint arXiv:cs/0510084}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510084}, primaryClass={cs.CE cs.IR math-ph math.MP math.SP} }
fliess2005reflexions
arxiv-673456
cs/0510085
Canonical time-frequency, time-scale, and frequency-scale representations of time-varying channels
<|reference_start|>Canonical time-frequency, time-scale, and frequency-scale representations of time-varying channels: Mobile communication channels are often modeled as linear time-varying filters or, equivalently, as time-frequency integral operators with finite support in time and frequency. Such a characterization inherently assumes the signals are narrowband and may not be appropriate for wideband signals. In this paper time-scale characterizations are examined that are useful in wideband time-varying channels, for which a time-scale integral operator is physically justifiable. A review of these time-frequency and time-scale characterizations is presented. Both the time-frequency and time-scale integral operators have a two-dimensional discrete characterization which motivates the design of time-frequency or time-scale rake receivers. These receivers have taps for both time and frequency (or time and scale) shifts of the transmitted signal. A general theory of these characterizations which generates, as specific cases, the discrete time-frequency and time-scale models is presented here. The interpretation of these models, namely, that they can be seen to arise from processing assumptions on the transmit and receive waveforms is discussed. Out of this discussion a third model arises: a frequency-scale continuous channel model with an associated discrete frequency-scale characterization.<|reference_end|>
arxiv
@article{rickard2005canonical, title={Canonical time-frequency, time-scale, and frequency-scale representations of time-varying channels}, author={Scott T. Rickard, Radu V. Balan, H. Vincent Poor, Sergio Verdu}, journal={arXiv preprint arXiv:cs/0510085}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510085}, primaryClass={cs.IT math.IT} }
rickard2005canonical
arxiv-673457
cs/0510086
Balanced Allocation on Graphs
<|reference_start|>Balanced Allocation on Graphs: In this paper, we study the two choice balls and bins process when balls are not allowed to choose any two random bins, but only bins that are connected by an edge in an underlying graph. We show that for $n$ balls and $n$ bins, if the graph is almost regular with degree $n^\epsilon$, where $\epsilon$ is not too small, the previous bounds on the maximum load continue to hold. Precisely, the maximum load is $\log \log n + O(1/\epsilon) + O(1)$. For general $\Delta$-regular graphs, we show that the maximum load is $\log\log n + O(\frac{\log n}{\log (\Delta/\log^4 n)}) + O(1)$ and also provide an almost matching lower bound of $\log \log n + \frac{\log n}{\log (\Delta \log n)}$. V{\"o}cking [Voc99] showed that the maximum bin size with $d$ choice load balancing can be further improved to $O(\log\log n /d)$ by breaking ties to the left. This requires $d$ random bin choices. We show that such bounds can be achieved by making only two random accesses and querying $d/2$ contiguous bins in each access. By grouping a sequence of $n$ bins into $2n/d$ groups, each of $d/2$ consecutive bins, if each ball chooses two groups at random and inserts the new ball into the least-loaded bin in the lesser loaded group, then the maximum load is $O(\log\log n/d)$ with high probability.<|reference_end|>
arxiv
@article{kenthapadi2005balanced, title={Balanced Allocation on Graphs}, author={K. Kenthapadi and R. Panigrahy}, journal={arXiv preprint arXiv:cs/0510086}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510086}, primaryClass={cs.DS} }
kenthapadi2005balanced
arxiv-673458
cs/0510087
MathPSfrag: Creating Publication-Quality Labels in Mathematica Plots
<|reference_start|>MathPSfrag: Creating Publication-Quality Labels in Mathematica Plots: This article introduces a Mathematica package providing a graphics export function that automatically replaces Mathematica expressions in a graphic by the corresponding LaTeX constructs and positions them correctly. It thus facilitates the creation of publication-quality Encapsulated PostScript (EPS) graphics.<|reference_end|>
arxiv
@article{grosse2005mathpsfrag:, title={MathPSfrag: Creating Publication-Quality Labels in Mathematica Plots}, author={J. Grosse}, journal={arXiv preprint arXiv:cs/0510087}, year={2005}, number={LMU-ASC 70/05; MPP-2005-126}, archivePrefix={arXiv}, eprint={cs/0510087}, primaryClass={cs.GR} }
grosse2005mathpsfrag:
arxiv-673459
cs/0510088
Lower bounds on Locality Sensitive Hashing
<|reference_start|>Lower bounds on Locality Sensitive Hashing: Given a metric space $(X,d_X)$, $c\ge 1$, $r>0$, and $p,q\in [0,1]$, a distribution over mappings $\h:X\to \mathbb N$ is called a $(r,cr,p,q)$-sensitive hash family if any two points in $X$ at distance at most $r$ are mapped by $\h$ to the same value with probability at least $p$, and any two points at distance greater than $cr$ are mapped by $\h$ to the same value with probability at most $q$. This notion was introduced by Indyk and Motwani in 1998 as the basis for an efficient approximate nearest neighbor search algorithm, and has since been used extensively for this purpose. The performance of these algorithms is governed by the parameter $\rho=\frac{\log(1/p)}{\log(1/q)}$, and constructing hash families with small $\rho$ automatically yields improved nearest neighbor algorithms. Here we show that for $X=\ell_1$ it is impossible to achieve $\rho\le \frac{1}{2c}$. This almost matches the construction of Indyk and Motwani which achieves $\rho\le \frac{1}{c}$.<|reference_end|>
arxiv
@article{motwani2005lower, title={Lower bounds on Locality Sensitive Hashing}, author={Rajeev Motwani and Assaf Naor and Rina Panigrahy}, journal={arXiv preprint arXiv:cs/0510088}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510088}, primaryClass={cs.CG} }
motwani2005lower
arxiv-673460
cs/0510089
Automata-based adaptive behavior for economic modeling using game theory
<|reference_start|>Automata-based adaptive behavior for economic modeling using game theory: In this paper, we deal with some specific domains of applications to game theory. This is one of the major class of models in the new approaches of modelling in the economic domain. For that, we use genetic automata which allow to buid adaptive strategies for the players. We explain how the automata-based formalism proposed - matrix representation of automata with multiplicities - allows to define a semi-distance between the strategy behaviors. With that tools, we are able to generate an automatic processus to compute emergent systems of entities whose behaviors are represented by these genetic automata.<|reference_end|>
arxiv
@article{ghnemat2005automata-based, title={Automata-based adaptive behavior for economic modeling using game theory}, author={Rawan Ghnemat (IT), Khalaf Khatatneh (IT), Saleh Oqeili (IT), Cyrille Bertelle (LIH), G{\'e}rard Henry Edmond Duchamp (LIPN)}, journal={Dans EPNADS'05 within ECCS'05 - Emergent Properties in Natural and Artificial Dynamical Systems, Paris : France (2005)}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510089}, primaryClass={cs.MA cs.DM} }
ghnemat2005automata-based
arxiv-673461
cs/0510090
A simple effective method for curvatures estimation on triangular meshes
<|reference_start|>A simple effective method for curvatures estimation on triangular meshes: To definite and compute differential invariants, like curvatures, for triangular meshes (or polyhedral surfaces) is a key problem in CAGD and the computer vision. The Gaussian curvature and the mean curvature are determined by the differential of the Gauss map of the underlying surface. The Gauss map assigns to each point in the surface the unit normal vector of the tangent plane to the surface at this point. We follow the ideas developed in Chen and Wu \cite{Chen2}(2004) and Wu, Chen and Chi\cite{Wu}(2005) to describe a new and simple approach to estimate the differential of the Gauss map and curvatures from the viewpoint of the gradient and the centroid weights. This will give us a much better estimation of curvatures than Taubin's algorithm \cite{Taubin} (1995).<|reference_end|>
arxiv
@article{wu2005a, title={A simple effective method for curvatures estimation on triangular meshes}, author={Jyh-Yang Wu, Sheng-Gwo Chen and Mei-Hsiu Chi}, journal={arXiv preprint arXiv:cs/0510090}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510090}, primaryClass={cs.CG} }
wu2005a
arxiv-673462
cs/0510091
An efficient memetic, permutation-based evolutionary algorithm for real-world train timetabling
<|reference_start|>An efficient memetic, permutation-based evolutionary algorithm for real-world train timetabling: Train timetabling is a difficult and very tightly constrained combinatorial problem that deals with the construction of train schedules. We focus on the particular problem of local reconstruction of the schedule following a small perturbation, seeking minimisation of the total accumulated delay by adapting times of departure and arrival for each train and allocation of resources (tracks, routing nodes, etc.). We describe a permutation-based evolutionary algorithm that relies on a semi-greedy heuristic to gradually reconstruct the schedule by inserting trains one after the other following the permutation. This algorithm can be hybridised with ILOG commercial MIP programming tool CPLEX in a coarse-grained manner: the evolutionary part is used to quickly obtain a good but suboptimal solution and this intermediate solution is refined using CPLEX. Experimental results are presented on a large real-world case involving more than one million variables and 2 million constraints. Results are surprisingly good as the evolutionary algorithm, alone or hybridised, produces excellent solutions much faster than CPLEX alone.<|reference_end|>
arxiv
@article{schoenauer2005an, title={An efficient memetic, permutation-based evolutionary algorithm for real-world train timetabling}, author={Marc Schoenauer (INRIA Futurs), Yann Semet (INRIA Futurs)}, journal={arXiv preprint arXiv:cs/0510091}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510091}, primaryClass={cs.AI} }
schoenauer2005an
arxiv-673463
cs/0510092
Context Semantics, Linear Logic and Computational Complexity
<|reference_start|>Context Semantics, Linear Logic and Computational Complexity: We show that context semantics can be fruitfully applied to the quantitative analysis of proof normalization in linear logic. In particular, context semantics lets us define the weight of a proof-net as a measure of its inherent complexity: it is both an upper bound to normalization time (modulo a polynomial overhead, independently on the reduction strategy) and a lower bound to the number of steps to normal form (for certain reduction strategies). Weights are then exploited in proving strong soundness theorems for various subsystems of linear logic, namely elementary linear logic, soft linear logic and light linear logic.<|reference_end|>
arxiv
@article{lago2005context, title={Context Semantics, Linear Logic and Computational Complexity}, author={Ugo Dal Lago}, journal={arXiv preprint arXiv:cs/0510092}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510092}, primaryClass={cs.LO cs.CC} }
lago2005context
arxiv-673464
cs/0510093
ParFORM: recent development
<|reference_start|>ParFORM: recent development: We report on the status of our project of parallelization of the symbolic manipulation program FORM. We have now parallel versions of FORM running on Cluster- or SMP-architectures. These versions can be used to run arbitrary FORM programs in parallel.<|reference_end|>
arxiv
@article{tentyukov2005parform:, title={ParFORM: recent development}, author={M.Tentyukov, J.A.M.Vermaseren and H.M.Staudenmaier}, journal={arXiv preprint arXiv:cs/0510093}, year={2005}, doi={10.1016/j.nima.2005.11.142}, archivePrefix={arXiv}, eprint={cs/0510093}, primaryClass={cs.SC} }
tentyukov2005parform:
arxiv-673465
cs/0510094
Big Science with a Small Budget: Non-Embarrassingly Parallel Applications in a Non-Dedicated Network of Workstations
<|reference_start|>Big Science with a Small Budget: Non-Embarrassingly Parallel Applications in a Non-Dedicated Network of Workstations: Many astronomers and astrophysicists require large computing resources for their research, which are usually obtained via dedicated (and expensive) parallel machines. Depending on the type of the problem to be solved, an alternative solution can be provided by creating dynamically a computer cluster out of non-dedicated workstations using the Condor High Throughput Computing System and the Master-Worker (MW) framework. As an example of this we show in this paper how a radiative transfer application previously coded with MPI is solved using this solution without the need for dedicated machines.<|reference_end|>
arxiv
@article{devicente2005big, title={Big Science with a Small Budget: Non-Embarrassingly Parallel Applications in a Non-Dedicated Network of Workstations}, author={Angel de Vicente, Nayra Rodriguez}, journal={arXiv preprint arXiv:cs/0510094}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510094}, primaryClass={cs.DC astro-ph} }
devicente2005big
arxiv-673466
cs/0510095
Rate Region of the Quadratic Gaussian Two-Encoder Source-Coding Problem
<|reference_start|>Rate Region of the Quadratic Gaussian Two-Encoder Source-Coding Problem: We determine the rate region of the quadratic Gaussian two-encoder source-coding problem. This rate region is achieved by a simple architecture that separates the analog and digital aspects of the compression. Furthermore, this architecture requires higher rates to send a Gaussian source than it does to send any other source with the same covariance. Our techniques can also be used to determine the sum rate of some generalizations of this classical problem. Our approach involves coupling the problem to a quadratic Gaussian ``CEO problem.''<|reference_end|>
arxiv
@article{wagner2005rate, title={Rate Region of the Quadratic Gaussian Two-Encoder Source-Coding Problem}, author={Aaron B. Wagner, Saurabha Tavildar, and Pramod Viswanath}, journal={arXiv preprint arXiv:cs/0510095}, year={2005}, archivePrefix={arXiv}, eprint={cs/0510095}, primaryClass={cs.IT math.IT} }
wagner2005rate
arxiv-673467
cs/0511001
Capacity with Causal and Non-Causal Side Information - A Unified View
<|reference_start|>Capacity with Causal and Non-Causal Side Information - A Unified View: We identify the common underlying form of the capacity expression that is applicable to both cases where causal or non-causal side information is made available to the transmitter. Using this common form we find that for the single user channel, the multiple access channel, the degraded broadcast channel, and the degraded relay channel, the sum capacity with causal and non-causal side information are identical when all the transmitter side information is also made available to all the receivers. A genie-aided outerbound is developed that states that when a genie provides $n$ bits of side information to a receiver the resulting capacity improvement can not be more than $n$ bits. Combining these two results we are able to bound the relative capacity advantage of non-causal side information over causal side information for both single user as well as various multiple user communication scenarios. Applications of these capacity bounds are demonstrated through examples of random access channels. Interestingly, the capacity results indicate that the excessive MAC layer overheads common in present wireless systems may be avoided through coding across multiple access blocks. It is also shown that even one bit of side information at the transmitter can result in unbounded capacity improvement. As a side, we obtain the sum capacity for a multiple access channel when the side information available to the transmitter is causal and possibly correlated to the side information available to the receiver.<|reference_end|>
arxiv
@article{jafar2005capacity, title={Capacity with Causal and Non-Causal Side Information - A Unified View}, author={Syed A. Jafar}, journal={arXiv preprint arXiv:cs/0511001}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511001}, primaryClass={cs.IT math.IT} }
jafar2005capacity
arxiv-673468
cs/0511002
Bibliographic Classification using the ADS Databases
<|reference_start|>Bibliographic Classification using the ADS Databases: We discuss two techniques used to characterize bibliographic records based on their similarity to and relationship with the contents of the NASA Astrophysics Data System (ADS) databases. The first method has been used to classify input text as being relevant to one or more subject areas based on an analysis of the frequency distribution of its individual words. The second method has been used to classify existing records as being relevant to one or more databases based on the distribution of the papers citing them. Both techniques have proven to be valuable tools in assigning new and existing bibliographic records to different disciplines within the ADS databases.<|reference_end|>
arxiv
@article{accomazzi2005bibliographic, title={Bibliographic Classification using the ADS Databases}, author={Alberto Accomazzi, Michael J. Kurtz, Guenther Eichhorn, Edwin Henneken, Carolyn S. Grant, Markus Demleitner, Stephen S. Murray}, journal={arXiv preprint arXiv:cs/0511002}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511002}, primaryClass={cs.IR cs.DL} }
accomazzi2005bibliographic
arxiv-673469
cs/0511003
Optimal Prefix Codes for Infinite Alphabets with Nonlinear Costs
<|reference_start|>Optimal Prefix Codes for Infinite Alphabets with Nonlinear Costs: Let $P = \{p(i)\}$ be a measure of strictly positive probabilities on the set of nonnegative integers. Although the countable number of inputs prevents usage of the Huffman algorithm, there are nontrivial $P$ for which known methods find a source code that is optimal in the sense of minimizing expected codeword length. For some applications, however, a source code should instead minimize one of a family of nonlinear objective functions, $\beta$-exponential means, those of the form $\log_a \sum_i p(i) a^{n(i)}$, where $n(i)$ is the length of the $i$th codeword and $a$ is a positive constant. Applications of such minimizations include a novel problem of maximizing the chance of message receipt in single-shot communications ($a<1$) and a previously known problem of minimizing the chance of buffer overflow in a queueing system ($a>1$). This paper introduces methods for finding codes optimal for such exponential means. One method applies to geometric distributions, while another applies to distributions with lighter tails. The latter algorithm is applied to Poisson distributions and both are extended to alphabetic codes, as well as to minimizing maximum pointwise redundancy. The aforementioned application of minimizing the chance of buffer overflow is also considered.<|reference_end|>
arxiv
@article{baer2005optimal, title={Optimal Prefix Codes for Infinite Alphabets with Nonlinear Costs}, author={Michael B. Baer}, journal={arXiv preprint arXiv:cs/0511003}, year={2005}, doi={10.1109/TIT.2007.915696}, archivePrefix={arXiv}, eprint={cs/0511003}, primaryClass={cs.IT cs.DS math.IT} }
baer2005optimal
arxiv-673470
cs/0511004
Evolutionary Computing
<|reference_start|>Evolutionary Computing: Evolutionary computing (EC) is an exciting development in Computer Science. It amounts to building, applying and studying algorithms based on the Darwinian principles of natural selection. In this paper we briefly introduce the main concepts behind evolutionary computing. We present the main components all evolutionary algorithms (EA), sketch the differences between different types of EAs and survey application areas ranging from optimization, modeling and simulation to entertainment.<|reference_end|>
arxiv
@article{eiben2005evolutionary, title={Evolutionary Computing}, author={Aguston E. Eiben (VU), Marc Schoenauer (FRACTALES)}, journal={arXiv preprint arXiv:cs/0511004}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511004}, primaryClass={cs.AI} }
eiben2005evolutionary
arxiv-673471
cs/0511005
The egalitarian effect of search engines
<|reference_start|>The egalitarian effect of search engines: Search engines have become key media for our scientific, economic, and social activities by enabling people to access information on the Web in spite of its size and complexity. On the down side, search engines bias the traffic of users according to their page-ranking strategies, and some have argued that they create a vicious cycle that amplifies the dominance of established and already popular sites. We show that, contrary to these prior claims and our own intuition, the use of search engines actually has an egalitarian effect. We reconcile theoretical arguments with empirical evidence showing that the combination of retrieval by search engines and search behavior by users mitigates the attraction of popular pages, directing more traffic toward less popular sites, even in comparison to what would be expected from users randomly surfing the Web.<|reference_end|>
arxiv
@article{fortunato2005the, title={The egalitarian effect of search engines}, author={Santo Fortunato, Alessandro Flammini, Filippo Menczer, Alessandro Vespignani}, journal={arXiv preprint arXiv:cs/0511005}, year={2005}, doi={10.1073/pnas.0605525103}, archivePrefix={arXiv}, eprint={cs/0511005}, primaryClass={cs.CY cs.IR physics.soc-ph} }
fortunato2005the
arxiv-673472
cs/0511006
Logical Relations for Monadic Types
<|reference_start|>Logical Relations for Monadic Types: Logical relations and their generalizations are a fundamental tool in proving properties of lambda-calculi, e.g., yielding sound principles for observational equivalence. We propose a natural notion of logical relations able to deal with the monadic types of Moggi's computational lambda-calculus. The treatment is categorical, and is based on notions of subsconing, mono factorization systems, and monad morphisms. Our approach has a number of interesting applications, including cases for lambda-calculi with non-determinism (where being in logical relation means being bisimilar), dynamic name creation, and probabilistic systems.<|reference_end|>
arxiv
@article{goubault-larrecq2005logical, title={Logical Relations for Monadic Types}, author={Jean Goubault-Larrecq, Slawomir Lasota and David Nowak}, journal={Mathematical Structures in Computer Science, 18(6):1169-1217, December 2008}, year={2005}, doi={10.1017/S0960129508007172}, archivePrefix={arXiv}, eprint={cs/0511006}, primaryClass={cs.LO} }
goubault-larrecq2005logical
arxiv-673473
cs/0511007
K-core decomposition of Internet graphs: hierarchies, self-similarity and measurement biases
<|reference_start|>K-core decomposition of Internet graphs: hierarchies, self-similarity and measurement biases: We consider the $k$-core decomposition of network models and Internet graphs at the autonomous system (AS) level. The $k$-core analysis allows to characterize networks beyond the degree distribution and uncover structural properties and hierarchies due to the specific architecture of the system. We compare the $k$-core structure obtained for AS graphs with those of several network models and discuss the differences and similarities with the real Internet architecture. The presence of biases and the incompleteness of the real maps are discussed and their effect on the $k$-core analysis is assessed with numerical experiments simulating biased exploration on a wide range of network models. We find that the $k$-core analysis provides an interesting characterization of the fluctuations and incompleteness of maps as well as information helping to discriminate the original underlying structure.<|reference_end|>
arxiv
@article{alvarez-hamelin2005k-core, title={K-core decomposition of Internet graphs: hierarchies, self-similarity and measurement biases}, author={Jos{\'e} Ignacio Alvarez-Hamelin (LPT), Luca Dall'Asta (LPT), Alain Barrat (LPT), Alessandro Vespignani}, journal={Networks and Heterogeneous Media 3 (2008) 371}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511007}, primaryClass={cs.NI cond-mat.stat-mech} }
alvarez-hamelin2005k-core
arxiv-673474
cs/0511008
Analysis of Stochastic Service Guarantees in Communication Networks: A Basic Calculus
<|reference_start|>Analysis of Stochastic Service Guarantees in Communication Networks: A Basic Calculus: A basic calculus is presented for stochastic service guarantee analysis in communication networks. Central to the calculus are two definitions, maximum-(virtual)-backlog-centric (m.b.c) stochastic arrival curve and stochastic service curve, which respectively generalize arrival curve and service curve in the deterministic network calculus framework. With m.b.c stochastic arrival curve and stochastic service curve, various basic results are derived under the (min, +) algebra for the general case analysis, which are crucial to the development of stochastic network calculus. These results include (i) superposition of flows, (ii) concatenation of servers, (iii) output characterization, (iv) per-flow service under aggregation, and (v) stochastic backlog and delay guarantees. In addition, to perform independent case analysis, stochastic strict server is defined, which uses an ideal service process and an impairment process to characterize a server. The concept of stochastic strict server not only allows us to improve the basic results (i) -- (v) under the independent case, but also provides a convenient way to find the stochastic service curve of a serve. Moreover, an approach is introduced to find the m.b.c stochastic arrival curve of a flow and the stochastic service curve of a server.<|reference_end|>
arxiv
@article{jiang2005analysis, title={Analysis of Stochastic Service Guarantees in Communication Networks: A Basic Calculus}, author={Yuming Jiang}, journal={arXiv preprint arXiv:cs/0511008}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511008}, primaryClass={cs.PF cs.IT cs.NI math.IT} }
jiang2005analysis
arxiv-673475
cs/0511009
Mismatched codebooks and the role of entropy-coding in lossy data compression
<|reference_start|>Mismatched codebooks and the role of entropy-coding in lossy data compression: We introduce a universal quantization scheme based on random coding, and we analyze its performance. This scheme consists of a source-independent random codebook (typically _mismatched_ to the source distribution), followed by optimal entropy-coding that is _matched_ to the quantized codeword distribution. A single-letter formula is derived for the rate achieved by this scheme at a given distortion, in the limit of large codebook dimension. The rate reduction due to entropy-coding is quantified, and it is shown that it can be arbitrarily large. In the special case of "almost uniform" codebooks (e.g., an i.i.d. Gaussian codebook with large variance) and difference distortion measures, a novel connection is drawn between the compression achieved by the present scheme and the performance of "universal" entropy-coded dithered lattice quantizers. This connection generalizes the "half-a-bit" bound on the redundancy of dithered lattice quantizers. Moreover, it demonstrates a strong notion of universality where a single "almost uniform" codebook is near-optimal for _any_ source and _any_ difference distortion measure.<|reference_end|>
arxiv
@article{kontoyiannis2005mismatched, title={Mismatched codebooks and the role of entropy-coding in lossy data compression}, author={Ioannis Kontoyiannis (Athens U of Econ & Business) and Rami Zamir (Tel-Aviv University)}, journal={arXiv preprint arXiv:cs/0511009}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511009}, primaryClass={cs.IT math.IT math.PR} }
kontoyiannis2005mismatched
arxiv-673476
cs/0511010
A Survey of Virtualization Techniques Focusing on Secure On-Demand Cluster Computing
<|reference_start|>A Survey of Virtualization Techniques Focusing on Secure On-Demand Cluster Computing: Virtualization, a technique once used to multiplex the resources of high-priced mainframe hardware, is seeing a resurgence in applicability with the increasing computing power of commodity computers. By inserting a layer of software between the machine and traditional operating systems, this technology allows access to a shared computing medium in a manner that is secure, resource-controlled, and efficient. These properties are attractive in the field of on-demand computing, where the fine-grained subdivision of resources provided by virtualized systems allows potentially higher utilization of computing resources. It this work, we survey a number of virtual machine systems with the goal of finding an appropriate candidate to serve as the basis for the On-Demand Secure Cluster Computing project at the National Center for Supercomputing Applications. Contenders are reviewed on a number of desirable properties including portability and security. We conclude with a comparison and justification of our choice.<|reference_end|>
arxiv
@article{kiyanclar2005a, title={A Survey of Virtualization Techniques Focusing on Secure On-Demand Cluster Computing}, author={Nadir Kiyanclar}, journal={arXiv preprint arXiv:cs/0511010}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511010}, primaryClass={cs.OS} }
kiyanclar2005a
arxiv-673477
cs/0511011
The Impact of Social Networks on Multi-Agent Recommender Systems
<|reference_start|>The Impact of Social Networks on Multi-Agent Recommender Systems: Awerbuch et al.'s approach to distributed recommender systems (DRSs) is to have agents sample products at random while randomly querying one another for the best item they have found; we improve upon this by adding a communication network. Agents can only communicate with their immediate neighbors in the network, but neighboring agents may or may not represent users with common interests. We define two network structures: in the ``mailing-list model,'' agents representing similar users form cliques, while in the ``word-of-mouth model'' the agents are distributed randomly in a scale-free network (SFN). In both models, agents tell their neighbors about satisfactory products as they are found. In the word-of-mouth model, knowledge of items propagates only through interested agents, and the SFN parameters affect the system's performance. We include a summary of our new results on the character and parameters of random subgraphs of SFNs, in particular SFNs with power-law degree distributions down to minimum degree 1. These networks are not as resilient as Cohen et al. originally suggested. In the case of the widely-cited ``Internet resilience'' result, high failure rates actually lead to the orphaning of half of the surviving nodes after 60% of the network has failed and the complete disintegration of the network at 90%. We show that given an appropriate network, the communication network reduces the number of sampled items, the number of messages sent, and the amount of ``spam.'' We conclude that in many cases DRSs will be useful for sharing information in a multi-agent learning system.<|reference_end|>
arxiv
@article{link2005the, title={The Impact of Social Networks on Multi-Agent Recommender Systems}, author={Hamilton Link and Jared Saia and Terran Lane and Randall A. LaViolette}, journal={arXiv preprint arXiv:cs/0511011}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511011}, primaryClass={cs.LG cs.CC cs.MA} }
link2005the
arxiv-673478
cs/0511012
Parameters Affecting the Resilience of Scale-Free Networks to Random Failures
<|reference_start|>Parameters Affecting the Resilience of Scale-Free Networks to Random Failures: It is commonly believed that scale-free networks are robust to massive numbers of random node deletions. For example, Cohen et al. study scale-free networks including some which approximate the measured degree distribution of the Internet. Their results suggest that if each node in this network failed independently with probability 0.99, the remaining network would continue to have a giant component. In this paper, we show that a large and important subclass of scale-free networks are not robust to massive numbers of random node deletions for practical purposes. In particular, we study finite scale-free networks which have minimum node degree of 1 and a power-law degree distribution beginning with nodes of degree 1 (power-law networks). We show that, in a power-law network approximating the Internet's reported distribution, when the probability of deletion of each node is 0.5 only about 25% of the surviving nodes in the network remain connected in a giant component, and the giant component does not persist beyond a critical failure rate of 0.9. The new result is partially due to improved analytical accommodation of the large number of degree-0 nodes that result after node deletions. Our results apply to finite power-law networks with a wide range of power-law exponents, including Internet-like networks. We give both analytical and empirical evidence that such networks are not generally robust to massive random node deletions.<|reference_end|>
arxiv
@article{link2005parameters, title={Parameters Affecting the Resilience of Scale-Free Networks to Random Failures}, author={Hamilton Link and Randall A. LaViolette and Jared Saia and Terran Lane}, journal={arXiv preprint arXiv:cs/0511012}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511012}, primaryClass={cs.NI cs.AR cs.MA} }
link2005parameters
arxiv-673479
cs/0511013
K-ANMI: A Mutual Information Based Clustering Algorithm for Categorical Data
<|reference_start|>K-ANMI: A Mutual Information Based Clustering Algorithm for Categorical Data: Clustering categorical data is an integral part of data mining and has attracted much attention recently. In this paper, we present k-ANMI, a new efficient algorithm for clustering categorical data. The k-ANMI algorithm works in a way that is similar to the popular k-means algorithm, and the goodness of clustering in each step is evaluated using a mutual information based criterion (namely, Average Normalized Mutual Information-ANMI) borrowed from cluster ensemble. Experimental results on real datasets show that k-ANMI algorithm is competitive with those state-of-art categorical data clustering algorithms with respect to clustering accuracy.<|reference_end|>
arxiv
@article{he2005k-anmi:, title={K-ANMI: A Mutual Information Based Clustering Algorithm for Categorical Data}, author={Zengyou He, Xiaofei Xu, Shengchun Deng}, journal={arXiv preprint arXiv:cs/0511013}, year={2005}, number={Tr-2004-03}, archivePrefix={arXiv}, eprint={cs/0511013}, primaryClass={cs.AI cs.DB} }
he2005k-anmi:
arxiv-673480
cs/0511014
Flat and One-Variable Clauses: Complexity of Verifying Cryptographic Protocols with Single Blind Copying
<|reference_start|>Flat and One-Variable Clauses: Complexity of Verifying Cryptographic Protocols with Single Blind Copying: Cryptographic protocols with single blind copying were defined and modeled by Comon and Cortier using the new class $\mathcal C$ of first order clauses. They showed its satisfiability problem to be in 3-DEXPTIME. We improve this result by showing that satisfiability for this class is NEXPTIME-complete, using new resolution techniques. We show satisfiability to be DEXPTIME-complete if clauses are Horn, which is what is required for modeling cryptographic protocols. While translation to Horn clauses only gives a DEXPTIME upper bound for the secrecy problem for these protocols, we further show that this secrecy problem is actually DEXPTIME-complete.<|reference_end|>
arxiv
@article{seidl2005flat, title={Flat and One-Variable Clauses: Complexity of Verifying Cryptographic Protocols with Single Blind Copying}, author={Helmut Seidl, Kumar Neeraj Verma}, journal={arXiv preprint arXiv:cs/0511014}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511014}, primaryClass={cs.LO cs.CR} }
seidl2005flat
arxiv-673481
cs/0511015
Towards a Hierarchical Model of Consciousness, Intelligence, Mind and Body
<|reference_start|>Towards a Hierarchical Model of Consciousness, Intelligence, Mind and Body: This article is taken out.<|reference_end|>
arxiv
@article{prashant2005towards, title={Towards a Hierarchical Model of Consciousness, Intelligence, Mind and Body}, author={Prashant}, journal={arXiv preprint arXiv:cs/0511015}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511015}, primaryClass={cs.AI} }
prashant2005towards
arxiv-673482
cs/0511016
How to make the top ten: Approximating PageRank from in-degree
<|reference_start|>How to make the top ten: Approximating PageRank from in-degree: PageRank has become a key element in the success of search engines, allowing to rank the most important hits in the top screen of results. One key aspect that distinguishes PageRank from other prestige measures such as in-degree is its global nature. From the information provider perspective, this makes it difficult or impossible to predict how their pages will be ranked. Consequently a market has emerged for the optimization of search engine results. Here we study the accuracy with which PageRank can be approximated by in-degree, a local measure made freely available by search engines. Theoretical and empirical analyses lead to conclude that given the weak degree correlations in the Web link graph, the approximation can be relatively accurate, giving service and information providers an effective new marketing tool.<|reference_end|>
arxiv
@article{fortunato2005how, title={How to make the top ten: Approximating PageRank from in-degree}, author={Santo Fortunato, Marian Boguna, Alessandro Flammini, Filippo Menczer}, journal={arXiv preprint arXiv:cs/0511016}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511016}, primaryClass={cs.IR physics.soc-ph} }
fortunato2005how
arxiv-673483
cs/0511017
Short Quantum Games
<|reference_start|>Short Quantum Games: In this thesis we introduce quantum refereed games, which are quantum interactive proof systems with two competing provers. We focus on a restriction of this model that we call "short quantum games" and we prove an upper bound and a lower bound on the expressive power of these games. For the lower bound, we prove that every language having an ordinary quantum interactive proof system also has a short quantum game. An important part of this proof is the establishment of a quantum measurement that reliably distinguishes between quantum states chosen from disjoint convex sets. For the upper bound, we show that certain types of quantum refereed games, including short quantum games, are decidable in deterministic exponential time by supplying a separation oracle for use with the ellipsoid method for convex feasibility.<|reference_end|>
arxiv
@article{gutoski2005short, title={Short Quantum Games}, author={Gus Gutoski}, journal={arXiv preprint arXiv:cs/0511017}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511017}, primaryClass={cs.CC quant-ph} }
gutoski2005short
arxiv-673484
cs/0511018
From General Systems to Soft Systems to Soft Computing: Applications for Large and Complex Real World Systems
<|reference_start|>From General Systems to Soft Systems to Soft Computing: Applications for Large and Complex Real World Systems: This article is taken out.<|reference_end|>
arxiv
@article{prashant2005from, title={From General Systems to Soft Systems to Soft Computing: Applications for Large and Complex Real World Systems}, author={Prashant}, journal={arXiv preprint arXiv:cs/0511018}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511018}, primaryClass={cs.SE} }
prashant2005from
arxiv-673485
cs/0511019
A Counterexample to Cover's 2P Conjecture on Gaussian Feedback Capacity
<|reference_start|>A Counterexample to Cover's 2P Conjecture on Gaussian Feedback Capacity: We provide a counterexample to Cover's conjecture that the feedback capacity $C_\textrm{FB}$ of an additive Gaussian noise channel under power constraint $P$ be no greater than the nonfeedback capacity $C$ of the same channel under power constraint $2P$, i.e., $C_\textrm{FB}(P) \le C(2P)$.<|reference_end|>
arxiv
@article{kim2005a, title={A Counterexample to Cover's 2P Conjecture on Gaussian Feedback Capacity}, author={Young-Han Kim}, journal={arXiv preprint arXiv:cs/0511019}, year={2005}, doi={10.1109/TIT.2006.878167}, archivePrefix={arXiv}, eprint={cs/0511019}, primaryClass={cs.IT math.IT} }
kim2005a
arxiv-673486
cs/0511020
Pbit and other list sorting algorithms
<|reference_start|>Pbit and other list sorting algorithms: Pbit, besides its simplicity, is definitely the fastest list sorting algorithm. It considerably surpasses all already known methods. Among many advantages, it is stable, linear and be made to run in place. I will compare Pbit with algorithm described by Donald E. Knuth in the third volume of ''The Art of Computer Programming'' and other (QuickerSort, MergeSort) list sorting algorithms.<|reference_end|>
arxiv
@article{płaneta2005pbit, title={Pbit and other list sorting algorithms}, author={David S. P{\l}aneta}, journal={Cornell University Computing and Information Science Technical Reports, 2006}, year={2005}, number={TR2006-2013}, archivePrefix={arXiv}, eprint={cs/0511020}, primaryClass={cs.DS} }
płaneta2005pbit
arxiv-673487
cs/0511021
Games of fixed rank: A hierarchy of bimatrix games
<|reference_start|>Games of fixed rank: A hierarchy of bimatrix games: We propose a new hierarchical approach to understand the complexity of the open problem of computing a Nash equilibrium in a bimatrix game. Specifically, we investigate a hierarchy of bimatrix games $(A,B)$ which results from restricting the rank of the matrix $A+B$ to be of fixed rank at most $k$. For every fixed $k$, this class strictly generalizes the class of zero-sum games, but is a very special case of general bimatrix games. We show that even for $k=1$ the set of Nash equilibria of these games can consist of an arbitrarily large number of connected components. While the question of exact polynomial time algorithms to find a Nash equilibrium remains open for games of fixed rank, we can provide polynomial time algorithms for finding an $\epsilon$-approximation.<|reference_end|>
arxiv
@article{kannan2005games, title={Games of fixed rank: A hierarchy of bimatrix games}, author={Ravi Kannan and Thorsten Theobald}, journal={arXiv preprint arXiv:cs/0511021}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511021}, primaryClass={cs.GT math.CO} }
kannan2005games
arxiv-673488
cs/0511022
Does a Plane Imitate a Bird? Does Computer Vision Have to Follow Biological Paradigms?
<|reference_start|>Does a Plane Imitate a Bird? Does Computer Vision Have to Follow Biological Paradigms?: We posit a new paradigm for image information processing. For the last 25 years, this task was usually approached in the frame of Treisman's two-stage paradigm [1]. The latter supposes an unsupervised, bottom-up directed process of preliminary information pieces gathering at the lower processing stages and a supervised, top-down directed process of information pieces binding and grouping at the higher stages. It is acknowledged that these sub-processes interact and intervene between them in a tricky and a complicated manner. Notwithstanding the prevalence of this paradigm in biological and computer vision, we nevertheless propose to replace it with a new one, which we would like to designate as a two-part paradigm. In it, information contained in an image is initially extracted in an independent top-down manner by one part of the system, and then it is examined and interpreted by another, separate system part. We argue that the new paradigm seems to be more plausible than its forerunner. We provide evidence from human attention vision studies and insights of Kolmogorov's complexity theory to support our arguments. We also provide some reasons in favor of separate image interpretation issues.<|reference_end|>
arxiv
@article{diamant2005does, title={Does a Plane Imitate a Bird? Does Computer Vision Have to Follow Biological Paradigms?}, author={Emanuel Diamant}, journal={LNCS, vol. 3704, pp. 108-115, 2005, Springer Verlag Berlin Heidelberg 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511022}, primaryClass={cs.NE} }
diamant2005does
arxiv-673489
cs/0511023
Verifying nondeterministic probabilistic channel systems against $\omega$-regular linear-time properties
<|reference_start|>Verifying nondeterministic probabilistic channel systems against $\omega$-regular linear-time properties: Lossy channel systems (LCSs) are systems of finite state automata that communicate via unreliable unbounded fifo channels. In order to circumvent the undecidability of model checking for nondeterministic LCSs, probabilistic models have been introduced, where it can be decided whether a linear-time property holds almost surely. However, such fully probabilistic systems are not a faithful model of nondeterministic protocols. We study a hybrid model for LCSs where losses of messages are seen as faults occurring with some given probability, and where the internal behavior of the system remains nondeterministic. Thus the semantics is in terms of infinite-state Markov decision processes. The purpose of this article is to discuss the decidability of linear-time properties formalized by formulas of linear temporal logic (LTL). Our focus is on the qualitative setting where one asks, e.g., whether a LTL-formula holds almost surely or with zero probability (in case the formula describes the bad behaviors). Surprisingly, it turns out that -- in contrast to finite-state Markov decision processes -- the satisfaction relation for LTL formulas depends on the chosen type of schedulers that resolve the nondeterminism. While all variants of the qualitative LTL model checking problem for the full class of history-dependent schedulers are undecidable, the same questions for finite-memory scheduler can be solved algorithmically. However, the restriction to reachability properties and special kinds of recurrent reachability properties yields decidable verification problems for the full class of schedulers, which -- for this restricted class of properties -- are as powerful as finite-memory schedulers, or even a subclass of them.<|reference_end|>
arxiv
@article{baier2005verifying, title={Verifying nondeterministic probabilistic channel systems against $\omega$-regular linear-time properties}, author={Christel Baier, Nathalie Bertrand and Philippe Schnoebelen}, journal={ACM Trans. Computational Logic 9(1), 2007}, year={2005}, doi={10.1145/1297658.1297663}, archivePrefix={arXiv}, eprint={cs/0511023}, primaryClass={cs.LO} }
baier2005verifying
arxiv-673490
cs/0511024
Heat kernel expansion for a family of stochastic volatility models : delta-geometry
<|reference_start|>Heat kernel expansion for a family of stochastic volatility models : delta-geometry: In this paper, we study a family of stochastic volatility processes; this family features a mean reversion term for the volatility and a double CEV-like exponent that generalizes SABR and Heston's models. We derive approximated closed form formulas for the digital prices, the local and implied volatilities. Our formulas are efficient for small maturities. Our method is based on differential geometry, especially small time diffusions on riemanian spaces. This geometrical point of view can be extended to other processes, and is very accurate to produce variate smiles for small maturities and small moneyness.<|reference_end|>
arxiv
@article{paul2005heat, title={Heat kernel expansion for a family of stochastic volatility models : delta-geometry}, author={Paul Bourgade, Olivier Croissant}, journal={arXiv preprint arXiv:cs/0511024}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511024}, primaryClass={cs.CE} }
paul2005heat
arxiv-673491
cs/0511025
Logic Column 14: Nominal Logic and Abstract Syntax
<|reference_start|>Logic Column 14: Nominal Logic and Abstract Syntax: Formalizing syntactic proofs of properties of logics, programming languages, security protocols, and other formal systems is a significant challenge, in large part because of the obligation to handle name-binding correctly. We present an approach called nominal abstract syntax that has attracted considerable interest since its introduction approximately six years ago. After an overview of other approaches, we describe nominal abstract syntax and nominal logic, a logic for reasoning about nominal abstract syntax. We also discuss applications of nominal techniques to programming, automated reasoning, and identify some future directions.<|reference_end|>
arxiv
@article{cheney2005logic, title={Logic Column 14: Nominal Logic and Abstract Syntax}, author={James Cheney}, journal={SIGACT News 36 (4), pp. 47-69, 2005}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511025}, primaryClass={cs.LO} }
cheney2005logic
arxiv-673492
cs/0511026
A Decision Theoretic Framework for Real-Time Communication
<|reference_start|>A Decision Theoretic Framework for Real-Time Communication: We consider a communication system in which the outputs of a Markov source are encoded and decoded in \emph{real-time} by a finite memory receiver, and the distortion measure does not tolerate delays. The objective is to choose designs, i.e. real-time encoding, decoding and memory update strategies that minimize a total expected distortion measure. This is a dynamic team problem with non-classical information structure [Witsenhausen:1971]. We use the structural results of [Teneketzis:2004] to develop a sequential decomposition for the finite and infinite horizon problems. Thus, we obtain a systematic methodology for the determination of jointly optimal encoding decoding and memory update strategies for real-time point-to-point communication systems.<|reference_end|>
arxiv
@article{mahajan2005a, title={A Decision Theoretic Framework for Real-Time Communication}, author={Aditya Mahajan, Demosthenis Teneketzis}, journal={arXiv preprint arXiv:cs/0511026}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511026}, primaryClass={cs.IT math.IT} }
mahajan2005a
arxiv-673493
cs/0511027
Discrete Network Dynamics Part 1: Operator Theory
<|reference_start|>Discrete Network Dynamics Part 1: Operator Theory: An operator algebra implementation of Markov chain Monte Carlo algorithms for simulating Markov random fields is proposed. It allows the dynamics of networks whose nodes have discrete state spaces to be specified by the action of an update operator that is composed of creation and annihilation operators. This formulation of discrete network dynamics has properties that are similar to those of a quantum field theory of bosons, which allows reuse of many conceptual and theoretical structures from QFT. The equilibrium behaviour of one of these generalised MRFs and of the adaptive cluster expansion network (ACEnet) are shown to be equivalent, which provides a way of unifying these two theories.<|reference_end|>
arxiv
@article{luttrell2005discrete, title={Discrete Network Dynamics. Part 1: Operator Theory}, author={Stephen Luttrell}, journal={arXiv preprint arXiv:cs/0511027}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511027}, primaryClass={cs.NE} }
luttrell2005discrete
arxiv-673494
cs/0511028
MIMO Diversity in the Presence of Double Scattering
<|reference_start|>MIMO Diversity in the Presence of Double Scattering: The potential benefits of multiple-antenna systems may be limited by two types of channel degradations: rank deficiency and spatial fading correlation of the channel. In this paper, we assess the effects of these degradations on the diversity performance of multiple-input multiple-output (MIMO) systems, with an emphasis on orthogonal space-time block codes, in terms of the symbol error probability, the effective fading figure (EFF), and the capacity at low signal-to-noise ratio (SNR). In particular, we consider a general family of MIMO channels known as double-scattering channels, which encompasses a variety of propagation environments from independent and identically distributed Rayleigh to degenerate keyhole or pinhole cases by embracing both rank-deficient and spatial correlation effects. It is shown that a MIMO system with $n_T$ transmit and $n_R$ receive antennas achieves the diversity of order $\frac{n_T n_S n_R}{\max(n_T,n_S,n_R)}$ in a double-scattering channel with $n_S$ effective scatterers. We also quantify the combined effect of the spatial correlation and the lack of scattering richness on the EFF and the low-SNR capacity in terms of the correlation figures of transmit, receive, and scatterer correlation matrices. We further show the monotonicity properties of these performance measures with respect to the strength of spatial correlation, characterized by the eigenvalue majorization relations of the correlation matrices.<|reference_end|>
arxiv
@article{shin2005mimo, title={MIMO Diversity in the Presence of Double Scattering}, author={Hyundong Shin and Moe Z. Win}, journal={arXiv preprint arXiv:cs/0511028}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511028}, primaryClass={cs.IT math.IT} }
shin2005mimo
arxiv-673495
cs/0511029
Non-coherent Rayleigh fading MIMO channels: Capacity Supremum
<|reference_start|>Non-coherent Rayleigh fading MIMO channels: Capacity Supremum: This paper investigates the limits of information transfer over a fast Rayleigh fading MIMO channel, where neither the transmitter nor the receiver has the knowledge of the channel state information (CSI) except the fading statistics. We develop a scalar channel model due to absence of the phase information in non-coherent Rayleigh fading and derive a capacity supremum with the number of receive antennas at any signal to noise ratio (SNR) using Lagrange optimisation. Also, we conceptualise the discrete nature of the optimal input distribution by posing the optimisation on the channel mutual information for $N$ discrete inputs. Furthermore, we derive an expression for the asymptotic capacity when the input power is large, and compare with the existing capacity results when the receiver is equipped with a large number of antennas.<|reference_end|>
arxiv
@article{perera2005non-coherent, title={Non-coherent Rayleigh fading MIMO channels: Capacity Supremum}, author={Rasika R Perera, Tony S Pollock, Thushara D Abhayapala}, journal={arXiv preprint arXiv:cs/0511029}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511029}, primaryClass={cs.IT math.IT} }
perera2005non-coherent
arxiv-673496
cs/0511030
The Linear Arrangement Problem Parameterized Above Guaranteed Value
<|reference_start|>The Linear Arrangement Problem Parameterized Above Guaranteed Value: A linear arrangement (LA) is an assignment of distinct integers to the vertices of a graph. The cost of an LA is the sum of lengths of the edges of the graph, where the length of an edge is defined as the absolute value of the difference of the integers assigned to its ends. For many applications one hopes to find an LA with small cost. However, it is a classical NP-complete problem to decide whether a given graph $G$ admits an LA of cost bounded by a given integer. Since every edge of $G$ contributes at least one to the cost of any LA, the problem becomes trivially fixed-parameter tractable (FPT) if parameterized by the upper bound of the cost. Fernau asked whether the problem remains FPT if parameterized by the upper bound of the cost minus the number of edges of the given graph; thus whether the problem is FPT ``parameterized above guaranteed value.'' We answer this question positively by deriving an algorithm which decides in time $O(m+n+5.88^k)$ whether a given graph with $m$ edges and $n$ vertices admits an LA of cost at most $m+k$ (the algorithm computes such an LA if it exists). Our algorithm is based on a procedure which generates a problem kernel of linear size in linear time for a connected graph $G$. We also prove that more general parameterized LA problems stated by Serna and Thilikos are not FPT, unless P=NP.<|reference_end|>
arxiv
@article{gutin2005the, title={The Linear Arrangement Problem Parameterized Above Guaranteed Value}, author={G. Gutin, A. Rafiey, S. Szeider, A. Yeo}, journal={arXiv preprint arXiv:cs/0511030}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511030}, primaryClass={cs.DS cs.CC} }
gutin2005the
arxiv-673497
cs/0511031
Internet Protocol Black Holes: A E-Security Threat
<|reference_start|>Internet Protocol Black Holes: A E-Security Threat: The paper is taken out.<|reference_end|>
arxiv
@article{prashant2005internet, title={Internet Protocol Black Holes: A E-Security Threat}, author={Prashant}, journal={arXiv preprint arXiv:cs/0511031}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511031}, primaryClass={cs.NI} }
prashant2005internet
arxiv-673498
cs/0511032
Spatiotemporal sensitivity and visual attention for efficient rendering of dynamic environments
<|reference_start|>Spatiotemporal sensitivity and visual attention for efficient rendering of dynamic environments: We present a method to accelerate global illumination computation in dynamic environments by taking advantage of limitations of the human visual system. A model of visual attention is used to locate regions of interest in a scene and to modulate spatiotemporal sensitivity. The method is applied in the form of a spatiotemporal error tolerance map. Perceptual acceleration combined with good sampling protocols provide a global illumination solution feasible for use in animation. Results indicate an order of magnitude improvement in computational speed. The method is adaptable and can also be used in image-based rendering, geometry level of detail selection, realistic image synthesis, video telephony and video compression.<|reference_end|>
arxiv
@article{yee2005spatiotemporal, title={Spatiotemporal sensitivity and visual attention for efficient rendering of dynamic environments}, author={Yang Li Hector Yee (Cornell University)}, journal={ACM Transactions on Graphics, 20(1), January 2001}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511032}, primaryClass={cs.GR cs.CV} }
yee2005spatiotemporal
arxiv-673499
cs/0511033
Fast (Multi-)Evaluation of Linearly Recurrent Sequences: Improvements and Applications
<|reference_start|>Fast (Multi-)Evaluation of Linearly Recurrent Sequences: Improvements and Applications: For a linearly recurrent vector sequence P[n+1] = A(n) * P[n], consider the problem of calculating either the n-th term P[n] or L<=n arbitrary terms P[n_1],...,P[n_L], both for the case of constant coefficients A(n)=A and for a matrix A(n) with entries polynomial in n. We improve and extend known algorithms for this problem and present new applications for it. Specifically it turns out that for instance * any family (p_n) of classical orthogonal polynomials admits evaluation at given x within O(n^{1/2} log n) operations INDEPENDENT of the family (p_n) under consideration. * For any L indices n_1,...,n_L <= n, the values p_{n_i}(x) can be calculated simultaneously using O(n^{1/2} log n + L log(n/L)) arithmetic operations; again this running time bound holds uniformly. * Every hypergeometric (or, more generally, holonomic) function admits approximate evaluation up to absolute error e>0 within O((log(1/e)^{1/2} loglog(1/e)) -- as opposed to O(log(1/e)) -- arithmetic steps. * Given m and a polynomial p of degree d over a field of characteristic zero, the coefficient of p^m to term X^n can be computed within O(d^2 M(n^{1/2})) steps where M(n) denotes the cost of multiplying two degree-n polynomials. * The same time bound holds for the joint calculation of any L<=n^{1/2} desired coefficients of p^m to terms X^{n_i}, n_1,...,n_L <= n.<|reference_end|>
arxiv
@article{ziegler2005fast, title={Fast (Multi-)Evaluation of Linearly Recurrent Sequences: Improvements and Applications}, author={Martin Ziegler}, journal={arXiv preprint arXiv:cs/0511033}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511033}, primaryClass={cs.SC} }
ziegler2005fast
arxiv-673500
cs/0511034
Generalized Hermitian Codes over GF(2^r)
<|reference_start|>Generalized Hermitian Codes over GF(2^r): In this paper we studied generalization of Hermitian function field proposed by A.Garcia and H.Stichtenoth. We calculated a Weierstrass semigroup of the point at infinity for the case q=2, r>=3. It turned out that unlike Hermitian case, we have already three generators for the semigroup. We then applied this result to codes, constructed on generalized Hermitian function fields. Further, we applied results of C.Kirfel and R.Pellikaan to estimating a Feng-Rao designed distance for GH-codes, which improved on Goppa designed distance. Next, we studied the question of codes dual to GH-codes. We identified that the duals are also GH-codes and gave an explicit formula. We concluded with some computational results. In particular, a new record-giving [32,16,>=12]-code over GF(8) was presented.<|reference_end|>
arxiv
@article{bulygin2005generalized, title={Generalized Hermitian Codes over GF(2^r)}, author={Stanislav Bulygin}, journal={arXiv preprint arXiv:cs/0511034}, year={2005}, archivePrefix={arXiv}, eprint={cs/0511034}, primaryClass={cs.DM} }
bulygin2005generalized