corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-5201 | 0810.3345 | Enhancements to A Lightweight RFID Authentication Protocol | <|reference_start|>Enhancements to A Lightweight RFID Authentication Protocol: Vajda and Buttyan (VB) proposed a set of five lightweight RFID authentication protocols. Defend, Fu, and Juels (DFJ) did cryptanalysis on two of them - XOR and SUBSET. To the XOR protocol, DFJ proposed repeated keys attack and nibble attack. In this paper, we identify the vulnerability existed in the original VB's successive session key permutation algorithm. We propose three enhancements to prevent DFJ's attacks and make XOR protocol stronger without introducing extra resource cost.<|reference_end|> | arxiv | @article{zhang2008enhancements,
title={Enhancements to A Lightweight RFID Authentication Protocol},
author={Xiaowen Zhang and Zhanyang Zhang and Xinzhou Wei},
journal={arXiv preprint arXiv:0810.3345},
year={2008},
archivePrefix={arXiv},
eprint={0810.3345},
primaryClass={cs.CR}
} | zhang2008enhancements |
arxiv-5202 | 0810.3356 | The Fundamental Problem with the Building Block Hypothesis | <|reference_start|>The Fundamental Problem with the Building Block Hypothesis: Skepticism of the building block hypothesis (BBH) has previously been expressed on account of the weak theoretical foundations of this hypothesis and the anomalies in the empirical record of the simple genetic algorithm. In this paper we hone in on a more fundamental cause for skepticism--the extraordinary strength of some of the assumptions that undergird the BBH. Specifically, we focus on assumptions made about the distribution of fitness over the genome set, and argue that these assumptions are unacceptably strong. As most of these assumptions have been embraced by the designers of so-called "competent" genetic algorithms, our critique is relevant to an appraisal of such algorithms as well.<|reference_end|> | arxiv | @article{burjorjee2008the,
title={The Fundamental Problem with the Building Block Hypothesis},
author={Keki Burjorjee},
journal={arXiv preprint arXiv:0810.3356},
year={2008},
archivePrefix={arXiv},
eprint={0810.3356},
primaryClass={cs.NE}
} | burjorjee2008the |
arxiv-5203 | 0810.3357 | Two Remarkable Computational Competencies of the Simple Genetic Algorithm | <|reference_start|>Two Remarkable Computational Competencies of the Simple Genetic Algorithm: Since the inception of genetic algorithmics the identification of computational efficiencies of the simple genetic algorithm (SGA) has been an important goal. In this paper we distinguish between a computational competency of the SGA--an efficient, but narrow computational ability--and a computational proficiency of the SGA--a computational ability that is both efficient and broad. Till date, attempts to deduce a computational proficiency of the SGA have been unsuccessful. It may, however, be possible to inductively infer a computational proficiency of the SGA from a set of related computational competencies that have been deduced. With this in mind we deduce two computational competencies of the SGA. These competencies, when considered together, point toward a remarkable computational proficiency of the SGA. This proficiency is pertinent to a general problem that is closely related to a well-known statistical problem at the cutting edge of computational genetics.<|reference_end|> | arxiv | @article{burjorjee2008two,
title={Two Remarkable Computational Competencies of the Simple Genetic
Algorithm},
author={Keki M. Burjorjee},
journal={arXiv preprint arXiv:0810.3357},
year={2008},
archivePrefix={arXiv},
eprint={0810.3357},
primaryClass={cs.NE}
} | burjorjee2008two |
arxiv-5204 | 0810.3416 | Text as Statistical Mechanics Object | <|reference_start|>Text as Statistical Mechanics Object: In this article we present a model of human written text based on statistical mechanics approach by deriving the potential energy for different parts of the text using large text corpus. We have checked the results numerically and found that the specific heat parameter effectively separates the closed class words from the specific terms used in the text.<|reference_end|> | arxiv | @article{koroutchev2008text,
title={Text as Statistical Mechanics Object},
author={K. Koroutchev and E. Korutcheva},
journal={arXiv preprint arXiv:0810.3416},
year={2008},
archivePrefix={arXiv},
eprint={0810.3416},
primaryClass={cs.CL physics.soc-ph}
} | koroutchev2008text |
arxiv-5205 | 0810.3418 | Detecting the Most Unusual Part of a Digital Image | <|reference_start|>Detecting the Most Unusual Part of a Digital Image: The purpose of this paper is to introduce an algorithm that can detect the most unusual part of a digital image. The most unusual part of a given shape is defined as a part of the image that has the maximal distance to all non intersecting shapes with the same form. The method can be used to scan image databases with no clear model of the interesting part or large image databases, as for example medical databases.<|reference_end|> | arxiv | @article{koroutchev2008detecting,
title={Detecting the Most Unusual Part of a Digital Image},
author={K. Koroutchev and E. Korutcheva},
journal={arXiv preprint arXiv:0810.3418},
year={2008},
archivePrefix={arXiv},
eprint={0810.3418},
primaryClass={cs.CV cs.GR}
} | koroutchev2008detecting |
arxiv-5206 | 0810.3422 | Coding Theorems for Repeat Multiple Accumulate Codes | <|reference_start|>Coding Theorems for Repeat Multiple Accumulate Codes: In this paper the ensemble of codes formed by a serial concatenation of a repetition code with multiple accumulators connected through random interleavers is considered. Based on finite length weight enumerators for these codes, asymptotic expressions for the minimum distance and an arbitrary number of accumulators larger than one are derived using the uniform interleaver approach. In accordance with earlier results in the literature, it is first shown that the minimum distance of repeat-accumulate codes can grow, at best, sublinearly with block length. Then, for repeat-accumulate-accumulate codes and rates of 1/3 or less, it is proved that these codes exhibit asymptotically linear distance growth with block length, where the gap to the Gilbert-Varshamov bound can be made vanishingly small by increasing the number of accumulators beyond two. In order to address larger rates, random puncturing of a low-rate mother code is introduced. It is shown that in this case the resulting ensemble of repeat-accumulate-accumulate codes asymptotically achieves linear distance growth close to the Gilbert-Varshamov bound. This holds even for very high rate codes.<|reference_end|> | arxiv | @article{kliewer2008coding,
title={Coding Theorems for Repeat Multiple Accumulate Codes},
author={Joerg Kliewer and Kamil S. Zigangirov and Christian Koller and
  Costello, Jr., Daniel J.},
journal={arXiv preprint arXiv:0810.3422},
year={2008},
archivePrefix={arXiv},
eprint={0810.3422},
primaryClass={cs.IT math.IT}
} | kliewer2008coding |
arxiv-5207 | 0810.3434 | Numerical method for Darcy flow derived using Discrete Exterior Calculus | <|reference_start|>Numerical method for Darcy flow derived using Discrete Exterior Calculus: We derive a numerical method for Darcy flow, hence also for Poisson's equation in mixed (first order) form, based on discrete exterior calculus (DEC). Exterior calculus is a generalization of vector calculus to smooth manifolds and DEC is one of its discretizations on simplicial complexes such as triangle and tetrahedral meshes. DEC is a coordinate invariant discretization, in that it does not depend on the embedding of the simplices or the whole mesh. We start by rewriting the governing equations of Darcy flow using the language of exterior calculus. This yields a formulation in terms of flux differential form and pressure. The numerical method is then derived by using the framework provided by DEC for discretizing differential forms and operators that act on forms. We also develop a discretization for spatially dependent Hodge star that varies with the permeability of the medium. This also allows us to address discontinuous permeability. The matrix representation for our discrete non-homogeneous Hodge star is diagonal, with positive diagonal entries. The resulting linear system of equations for flux and pressure are saddle type, with a diagonal matrix as the top left block. The performance of the proposed numerical method is illustrated on many standard test problems. These include patch tests in two and three dimensions, comparison with analytically known solution in two dimensions, layered medium with alternating permeability values, and a test with a change in permeability along the flow direction. We also show numerical evidence of convergence of the flux and the pressure. A convergence experiment is also included for Darcy flow on a surface. A short introduction to the relevant parts of smooth and discrete exterior calculus is included in this paper. 
We also include a discussion of the boundary condition in terms of exterior calculus.<|reference_end|> | arxiv | @article{hirani2008numerical,
title={Numerical method for Darcy flow derived using Discrete Exterior Calculus},
author={Anil N. Hirani and Kalyana B. Nakshatrala and Jehanzeb H. Chaudhry},
journal={arXiv preprint arXiv:0810.3434},
year={2008},
number={UIUCDCS-R-2008-2937},
archivePrefix={arXiv},
eprint={0810.3434},
primaryClass={math.NA cs.NA math.DG}
} | hirani2008numerical |
arxiv-5208 | 0810.3438 | Efficient Algorithms and Routing Protocols for Handling Transient Single Node Failures | <|reference_start|>Efficient Algorithms and Routing Protocols for Handling Transient Single Node Failures: Single node failures represent more than 85% of all node failures in the today's large communication networks such as the Internet. Also, these node failures are usually transient. Consequently, having the routing paths globally recomputed does not pay off since the failed nodes recover fairly quickly, and the recomputed routing paths need to be discarded. Instead, we develop algorithms and protocols for dealing with such transient single node failures by suppressing the failure (instead of advertising it across the network), and routing messages to the destination via alternate paths that do not use the failed node. We compare our solution to that of Ref. [11] wherein the authors have presented a "Failure Insensitive Routing" protocol as a proactive recovery scheme for handling transient node failures. We show that our algorithms are faster by an order of magnitude while our paths are equally good. We show via simulation results that our paths are usually within 15% of the optimal for randomly generated graph with 100-1000 nodes.<|reference_end|> | arxiv | @article{bhosle2008efficient,
title={Efficient Algorithms and Routing Protocols for Handling Transient Single
Node Failures},
author={Amit M Bhosle and Teofilo F Gonzalez},
journal={arXiv preprint arXiv:0810.3438},
year={2008},
archivePrefix={arXiv},
eprint={0810.3438},
primaryClass={cs.DS}
} | bhosle2008efficient |
arxiv-5209 | 0810.3442 | Language structure in the n-object naming game | <|reference_start|>Language structure in the n-object naming game: We examine a naming game with two agents trying to establish a common vocabulary for n objects. Such efforts lead to the emergence of language that allows for an efficient communication and exhibits some degree of homonymy and synonymy. Although homonymy reduces the communication efficiency, it seems to be a dynamical trap that persists for a long, and perhaps indefinite, time. On the other hand, synonymy does not reduce the efficiency of communication, but appears to be only a transient feature of the language. Thus, in our model the role of synonymy decreases and in the long-time limit it becomes negligible. A similar rareness of synonymy is observed in present natural languages. The role of noise, that distorts the communicated words, is also examined. Although, in general, the noise reduces the communication efficiency, it also regroups the words so that they are more evenly distributed within the available "verbal" space.<|reference_end|> | arxiv | @article{lipowski2008language,
title={Language structure in the n-object naming game},
author={Adam Lipowski and Dorota Lipowska},
journal={Phys. Rev. E 80, 056107 (2009)},
year={2008},
doi={10.1103/PhysRevE.80.056107},
archivePrefix={arXiv},
eprint={0810.3442},
primaryClass={cs.CL cs.MA physics.soc-ph}
} | lipowski2008language |
arxiv-5210 | 0810.3451 | The many faces of optimism - Extended version | <|reference_start|>The many faces of optimism - Extended version: The exploration-exploitation dilemma has been an intriguing and unsolved problem within the framework of reinforcement learning. "Optimism in the face of uncertainty" and model building play central roles in advanced exploration methods. Here, we integrate several concepts and obtain a fast and simple algorithm. We show that the proposed algorithm finds a near-optimal policy in polynomial time, and give experimental evidence that it is robust and efficient compared to its ascendants.<|reference_end|> | arxiv | @article{szita2008the,
title={The many faces of optimism - Extended version},
author={Istv\'an Szita and Andr\'as L\H{o}rincz},
journal={arXiv preprint arXiv:0810.3451},
year={2008},
archivePrefix={arXiv},
eprint={0810.3451},
primaryClass={cs.AI cs.CC cs.LG}
} | szita2008the |
arxiv-5211 | 0810.3453 | Grid Computing in the Collider Detector at Fermilab (CDF) scientific experiment | <|reference_start|>Grid Computing in the Collider Detector at Fermilab (CDF) scientific experiment: The computing model for the Collider Detector at Fermilab (CDF) scientific experiment has evolved since the beginning of the experiment. Initially CDF computing was comprised of dedicated resources located in computer farms around the world. With the wide spread acceptance of grid computing in High Energy Physics, CDF computing has migrated to using grid computing extensively. CDF uses computing grids around the world. Each computing grid has required different solutions. The use of portals as interfaces to the collaboration computing resources has proven to be an extremely useful technique allowing the CDF physicists transparently migrate from using dedicated computer farm to using computing located in grid farms often away from Fermilab. Grid computing at CDF continues to evolve as the grid standards and practices change.<|reference_end|> | arxiv | @article{benjamin2008grid,
title={Grid Computing in the Collider Detector at Fermilab (CDF) scientific
experiment},
author={Douglas P. Benjamin},
journal={arXiv preprint arXiv:0810.3453},
year={2008},
archivePrefix={arXiv},
eprint={0810.3453},
primaryClass={cs.DC hep-ex physics.data-an}
} | benjamin2008grid |
arxiv-5212 | 0810.3468 | A Call-Graph Profiler for GNU Octave | <|reference_start|>A Call-Graph Profiler for GNU Octave: We report the design and implementation of a call-graph profiler for GNU Octave, a numerical computing platform. GNU Octave simplifies matrix computation for use in modeling or simulation. Our work provides a call-graph profiler, which is an improvement on the flat profiler. We elaborate design constraints of building a profiler for numerical computation, and benchmark the profiler by comparing it to the rudimentary timer start-stop (tic-toc) measurements, for a similar set of programs. The profiler code provides clean interfaces to internals of GNU Octave, for other (newer) profiling tools on GNU Octave.<|reference_end|> | arxiv | @article{annamalai2008a,
title={A Call-Graph Profiler for GNU Octave},
author={Muthiah Annamalai and Leela Velusamy},
journal={arXiv preprint arXiv:0810.3468},
year={2008},
archivePrefix={arXiv},
eprint={0810.3468},
primaryClass={cs.PF cs.PL cs.SE}
} | annamalai2008a |
arxiv-5213 | 0810.3474 | Social Learning Methods in Board Games | <|reference_start|>Social Learning Methods in Board Games: This paper discusses the effects of social learning in training of game playing agents. The training of agents in a social context instead of a self-play environment is investigated. Agents that use the reinforcement learning algorithms are trained in social settings. This mimics the way in which players of board games such as scrabble and chess mentor each other in their clubs. A Round Robin tournament and a modified Swiss tournament setting are used for the training. The agents trained using social settings are compared to self play agents and results indicate that more robust agents emerge from the social training setting. Higher state space games can benefit from such settings as diverse set of agents will have multiple strategies that increase the chances of obtaining more experienced players at the end of training. The Social Learning trained agents exhibit better playing experience than self play agents. The modified Swiss playing style spawns a larger number of better playing agents as the population size increases.<|reference_end|> | arxiv | @article{marivate2008social,
title={Social Learning Methods in Board Games},
author={Vukosi N. Marivate and Tshilidzi Marwala},
journal={arXiv preprint arXiv:0810.3474},
year={2008},
archivePrefix={arXiv},
eprint={0810.3474},
primaryClass={cs.AI cs.MA}
} | marivate2008social |
arxiv-5214 | 0810.3484 | A Study of NK Landscapes' Basins and Local Optima Networks | <|reference_start|>A Study of NK Landscapes' Basins and Local Optima Networks: We propose a network characterization of combinatorial fitness landscapes by adapting the notion of inherent networks proposed for energy surfaces (Doye, 2002). We use the well-known family of $NK$ landscapes as an example. In our case the inherent network is the graph where the vertices are all the local maxima and edges mean basin adjacency between two maxima. We exhaustively extract such networks on representative small NK landscape instances, and show that they are 'small-worlds'. However, the maxima graphs are not random, since their clustering coefficients are much larger than those of corresponding random graphs. Furthermore, the degree distributions are close to exponential instead of Poissonian. We also describe the nature of the basins of attraction and their relationship with the local maxima network.<|reference_end|> | arxiv | @article{ochoa2008a,
title={A Study of NK Landscapes' Basins and Local Optima Networks},
author={Gabriela Ochoa and Marco Tomassini (ISI) and S\'ebastien Verel (I3S)
  and Christian Darabos (ISI)},
journal={Genetic And Evolutionary Computation Conference, Atlanta :
\'Etats-Unis d'Am\'erique (2008)},
year={2008},
doi={10.1145/1389095.1389204},
archivePrefix={arXiv},
eprint={0810.3484},
primaryClass={cs.NE}
} | ochoa2008a |
arxiv-5215 | 0810.3492 | The Connectivity of NK Landscapes' Basins: A Network Analysis | <|reference_start|>The Connectivity of NK Landscapes' Basins: A Network Analysis: We propose a network characterization of combinatorial fitness landscapes by adapting the notion of inherent networks proposed for energy surfaces. We use the well-known family of NK landscapes as an example. In our case the inherent network is the graph where the vertices represent the local maxima in the landscape, and the edges account for the transition probabilities between their corresponding basins of attraction. We exhaustively extracted such networks on representative small NK landscape instances, and performed a statistical characterization of their properties. We found that most of these network properties can be related to the search difficulty on the underlying NK landscapes with varying values of K.<|reference_end|> | arxiv | @article{verel2008the,
title={The Connectivity of NK Landscapes' Basins: A Network Analysis},
author={S\'ebastien Verel (I3S) and Gabriela Ochoa and Marco Tomassini (ISI)},
journal={arXiv preprint arXiv:0810.3492},
year={2008},
archivePrefix={arXiv},
eprint={0810.3492},
primaryClass={cs.NE}
} | verel2008the |
arxiv-5216 | 0810.3525 | The use of entropy to measure structural diversity | <|reference_start|>The use of entropy to measure structural diversity: In this paper entropy based methods are compared and used to measure structural diversity of an ensemble of 21 classifiers. This measure is mostly applied in ecology, whereby species counts are used as a measure of diversity. The measures used were Shannon entropy, Simpsons and the Berger Parker diversity indexes. As the diversity indexes increased so did the accuracy of the ensemble. An ensemble dominated by classifiers with the same structure produced poor accuracy. Uncertainty rule from information theory was also used to further define diversity. Genetic algorithms were used to find the optimal ensemble by using the diversity indices as the cost function. The method of voting was used to aggregate the decisions.<|reference_end|> | arxiv | @article{masisi2008the,
title={The use of entropy to measure structural diversity},
author={L. Masisi and V. Nelwamondo and T. Marwala},
journal={arXiv preprint arXiv:0810.3525},
year={2008},
archivePrefix={arXiv},
eprint={0810.3525},
primaryClass={cs.LG cs.AI q-bio.QM}
} | masisi2008the |
arxiv-5217 | 0810.3564 | The Poisson Channel at Low Input Powers | <|reference_start|>The Poisson Channel at Low Input Powers: The asymptotic capacity at low input powers of an average-power limited or an average- and peak-power limited discrete-time Poisson channel is considered. For a Poisson channel whose dark current is zero or decays to zero linearly with its average input power $E$, capacity scales like $E\log\frac{1}{E}$ for small $E$. For a Poisson channel whose dark current is a nonzero constant, capacity scales, to within a constant, like $E\log\log\frac{1}{E}$ for small $E$.<|reference_end|> | arxiv | @article{lapidoth2008the,
title={The Poisson Channel at Low Input Powers},
author={Amos Lapidoth and Jeffrey H. Shapiro and Vinodh Venkatesan and Ligong Wang},
journal={arXiv preprint arXiv:0810.3564},
year={2008},
archivePrefix={arXiv},
eprint={0810.3564},
primaryClass={cs.IT math.IT}
} | lapidoth2008the |
arxiv-5218 | 0810.3579 | Hierarchical Bag of Paths for Kernel Based Shape Classification | <|reference_start|>Hierarchical Bag of Paths for Kernel Based Shape Classification: Graph kernels methods are based on an implicit embedding of graphs within a vector space of large dimension. This implicit embedding allows to apply to graphs methods which where until recently solely reserved to numerical data. Within the shape classification framework, graphs are often produced by a skeletonization step which is sensitive to noise. We propose in this paper to integrate the robustness to structural noise by using a kernel based on a bag of path where each path is associated to a hierarchy encoding successive simplifications of the path. Several experiments prove the robustness and the flexibility of our approach compared to alternative shape classification methods.<|reference_end|> | arxiv | @article{dupé2008hierarchical,
title={Hierarchical Bag of Paths for Kernel Based Shape Classification},
author={Fran\c{c}ois-Xavier Dup\'e (GREYC) and Luc Brun (GREYC)},
journal={Joint IAPR International Workshops on Structural and Syntactic
Pattern Recognition (SSPR 2008), Orlando : \'Etats-Unis d'Am\'erique (2008)},
year={2008},
archivePrefix={arXiv},
eprint={0810.3579},
primaryClass={cs.CV}
} | dupé2008hierarchical |
arxiv-5219 | 0810.3581 | Recursive Concurrent Stochastic Games | <|reference_start|>Recursive Concurrent Stochastic Games: We study Recursive Concurrent Stochastic Games (RCSGs), extending our recent analysis of recursive simple stochastic games to a concurrent setting where the two players choose moves simultaneously and independently at each state. For multi-exit games, our earlier work already showed undecidability for basic questions like termination, thus we focus on the important case of single-exit RCSGs (1-RCSGs). We first characterize the value of a 1-RCSG termination game as the least fixed point solution of a system of nonlinear minimax functional equations, and use it to show PSPACE decidability for the quantitative termination problem. We then give a strategy improvement technique, which we use to show that player 1 (maximizer) has \epsilon-optimal randomized Stackless & Memoryless (r-SM) strategies for all \epsilon > 0, while player 2 (minimizer) has optimal r-SM strategies. Thus, such games are r-SM-determined. These results mirror and generalize in a strong sense the randomized memoryless determinacy results for finite stochastic games, and extend the classic Hoffman-Karp strategy improvement approach from the finite to an infinite state setting. The proofs in our infinite-state setting are very different however, relying on subtle analytic properties of certain power series that arise from studying 1-RCSGs. We show that our upper bounds, even for qualitative (probability 1) termination, can not be improved, even to NP, without a major breakthrough, by giving two reductions: first a P-time reduction from the long-standing square-root sum problem to the quantitative termination decision problem for finite concurrent stochastic games, and then a P-time reduction from the latter problem to the qualitative termination problem for 1-RCSGs.<|reference_end|> | arxiv | @article{etessami2008recursive,
title={Recursive Concurrent Stochastic Games},
author={Kousha Etessami and Mihalis Yannakakis},
journal={Logical Methods in Computer Science, Volume 4, Issue 4 (November
11, 2008) lmcs:1196},
year={2008},
doi={10.2168/LMCS-4(4:7)2008},
archivePrefix={arXiv},
eprint={0810.3581},
primaryClass={cs.GT cs.CC}
} | etessami2008recursive |
arxiv-5220 | 0810.3605 | A Minimum Relative Entropy Principle for Learning and Acting | <|reference_start|>A Minimum Relative Entropy Principle for Learning and Acting: This paper proposes a method to construct an adaptive agent that is universal with respect to a given class of experts, where each expert is an agent that has been designed specifically for a particular environment. This adaptive control problem is formalized as the problem of minimizing the relative entropy of the adaptive agent from the expert that is most suitable for the unknown environment. If the agent is a passive observer, then the optimal solution is the well-known Bayesian predictor. However, if the agent is active, then its past actions need to be treated as causal interventions on the I/O stream rather than normal probability conditions. Here it is shown that the solution to this new variational problem is given by a stochastic controller called the Bayesian control rule, which implements adaptive behavior as a mixture of experts. Furthermore, it is shown that under mild assumptions, the Bayesian control rule converges to the control law of the most suitable expert.<|reference_end|> | arxiv | @article{ortega2008a,
title={A Minimum Relative Entropy Principle for Learning and Acting},
author={Pedro A. Ortega and Daniel A. Braun},
journal={arXiv preprint arXiv:0810.3605},
year={2008},
archivePrefix={arXiv},
eprint={0810.3605},
primaryClass={cs.AI cs.LG}
} | ortega2008a |
arxiv-5221 | 0810.3626 | Experimental Study of Application Specific Source Coding for Wireless Sensor Networks | <|reference_start|>Experimental Study of Application Specific Source Coding for Wireless Sensor Networks: The energy bottleneck in Wireless Sensor Network(WSN) can be reduced by limiting communication overhead. Application specific source coding schemes for the sensor networks provide fewer bits to represent the same amount of information exploiting the redundancy present in the source model, network architecture and the physical process. This paper reports the performance of representative codes from various families of source coding schemes (lossless, lossy, constant bit-rate, variable bit-rate, distributed and joint encoding/decoding) in terms of energy consumed, bit-rate achieved, quantization-error/reconstruction-error, latency and complexity of encoder-decoder(codec). A reusable frame work for testing source codes is provided. Finally we propose a set of possible applications and suitable source codes in terms of these parameters.<|reference_end|> | arxiv | @article{annamalai2008experimental,
title={Experimental Study of Application Specific Source Coding for Wireless
Sensor Networks},
author={Muthiah Annamalai and Darshan Shrestha and Saibun Tjuatja},
journal={arXiv preprint arXiv:0810.3626},
year={2008},
archivePrefix={arXiv},
eprint={0810.3626},
primaryClass={cs.NI cs.DC}
} | annamalai2008experimental |
arxiv-5222 | 0810.3631 | Approximating the Gaussian Multiple Description Rate Region Under Symmetric Distortion Constraints | <|reference_start|>Approximating the Gaussian Multiple Description Rate Region Under Symmetric Distortion Constraints: We consider multiple description coding for the Gaussian source with K descriptions under the symmetric mean squared error distortion constraints, and provide an approximate characterization of the rate region. We show that the rate region can be sandwiched between two polytopes, between which the gap can be upper bounded by constants dependent on the number of descriptions, but independent of the exact distortion constraints. Underlying this result is an exact characterization of the lossless multi-level diversity source coding problem: a lossless counterpart of the MD problem. This connection provides a polytopic template for the inner and outer bounds to the rate region. In order to establish the outer bound, we generalize Ozarow's technique to introduce a strategic expansion of the original probability space by more than one random variables. For the symmetric rate case with any number of descriptions, we show that the gap between the upper bound and the lower bound for the individual description rate is no larger than 0.92 bit. The results developed in this work also suggest the "separation" approach of combining successive refinement quantization and lossless multi-level diversity coding is a competitive one, since it is only a constant away from the optimum. The results are further extended to general sources under the mean squared error distortion measure, where a similar but looser bound on the gap holds.<|reference_end|> | arxiv | @article{tian2008approximating,
title={Approximating the Gaussian Multiple Description Rate Region Under
Symmetric Distortion Constraints},
author={Chao Tian and Soheil Mohajer and Suhas N. Diggavi},
journal={arXiv preprint arXiv:0810.3631},
year={2008},
doi={10.1109/TIT.2009.2023704},
archivePrefix={arXiv},
eprint={0810.3631},
primaryClass={cs.IT math.IT}
} | tian2008approximating |
arxiv-5223 | 0810.3641 | Rational Hadamard products via Quantum Diagonal Operators | <|reference_start|>Rational Hadamard products via Quantum Diagonal Operators: We use the remark that, through Bargmann-Fock representation, diagonal operators of the Heisenberg-Weyl algebra are scalars for the Hadamard product to give some properties (like the stability of periodic fonctions) of the Hadamard product by a rational fraction. In particular, we provide through this way explicit formulas for the multiplication table of the Hadamard product in the algebra of rational functions in $\C[[z]]$.<|reference_end|> | arxiv | @article{duchamp2008rational,
title={Rational Hadamard products via Quantum Diagonal Operators},
author={G\'erard Henry Edmond Duchamp (LIPN) and Silvia Goodenough (LIPN) and
  Karol A. Penson (LPTMC)},
journal={arXiv preprint arXiv:0810.3641},
year={2008},
archivePrefix={arXiv},
eprint={0810.3641},
primaryClass={cs.SC math-ph math.CO math.MP}
} | duchamp2008rational |
arxiv-5224 | 0810.3671 | Emergency Centre Organization and Automated Triage System | <|reference_start|>Emergency Centre Organization and Automated Triage System: The excessive rate of patients arriving at accident and emergency centres is a major problem facing South African hospitals. Patients are prioritized for medical care through a triage process. Manual systems allow for inconsistency and error. This paper proposes a novel system to automate accident and emergency centre triage and uses this triage score along with an artificial intelligence estimate of patient-doctor time to optimize the queue order. A fuzzy inference system is employed to triage patients and a similar system estimates the time but adapts continuously through fuzzy Q-learning. The optimal queue order is found using a novel procedure based on genetic algorithms. These components are integrated in a simple graphical user interface. Live tests could not be performed but simulations reveal that the average waiting time can be reduced by 48 minutes and priority is given to urgent patients<|reference_end|> | arxiv | @article{golding2008emergency,
title={Emergency Centre Organization and Automated Triage System},
author={Dan Golding, Linda Wilson and Tshilidzi Marwala},
journal={arXiv preprint arXiv:0810.3671},
year={2008},
archivePrefix={arXiv},
eprint={0810.3671},
primaryClass={cs.CY}
} | golding2008emergency |
arxiv-5225 | 0810.3695 | An Efficient Quantum Algorithm for the Hidden Subgroup Problem over Weyl-Heisenberg Groups | <|reference_start|>An Efficient Quantum Algorithm for the Hidden Subgroup Problem over Weyl-Heisenberg Groups: Many exponential speedups that have been achieved in quantum computing are obtained via hidden subgroup problems (HSPs). We show that the HSP over Weyl-Heisenberg groups can be solved efficiently on a quantum computer. These groups are well-known in physics and play an important role in the theory of quantum error-correcting codes. Our algorithm is based on non-commutative Fourier analysis of coset states which are quantum states that arise from a given black-box function. We use Clebsch-Gordan decompositions to combine and reduce tensor products of irreducible representations. Furthermore, we use a new technique of changing labels of irreducible representations to obtain low-dimensional irreducible representations in the decomposition process. A feature of the presented algorithm is that in each iteration of the algorithm the quantum computer operates on two coset states simultaneously. This is an improvement over the previously best known quantum algorithm for these groups which required four coset states.<|reference_end|> | arxiv | @article{krovi2008an,
title={An Efficient Quantum Algorithm for the Hidden Subgroup Problem over
Weyl-Heisenberg Groups},
author={Hari Krovi and Martin Roetteler},
journal={Proceedings of Mathematical Methods in Computer Science,
(MMICS'08), pp.70-88, 2008},
year={2008},
archivePrefix={arXiv},
eprint={0810.3695},
primaryClass={quant-ph cs.CC}
} | krovi2008an |
arxiv-5226 | 0810.3708 | Characterising Testing Preorders for Finite Probabilistic Processes | <|reference_start|>Characterising Testing Preorders for Finite Probabilistic Processes: In 1992 Wang & Larsen extended the may- and must preorders of De Nicola and Hennessy to processes featuring probabilistic as well as nondeterministic choice. They concluded with two problems that have remained open throughout the years, namely to find complete axiomatisations and alternative characterisations for these preorders. This paper solves both problems for finite processes with silent moves. It characterises the may preorder in terms of simulation, and the must preorder in terms of failure simulation. It also gives a characterisation of both preorders using a modal logic. Finally it axiomatises both preorders over a probabilistic version of CSP.<|reference_end|> | arxiv | @article{deng2008characterising,
title={Characterising Testing Preorders for Finite Probabilistic Processes},
author={Yuxin Deng, Matthew Hennessy, Rob van Glabbeek, Carroll Morgan},
journal={Logical Methods in Computer Science, Volume 4, Issue 4 (October
28, 2008) lmcs:694},
year={2008},
doi={10.2168/LMCS-4(4:4)2008},
archivePrefix={arXiv},
eprint={0810.3708},
primaryClass={cs.LO}
} | deng2008characterising |
arxiv-5227 | 0810.3715 | Distributed Estimation over Wireless Sensor Networks with Packet Losses | <|reference_start|>Distributed Estimation over Wireless Sensor Networks with Packet Losses: A distributed adaptive algorithm to estimate a time-varying signal, measured by a wireless sensor network, is designed and analyzed. One of the major features of the algorithm is that no central coordination among the nodes needs to be assumed. The measurements taken by the nodes of the network are affected by noise, and the communication among the nodes is subject to packet losses. Nodes exchange local estimates and measurements with neighboring nodes. Each node of the network locally computes adaptive weights that minimize the estimation error variance. Decentralized conditions on the weights, needed for the convergence of the estimation error throughout the overall network, are presented. A Lipschitz optimization problem is posed to guarantee stability and the minimization of the variance. An efficient strategy to distribute the computation of the optimal solution is investigated. A theoretical performance analysis of the distributed algorithm is carried out both in the presence of perfect and lossy links. Numerical simulations illustrate performance for various network topologies and packet loss probabilities.<|reference_end|> | arxiv | @article{fischione2008distributed,
title={Distributed Estimation over Wireless Sensor Networks with Packet Losses},
author={Carlo Fischione, Alberto Speranzon, Karl H. Johansson, Alberto
Sangiovanni-Vincentelli},
journal={arXiv preprint arXiv:0810.3715},
year={2008},
archivePrefix={arXiv},
eprint={0810.3715},
primaryClass={cs.DC}
} | fischione2008distributed |
arxiv-5228 | 0810.3729 | Optimal codes in deletion and insertion metric | <|reference_start|>Optimal codes in deletion and insertion metric: We improve the upper bound of Levenshtein for the cardinality of a code of length 4 capable of correcting single deletions over an alphabet of even size. We also illustrate that the new upper bound is sharp. Furthermore we will construct an optimal perfect code capable of correcting single deletions for the same parameters.<|reference_end|> | arxiv | @article{kim2008optimal,
title={Optimal codes in deletion and insertion metric},
author={Hyun Kwang Kim, Joon Yop Lee, Dong Yeol Oh},
journal={arXiv preprint arXiv:0810.3729},
year={2008},
archivePrefix={arXiv},
eprint={0810.3729},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | kim2008optimal |
arxiv-5229 | 0810.3776 | Design of a Fractional Order PID Controller Using Particle Swarm Optimization Technique | <|reference_start|>Design of a Fractional Order PID Controller Using Particle Swarm Optimization Technique: Particle Swarm Optimization technique offers optimal or suboptimal solution to multidimensional rough objective functions. In this paper, this optimization technique is used for designing fractional order PID controllers that give better performance than their integer order counterparts. Controller synthesis is based on required peak overshoot and rise time specifications. The characteristic equation is minimized to obtain an optimum set of controller parameters. Results show that this design method can effectively tune the parameters of the fractional order controller.<|reference_end|> | arxiv | @article{maiti2008design,
title={Design of a Fractional Order PID Controller Using Particle Swarm
Optimization Technique},
author={Deepyaman Maiti, Sagnik Biswas, Amit Konar},
journal={arXiv preprint arXiv:0810.3776},
year={2008},
archivePrefix={arXiv},
eprint={0810.3776},
primaryClass={cs.OH}
} | maiti2008design |
arxiv-5230 | 0810.3783 | Directed Transmission Method, A Fully Asynchronous approach to Solve Sparse Linear Systems in Parallel | <|reference_start|>Directed Transmission Method, A Fully Asynchronous approach to Solve Sparse Linear Systems in Parallel: In this paper, we propose a new distributed algorithm, called Directed Transmission Method (DTM). DTM is a fully asynchronous and continuous-time iterative algorithm to solve SPD sparse linear system. As an architecture-aware algorithm, DTM could be freely running on all kinds of heterogeneous parallel computer. We proved that DTM is convergent by making use of the final-value theorem of Laplacian Transformation. Numerical experiments show that DTM is stable and efficient.<|reference_end|> | arxiv | @article{wei2008directed,
title={Directed Transmission Method, A Fully Asynchronous approach to Solve
Sparse Linear Systems in Parallel},
author={Fei Wei, Huazhong Yang},
journal={arXiv preprint arXiv:0810.3783},
year={2008},
doi={10.1145/1378533.1378598},
archivePrefix={arXiv},
eprint={0810.3783},
primaryClass={math.NA cs.DC}
} | wei2008directed |
arxiv-5231 | 0810.3787 | Automorphisms of doubly-even self-dual binary codes | <|reference_start|>Automorphisms of doubly-even self-dual binary codes: The automorphism group of a binary doubly-even self-dual code is always contained in the alternating group. On the other hand, given a permutation group $G$ of degree $n$ there exists a doubly-even self-dual $G$-invariant code if and only if $n$ is a multiple of 8, every simple self-dual $\F_2G$-module occurs with even multiplicity in $\F_2^n$, and $G$ is contained in the alternating group.<|reference_end|> | arxiv | @article{guenther2008automorphisms,
title={Automorphisms of doubly-even self-dual binary codes},
author={Annika Guenther, Gabriele Nebe},
journal={arXiv preprint arXiv:0810.3787},
year={2008},
doi={10.1112/blms/bdp026},
archivePrefix={arXiv},
eprint={0810.3787},
primaryClass={math.NT cs.IT math.IT}
} | guenther2008automorphisms |
arxiv-5232 | 0810.3827 | Comments on the Boundary of the Capacity Region of Multiaccess Fading Channels | <|reference_start|>Comments on the Boundary of the Capacity Region of Multiaccess Fading Channels: A modification is proposed for the formula known from the literature that characterizes the boundary of the capacity region of Gaussian multiaccess fading channels. The modified version takes into account potentially negative arguments of the cumulated density function that would affect the accuracy of the numerical capacity results.<|reference_end|> | arxiv | @article{shaqfeh2008comments,
title={Comments on the Boundary of the Capacity Region of Multiaccess Fading
Channels},
author={Mohamed Shaqfeh, Norbert Goertz},
journal={arXiv preprint arXiv:0810.3827},
year={2008},
archivePrefix={arXiv},
eprint={0810.3827},
primaryClass={cs.IT math.IT}
} | shaqfeh2008comments |
arxiv-5233 | 0810.3828 | Quantum reinforcement learning | <|reference_start|>Quantum reinforcement learning: The key approaches for machine learning, especially learning in unknown probabilistic environments are new representations and computation mechanisms. In this paper, a novel quantum reinforcement learning (QRL) method is proposed by combining quantum theory and reinforcement learning (RL). Inspired by the state superposition principle and quantum parallelism, a framework of value updating algorithm is introduced. The state (action) in traditional RL is identified as the eigen state (eigen action) in QRL. The state (action) set can be represented with a quantum superposition state and the eigen state (eigen action) can be obtained by randomly observing the simulated quantum state according to the collapse postulate of quantum measurement. The probability of the eigen action is determined by the probability amplitude, which is parallelly updated according to rewards. Some related characteristics of QRL such as convergence, optimality and balancing between exploration and exploitation are also analyzed, which shows that this approach makes a good tradeoff between exploration and exploitation using the probability amplitude and can speed up learning through the quantum parallelism. To evaluate the performance and practicability of QRL, several simulated experiments are given and the results demonstrate the effectiveness and superiority of QRL algorithm for some complex problems. The present work is also an effective exploration on the application of quantum computation to artificial intelligence.<|reference_end|> | arxiv | @article{dong2008quantum,
title={Quantum reinforcement learning},
author={Daoyi Dong, Chunlin Chen, Hanxiong Li and Tzyh-Jong Tarn},
journal={IEEE Transactions on Systems Man and Cybernetics Part B:
Cybernetics, Vol. 38, No. 5, pp.1207-1220, 2008},
year={2008},
doi={10.1109/TSMCB.2008.925743},
archivePrefix={arXiv},
eprint={0810.3828},
primaryClass={quant-ph cs.AI cs.LG}
} | dong2008quantum |
arxiv-5234 | 0810.3836 | Best-effort Group Service in Dynamic Networks | <|reference_start|>Best-effort Group Service in Dynamic Networks: We propose a group membership service for dynamic ad hoc networks. It maintains as long as possible the existing groups and ensures that each group diameter is always smaller than a constant, fixed according to the application using the groups. The proposed protocol is self-stabilizing and works in dynamic distributed systems. Moreover, it ensures a kind of continuity in the service offer to the application while the system is converging, except if too strong topology changes happen. Such a best effort behavior allows applications to rely on the groups while the stabilization has not been reached, which is very useful in dynamic ad hoc networks.<|reference_end|> | arxiv | @article{ducourthial2008best-effort,
title={Best-effort Group Service in Dynamic Networks},
author={Bertrand Ducourthial (HEUDIASYC), Sofiane Khalfallah (HEUDIASYC),
Franck Petit (LIP6)},
journal={arXiv preprint arXiv:0810.3836},
year={2008},
archivePrefix={arXiv},
eprint={0810.3836},
primaryClass={cs.DC}
} | ducourthial2008best-effort |
arxiv-5235 | 0810.3851 | Astronomical imaging: The theory of everything | <|reference_start|>Astronomical imaging: The theory of everything: We are developing automated systems to provide homogeneous calibration meta-data for heterogeneous imaging data, using the pixel content of the image alone where necessary. Standardized and complete calibration meta-data permit generative modeling: A good model of the sky through wavelength and time--that is, a model of the positions, motions, spectra, and variability of all stellar sources, plus an intensity map of all cosmological sources--could synthesize or generate any astronomical image ever taken at any time with any equipment in any configuration. We argue that the best-fit or highest likelihood model of the data is also the best possible astronomical catalog constructed from those data. A generative model or catalog of this form is the best possible platform for automated discovery, because it is capable of identifying informative failures of the model in new data at the pixel level, or as statistical anomalies in the joint distribution of residuals from many images. It is also, in some sense, an astronomer's "theory of everything".<|reference_end|> | arxiv | @article{hogg2008astronomical,
title={Astronomical imaging: The theory of everything},
author={David W. Hogg (NYU), Dustin Lang (Toronto)},
journal={arXiv preprint arXiv:0810.3851},
year={2008},
doi={10.1063/1.3059072},
archivePrefix={arXiv},
eprint={0810.3851},
primaryClass={astro-ph cs.CV physics.data-an}
} | hogg2008astronomical |
arxiv-5236 | 0810.3865 | Relationship between Diversity and Perfomance of Multiple Classifiers for Decision Support | <|reference_start|>Relationship between Diversity and Perfomance of Multiple Classifiers for Decision Support: The paper presents the investigation and implementation of the relationship between diversity and the performance of multiple classifiers on classification accuracy. The study is critical as to build classifiers that are strong and can generalize better. The parameters of the neural network within the committee were varied to induce diversity; hence structural diversity is the focus for this study. The hidden nodes and the activation function are the parameters that were varied. The diversity measures that were adopted from ecology such as Shannon and Simpson were used to quantify diversity. Genetic algorithm is used to find the optimal ensemble by using the accuracy as the cost function. The results observed shows that there is a relationship between structural diversity and accuracy. It is observed that the classification accuracy of an ensemble increases as the diversity increases. There was an increase of 3%-6% in the classification accuracy.<|reference_end|> | arxiv | @article{musehane2008relationship,
title={Relationship between Diversity and Perfomance of Multiple Classifiers
for Decision Support},
author={R. Musehane, F. Netshiongolwe, F.V. Nelwamondo, L. Masisi and T.
Marwala},
journal={arXiv preprint arXiv:0810.3865},
year={2008},
archivePrefix={arXiv},
eprint={0810.3865},
primaryClass={cs.AI}
} | musehane2008relationship |
arxiv-5237 | 0810.3869 | Power Control in Two-Tier Femtocell Networks | <|reference_start|>Power Control in Two-Tier Femtocell Networks: In a two tier cellular network -- comprised of a central macrocell underlaid with shorter range femtocell hotspots -- cross-tier interference limits overall capacity with universal frequency reuse. To quantify near-far effects with universal frequency reuse, this paper derives a fundamental relation providing the largest feasible cellular Signal-to-Interference-Plus-Noise Ratio (SINR), given any set of feasible femtocell SINRs. We provide a link budget analysis which enables simple and accurate performance insights in a two-tier network. A distributed utility-based SINR adaptation at femtocells is proposed in order to alleviate cross-tier interference at the macrocell from cochannel femtocells. The Foschini-Miljanic (FM) algorithm is a special case of the adaptation. Each femtocell maximizes their individual utility consisting of a SINR based reward less an incurred cost (interference to the macrocell). Numerical results show greater than 30% improvement in mean femtocell SINRs relative to FM. In the event that cross-tier interference prevents a cellular user from obtaining its SINR target, an algorithm is proposed that reduces transmission powers of the strongest femtocell interferers. The algorithm ensures that a cellular user achieves its SINR target even with 100 femtocells/cell-site, and requires a worst case SINR reduction of only 16% at femtocells. These results motivate design of power control schemes requiring minimal network overhead in two-tier networks with shared spectrum.<|reference_end|> | arxiv | @article{chandrasekhar2008power,
title={Power Control in Two-Tier Femtocell Networks},
author={Vikram Chandrasekhar, Jeffrey G. Andrews, Tarik Muharemovic, Zukang
Shen and Alan Gatherer},
journal={arXiv preprint arXiv:0810.3869},
year={2008},
doi={10.1109/TWC.2009.081386},
archivePrefix={arXiv},
eprint={0810.3869},
primaryClass={cs.NI}
} | chandrasekhar2008power |
arxiv-5238 | 0810.3891 | Control Theoretic Formulation of Capacity of Dynamic Electro Magnetic Channels | <|reference_start|>Control Theoretic Formulation of Capacity of Dynamic Electro Magnetic Channels: In this paper nonhomogeneous deterministic and stochastic Maxwell equations are used to rigorously formulate the capacity of electromagnetic channels such as wave guides (cavities, coaxial cables etc). Both distributed, but localized, and Dirichlet boundary data are considered as the potential input sources. We prove the existence of a source measure, satisfying certain second order constraints (equivalent to power constraints), at which the channel capacity is attained. Further, necessary and sufficient conditions for optimality are presented.<|reference_end|> | arxiv | @article{ahmed2008control,
title={Control Theoretic Formulation of Capacity of Dynamic Electro Magnetic
Channels},
author={N.U.Ahmed, F. Rezaei and S. Loyka},
journal={arXiv preprint arXiv:0810.3891},
year={2008},
archivePrefix={arXiv},
eprint={0810.3891},
primaryClass={cs.IT math.IT}
} | ahmed2008control |
arxiv-5239 | 0810.3900 | On the Capacity and Diversity-Multiplexing Tradeoff of the Two-Way Relay Channel | <|reference_start|>On the Capacity and Diversity-Multiplexing Tradeoff of the Two-Way Relay Channel: This paper considers a multiple input multiple output (MIMO) two-way relay channel, where two nodes want to exchange data with each other using multiple relays. An iterative algorithm is proposed to achieve the optimal achievable rate region, when each relay employs an amplify and forward (AF) strategy. The iterative algorithm solves a power minimization problem at every step, subject to minimum signal-to-interference-and-noise ratio constraints, which is non-convex, however, for which the Karush Kuhn Tuker conditions are sufficient for optimality. The optimal AF strategy assumes global channel state information (CSI) at each relay. To simplify the CSI requirements, a simple amplify and forward strategy, called dual channel matching, is also proposed, that requires only local channel state information, and whose achievable rate region is close to that of the optimal AF strategy. In the asymptotic regime of large number of relays, we show that the achievable rate region of the dual channel matching and an upper bound differ by only a constant term and establish the capacity scaling law of the two-way relay channel. Relay strategies achieving optimal diversity-multiplexing tradeoff are also considered with a single relay node. A compress and forward strategy is shown to be optimal for achieving diversity multiplexing tradeoff for the full-duplex case, in general, and for the half-duplex case in some cases.<|reference_end|> | arxiv | @article{vaze2008on,
title={On the Capacity and Diversity-Multiplexing Tradeoff of the Two-Way Relay
Channel},
author={Rahul Vaze and Robert W. Heath Jr},
journal={arXiv preprint arXiv:0810.3900},
year={2008},
archivePrefix={arXiv},
eprint={0810.3900},
primaryClass={cs.IT math.IT}
} | vaze2008on |
arxiv-5240 | 0810.3935 | Modeling Spatial and Temporal Dependencies of User Mobility in Wireless Mobile Networks | <|reference_start|>Modeling Spatial and Temporal Dependencies of User Mobility in Wireless Mobile Networks: Realistic mobility models are fundamental to evaluate the performance of protocols in mobile ad hoc networks. Unfortunately, there are no mobility models that capture the non-homogeneous behaviors in both space and time commonly found in reality, while at the same time being easy to use and analyze. Motivated by this, we propose a time-variant community mobility model, referred to as the TVC model, which realistically captures spatial and temporal correlations. We devise the communities that lead to skewed location visiting preferences, and time periods that allow us to model time dependent behaviors and periodic re-appearances of nodes at specific locations. To demonstrate the power and flexibility of the TVC model, we use it to generate synthetic traces that match the characteristics of a number of qualitatively different mobility traces, including wireless LAN traces, vehicular mobility traces, and human encounter traces. More importantly, we show that, despite the high level of realism achieved, our TVC model is still theoretically tractable. To establish this, we derive a number of important quantities related to protocol performance, such as the average node degree, the hitting time, and the meeting time, and provide examples of how to utilize this theory to guide design decisions in routing protocols.<|reference_end|> | arxiv | @article{hsu2008modeling,
title={Modeling Spatial and Temporal Dependencies of User Mobility in Wireless
Mobile Networks},
author={Wei-jen Hsu, Thrasyvoulos Spyropoulos, Konstantinos Psounis, Ahmed
Helmy},
journal={arXiv preprint arXiv:0810.3935},
year={2008},
archivePrefix={arXiv},
eprint={0810.3935},
primaryClass={cs.NI}
} | hsu2008modeling |
arxiv-5241 | 0810.3990 | To which extend is the "neural code" a metric ? | <|reference_start|>To which extend is the "neural code" a metric ?: Here is proposed a review of the different choices to structure spike trains, using deterministic metrics. Temporal constraints observed in biological or computational spike trains are first taken into account. The relation with existing neural codes (rate coding, rank coding, phase coding, ..) is then discussed. To which extend the "neural code" contained in spike trains is related to a metric appears to be a key point, a generalization of the Victor-Purpura metric family being proposed for temporal constrained causal spike trains<|reference_end|> | arxiv | @article{cessac2008to,
title={To which extend is the "neural code" a metric ?},
author={Bruno Cessac, Horacio Rostro-Gonz\'alez, Juan-Carlos Vasquez, Thierry
Vi\'eville},
journal={arXiv preprint arXiv:0810.3990},
year={2008},
archivePrefix={arXiv},
eprint={0810.3990},
primaryClass={physics.bio-ph cs.NE physics.data-an q-bio.NC}
} | cessac2008to |
arxiv-5242 | 0810.3992 | Introducing numerical bounds to improve event-based neural network simulation | <|reference_start|>Introducing numerical bounds to improve event-based neural network simulation: Although the spike-trains in neural networks are mainly constrained by the neural dynamics itself, global temporal constraints (refractoriness, time precision, propagation delays, ..) are also to be taken into account. These constraints are revisited in this paper in order to use them in event-based simulation paradigms. We first review these constraints, and discuss their consequences at the simulation level, showing how event-based simulation of time-constrained networks can be simplified in this context: the underlying data-structures are strongly simplified, while event-based and clock-based mechanisms can be easily mixed. These ideas are applied to punctual conductance-based generalized integrate-and-fire neural networks simulation, while spike-response model simulations are also revisited within this framework. As an outcome, a fast minimal complementary alternative with respect to existing simulation event-based methods, with the possibility to simulate interesting neuron models is implemented and experimented.<|reference_end|> | arxiv | @article{cessac2008introducing,
title={Introducing numerical bounds to improve event-based neural network
simulation},
author={Bruno Cessac, Olivier Rochel, Thierry Vi\'eville},
journal={arXiv preprint arXiv:0810.3992},
year={2008},
archivePrefix={arXiv},
eprint={0810.3992},
primaryClass={nlin.AO cs.NE nlin.CD q-bio.NC}
} | cessac2008introducing |
arxiv-5243 | 0810.4000 | Le trading algorithmique | <|reference_start|>Le trading algorithmique: The algorithmic trading comes from digitalisation of the processing of trading assets on financial markets. Since 1980 the computerization of the stock market offers real time processing of financial information. This technological revolution has offered processes and mathematic methods to identify best return on transactions. Current research relates to autonomous transaction systems programmed in certain periods and some algorithms. This offers return opportunities where traders can not intervene. There are about thirty algorithms to assist the traders, the best known are the VWAP, the TWAP, TVOL. The algorithms offer the latest strategies and decision-making are the subject of much research. These advances in modeling decision-making autonomous agent can envisage a rich future for these technologies, the players already in use for more than 30% of their trading.<|reference_end|> | arxiv | @article{lebreton2008le,
title={Le trading algorithmique},
author={Victor Lebreton (CES)},
journal={arXiv preprint arXiv:0810.4000},
year={2008},
archivePrefix={arXiv},
eprint={0810.4000},
primaryClass={q-fin.TR cs.GL}
} | lebreton2008le |
arxiv-5244 | 0810.4002 | A new distance for high level RNA secondary structure comparison | <|reference_start|>A new distance for high level RNA secondary structure comparison: We describe an algorithm for comparing two RNA secondary structures coded in the form of trees that introduces two new operations, called node fusion and edge fusion, besides the tree edit operations of deletion, insertion, and relabeling classically used in the literature. This allows us to address some serious limitations of the more traditional tree edit operations when the trees represent RNAs and what is searched for is a common structural core of two RNAs. Although the algorithm complexity has an exponential term, this term depends only on the number of successive fusions that may be applied to a same node, not on the total number of fusions. The algorithm remains therefore efficient in practice and is used for illustrative purposes on ribosomal as well as on other types of RNAs.<|reference_end|> | arxiv | @article{allali2008a,
title={A new distance for high level RNA secondary structure comparison},
author={Julien Allali (LaBRI), Marie-France Sagot (ENS Lyon / Insa Lyon /
INRIA Grenoble Rh\^one-Alpes)},
journal={IEEE/ACM Transactions on Computational Biology and Bioinformatics
2 (2005) 3--14},
year={2008},
archivePrefix={arXiv},
eprint={0810.4002},
primaryClass={cs.DS q-bio.QM}
} | allali2008a |
arxiv-5245 | 0810.4015 | On the Equation $x^{2^l+1}+x+a=0$ over $\mathrm{GF}(2^k)$ (Extended Version) | <|reference_start|>On the Equation $x^{2^l+1}+x+a=0$ over $\mathrm{GF}(2^k)$ (Extended Version): In this paper, the polynomials $P_a(x)=x^{2^l+1}+x+a$ with $a\in\mathrm{GF}(2^k)$ are studied. New criteria for the number of zeros of $P_a(x)$ in $\mathrm{GF}(2^k)$ are proved. In particular, a criterion for $P_a(x)$ to have exactly one zero in $\mathrm{GF}(2^k)$ when $\gcd(l,k)=1$ is formulated in terms of the values of permutation polynomials introduced by Dobbertin. We also study the affine polynomial $a^{2^l}x^{2^{2l}}+x^{2^l}+ax+1$ which is closely related to $P_a(x)$. In many cases, explicit expressions for calculating zeros of these polynomials are provided.<|reference_end|> | arxiv | @article{helleseth2008on,
title={On the Equation $x^{2^l+1}+x+a=0$ over $\mathrm{GF}(2^k)$ (Extended
Version)},
author={Tor Helleseth and Alexander Kholosha},
journal={arXiv preprint arXiv:0810.4015},
year={2008},
archivePrefix={arXiv},
eprint={0810.4015},
primaryClass={cs.DM}
} | helleseth2008on |
arxiv-5246 | 0810.4058 | Statistical Characterizers of Transport in a Communication Network | <|reference_start|>Statistical Characterizers of Transport in a Communication Network: We identify the statistical characterizers of congestion and decongestion for message transport in model communication lattices. These turn out to be the travel time distributions, which are Gaussian in the congested phase, and log-normal in the decongested phase. Our results are demonstrated for two dimensional lattices, such the Waxman graph, and for lattices with local clustering and geographic separations, gradient connections, as well as for a 1-d ring lattice with random assortative connections. The behavior of the distribution identifies the congested and decongested phase correctly for these distinct network topologies and decongestion strategies. The waiting time distributions of the systems also show identical signatures of the congested and decongested phases.<|reference_end|> | arxiv | @article{mukherjee2008statistical,
title={Statistical Characterizers of Transport in a Communication Network},
author={Satyam Mukherjee, Neelima Gupte and Gautam Mukherjee},
journal={arXiv preprint arXiv:0810.4058},
year={2008},
archivePrefix={arXiv},
eprint={0810.4058},
primaryClass={physics.soc-ph cond-mat.stat-mech cs.NI}
} | mukherjee2008statistical |
arxiv-5247 | 0810.4059 | Network Coding-based Protection Strategies Against a Single Link Failure in Optical Networks | <|reference_start|>Network Coding-based Protection Strategies Against a Single Link Failure in Optical Networks: In this paper we develop network protection strategies against a single link failure in optical networks. The motivation behind this work is the fact that $%70$ of all available links in an optical network suffers from a single link failure. In the proposed protection strategies, denoted NPS-I and NPS-II, we deploy network coding and reduced capacity on the working paths to provide a backup protection path that will carry encoded data from all sources. In addition, we provide implementation aspects and how to deploy the proposed strategies in case of an optical network with $n$ disjoint working paths.<|reference_end|> | arxiv | @article{aly2008network,
title={Network Coding-based Protection Strategies Against a Single Link Failure
in Optical Networks},
author={Salah A. Aly, Ahmed E. Kamal},
journal={Proc. of IEEE ICCES '08, Cairo, EG, 2008},
year={2008},
doi={10.1109/ICCES.2008.4773006},
archivePrefix={arXiv},
eprint={0810.4059},
primaryClass={cs.IT cs.NI math.IT}
} | aly2008network |
arxiv-5248 | 0810.4061 | Locally computable approximations for spectral clustering and absorption times of random walks | <|reference_start|>Locally computable approximations for spectral clustering and absorption times of random walks: We address the problem of determining a natural local neighbourhood or "cluster" associated to a given seed vertex in an undirected graph. We formulate the task in terms of absorption times of random walks from other vertices to the vertex of interest, and observe that these times are well approximated by the components of the principal eigenvector of the corresponding fundamental matrix of the graph's adjacency matrix. We further present a locally computable gradient-descent method to estimate this Dirichlet-Fiedler vector, based on minimising the respective Rayleigh quotient. Experimental evaluation shows that the approximations behave well and yield well-defined local clusters.<|reference_end|> | arxiv | @article{orponen2008locally,
title={Locally computable approximations for spectral clustering and absorption
times of random walks},
author={Pekka Orponen and Satu Elisa Schaeffer and Vanesa Avalos Gayt{\'a}n},
journal={arXiv preprint arXiv:0810.4061},
year={2008},
archivePrefix={arXiv},
eprint={0810.4061},
primaryClass={cs.DM cs.DS}
} | orponen2008locally |
arxiv-5249 | 0810.4112 | Sums of residues on algebraic surfaces and application to coding theory | <|reference_start|>Sums of residues on algebraic surfaces and application to coding theory: In this paper, we study residues of differential 2-forms on a smooth algebraic surface over an arbitrary field and give several statements about sums of residues. Afterwards, using these results we construct algebraic-geometric codes which are an extension to surfaces of the well-known differential codes on curves. We also study some properties of these codes and extend to them some known properties for codes on curves.<|reference_end|> | arxiv | @article{couvreur2008sums,
title={Sums of residues on algebraic surfaces and application to coding theory},
author={Alain Couvreur},
journal={Journal of Pure and Applied Algebra, vol 213 number 12, pages
2201-2223, 2009},
year={2008},
archivePrefix={arXiv},
eprint={0810.4112},
primaryClass={math.AG cs.IT math.IT}
} | couvreur2008sums |
arxiv-5250 | 0810.4171 | Capacity of Steganographic Channels | <|reference_start|>Capacity of Steganographic Channels: This work investigates a central problem in steganography, that is: How much data can safely be hidden without being detected? To answer this question, a formal definition of steganographic capacity is presented. Once this has been defined, a general formula for the capacity is developed. The formula is applicable to a very broad spectrum of channels due to the use of an information-spectrum approach. This approach allows for the analysis of arbitrary steganalyzers as well as non-stationary, non-ergodic encoder and attack channels. After the general formula is presented, various simplifications are applied to gain insight into example hiding and detection methodologies. Finally, the context and applications of the work are summarized in a general discussion.<|reference_end|> | arxiv | @article{harmsen2008capacity,
title={Capacity of Steganographic Channels},
author={Jeremiah J. Harmsen and William A. Pearlman},
journal={arXiv preprint arXiv:0810.4171},
year={2008},
archivePrefix={arXiv},
eprint={0810.4171},
primaryClass={cs.CR cs.IT math.IT}
} | harmsen2008capacity |
arxiv-5251 | 0810.4182 | Bucketing Coding and Information Theory for the Statistical High Dimensional Nearest Neighbor Problem | <|reference_start|>Bucketing Coding and Information Theory for the Statistical High Dimensional Nearest Neighbor Problem: Consider the problem of finding high dimensional approximate nearest neighbors, where the data is generated by some known probabilistic model. We will investigate a large natural class of algorithms which we call bucketing codes. We will define bucketing information, prove that it bounds the performance of all bucketing codes, and that the bucketing information bound can be asymptotically attained by randomly constructed bucketing codes. For example suppose we have n Bernoulli(1/2) very long (length d-->infinity) sequences of bits. Let n-2m sequences be completely independent, while the remaining 2m sequences are composed of m independent pairs. The interdependence within each pair is that their bits agree with probability 1/2<p<=1. It is well known how to find most pairs with high probability by performing order of n^{\log_{2}2/p} comparisons. We will see that order of n^{1/p+\epsilon} comparisons suffice, for any \epsilon>0. Moreover if one sequence out of each pair belongs to a a known set of n^{(2p-1)^{2}-\epsilon} sequences, than pairing can be done using order n comparisons!<|reference_end|> | arxiv | @article{dubiner2008bucketing,
title={Bucketing Coding and Information Theory for the Statistical High
Dimensional Nearest Neighbor Problem},
author={Moshe Dubiner},
journal={arXiv preprint arXiv:0810.4182},
year={2008},
doi={10.1109/TIT.2010.2050814},
archivePrefix={arXiv},
eprint={0810.4182},
primaryClass={cs.IT math.IT}
} | dubiner2008bucketing |
arxiv-5252 | 0810.4187 | Bicycle cycles and mobility patterns - Exploring and characterizing data from a community bicycle program | <|reference_start|>Bicycle cycles and mobility patterns - Exploring and characterizing data from a community bicycle program: This paper provides an analysis of human mobility data in an urban area using the amount of available bikes in the stations of the community bicycle program Bicing in Barcelona. The data was obtained by periodic mining of a KML-file accessible through the Bicing website. Although in principle very noisy, after some preprocessing and filtering steps the data allows to detect temporal patterns in mobility as well as identify residential, university, business and leisure areas of the city. The results lead to a proposal for an improvement of the bicing website, including a prediction of the number of available bikes in a certain station within the next minutes/hours. Furthermore a model for identifying the most probable routes between stations is briefly sketched.<|reference_end|> | arxiv | @article{kaltenbrunner2008bicycle,
title={Bicycle cycles and mobility patterns - Exploring and characterizing data
from a community bicycle program},
author={Andreas Kaltenbrunner and Rodrigo Meza and Jens Grivolla and Joan
Codina and Rafael Banchs},
journal={arXiv preprint arXiv:0810.4187},
year={2008},
archivePrefix={arXiv},
eprint={0810.4187},
primaryClass={cs.CY cs.HC}
} | kaltenbrunner2008bicycle |
arxiv-5253 | 0810.4188 | A Heterogeneous High Dimensional Approximate Nearest Neighbor Algorithm | <|reference_start|>A Heterogeneous High Dimensional Approximate Nearest Neighbor Algorithm: We consider the problem of finding high dimensional approximate nearest neighbors. Suppose there are d independent rare features, each having its own independent statistics. A point x will have x_{i}=0 denote the absence of feature i, and x_{i}=1 its existence. Sparsity means that usually x_{i}=0. Distance between points is a variant of the Hamming distance. Dimensional reduction converts the sparse heterogeneous problem into a lower dimensional full homogeneous problem. However we will see that the converted problem can be much harder to solve than the original problem. Instead we suggest a direct approach. It consists of T tries. In try t we rearrange the coordinates in decreasing order of (1-r_{t,i})\frac{p_{i,11}}{p_{i,01}+p_{i,10}} \ln\frac{1}{p_{i,1*}} where 0<r_{t,i}<1 are uniform pseudo-random numbers, and the p's are the coordinate's statistical parameters. The points are lexicographically ordered, and each is compared to its neighbors in that order. We analyze a generalization of this algorithm, show that it is optimal in some class of algorithms, and estimate the necessary number of tries to success. It is governed by an information like function, which we call bucketing forest information. Any doubts whether it is "information" are dispelled by another paper, where unrestricted bucketing information is defined.<|reference_end|> | arxiv | @article{dubiner2008a,
title={A Heterogeneous High Dimensional Approximate Nearest Neighbor Algorithm},
author={Moshe Dubiner},
journal={arXiv preprint arXiv:0810.4188},
year={2008},
archivePrefix={arXiv},
eprint={0810.4188},
primaryClass={cs.IT math.IT}
} | dubiner2008a |
arxiv-5254 | 0810.4196 | Interval Semantics for Standard Floating-Point Arithmetic | <|reference_start|>Interval Semantics for Standard Floating-Point Arithmetic: If the non-zero finite floating-point numbers are interpreted as point intervals, then the effect of rounding can be interpreted as computing one of the bounds of the result according to interval arithmetic. We give an interval interpretation for the signed zeros and infinities, so that the undefined operations 0*inf, inf - inf, inf/inf, and 0/0 become defined. In this way no operation remains that gives rise to an error condition. Mathematically questionable features of the floating-point standard become well-defined sets of reals. Interval semantics provides a basis for the verification of numerical algorithms. We derive the results of the newly defined operations and consider the implications for hardware implementation.<|reference_end|> | arxiv | @article{edmonson2008interval,
title={Interval Semantics for Standard Floating-Point Arithmetic},
author={W.W. Edmonson and M.H. van Emden},
journal={arXiv preprint arXiv:0810.4196},
year={2008},
number={DCS-323-IR},
archivePrefix={arXiv},
eprint={0810.4196},
primaryClass={cs.NA cs.AR}
} | edmonson2008interval |
arxiv-5255 | 0810.4201 | Interchanging Interactive 3-d Graphics for Astronomy | <|reference_start|>Interchanging Interactive 3-d Graphics for Astronomy: We demonstrate how interactive, three-dimensional (3-d) scientific visualizations can be efficiently interchanged between a variety of mediums. Through the use of an appropriate interchange format, and a unified interaction interface, we minimize the effort to produce visualizations appropriate for undertaking knowledge discovery at the astronomer's desktop, as part of conference presentations, in digital publications or as Web content. We use examples from cosmological visualization to address some of the issues of interchange, and to describe our approach to adapting S2PLOT desktop visualizations to the Web. Supporting demonstrations are available at http://astronomy.swin.edu.au/s2plot/interchange/<|reference_end|> | arxiv | @article{fluke2008interchanging,
title={Interchanging Interactive 3-d Graphics for Astronomy},
author={C. J. Fluke and D. G. Barnes and N. T. Jones},
journal={arXiv preprint arXiv:0810.4201},
year={2008},
doi={10.1071/AS08025},
archivePrefix={arXiv},
eprint={0810.4201},
primaryClass={astro-ph cs.GR}
} | fluke2008interchanging |
arxiv-5256 | 0810.4249 | Ogden's Lemma for Regular Tree Languages | <|reference_start|>Ogden's Lemma for Regular Tree Languages: We motivate and prove a strong pumping lemma for regular tree languages. The new lemma can be seen as the natural correspondent of Ogden's lemma for context-free string languages.<|reference_end|> | arxiv | @article{kuhlmann2008ogden's,
title={Ogden's Lemma for Regular Tree Languages},
author={Marco Kuhlmann},
journal={arXiv preprint arXiv:0810.4249},
year={2008},
archivePrefix={arXiv},
eprint={0810.4249},
primaryClass={cs.CC}
} | kuhlmann2008ogden's |
arxiv-5257 | 0810.4341 | Entropy of Hidden Markov Processes via Cycle Expansion | <|reference_start|>Entropy of Hidden Markov Processes via Cycle Expansion: Hidden Markov Processes (HMP) is one of the basic tools of the modern probabilistic modeling. The characterization of their entropy remains however an open problem. Here the entropy of HMP is calculated via the cycle expansion of the zeta-function, a method adopted from the theory of dynamical systems. For a class of HMP this method produces exact results both for the entropy and the moment-generating function. The latter allows to estimate, via the Chernoff bound, the probabilities of large deviations for the HMP. More generally, the method offers a representation of the moment-generating function and of the entropy via convergent series.<|reference_end|> | arxiv | @article{allahverdyan2008entropy,
title={Entropy of Hidden Markov Processes via Cycle Expansion},
author={Armen E. Allahverdyan},
journal={arXiv preprint arXiv:0810.4341},
year={2008},
doi={10.1007/s10955-008-9613-0},
archivePrefix={arXiv},
eprint={0810.4341},
primaryClass={cs.IT cond-mat.other math.IT physics.data-an}
} | allahverdyan2008entropy |
arxiv-5258 | 0810.4366 | Resource Allocation and Relay Selection for Collaborative Communications | <|reference_start|>Resource Allocation and Relay Selection for Collaborative Communications: We investigate the relay selection problem for a decode and forward collaborative network. Users are able to collaborate; decode messages of each other, re-encode and forward along with their own messages. We study the performance obtained from collaboration in terms of 1) increasing the achievable rate, 2) saving the transmit energy and 3) reducing the resource requirement (resource means time-bandwidth). To ensure fairness, we fix the transmit-energy-to-rate ratio among all users. We allocate resource optimally for the collaborative protocol (CP), and compare the result with the non-collaborative protocol (NCP) where users transmits their messages directly. The collaboration gain is a function of the channel gain and available energies and allows us 1) to decide to collaborate or not, 2) to select one relay among the possible relay users, and 3) to determine the involved gain and loss of possible collaboration. A considerable gain can be obtained if the direct source-destination channel gain is significantly smaller than those of alternative involved links. We demonstrate that a rate and energy improvement of up to $(1+\sqrt[\eta]{\frac{k}{k+1}})^\eta$ can be obtained, where $\eta$ is the environment path loss exponent and $k$ is the ratio of the rates of involved users. The gain is maximum for low transmit-energy-to-received-noise-ratio (TERN) and in a high TERN environment the NCP is preferred.<|reference_end|> | arxiv | @article{astaneh2008resource,
title={Resource Allocation and Relay Selection for Collaborative Communications},
author={Saeed Akhavan Astaneh and Saeed Gazor},
journal={arXiv preprint arXiv:0810.4366},
year={2008},
archivePrefix={arXiv},
eprint={0810.4366},
primaryClass={cs.IT math.IT}
} | astaneh2008resource |
arxiv-5259 | 0810.4401 | Efficient Exact Inference in Planar Ising Models | <|reference_start|>Efficient Exact Inference in Planar Ising Models: We give polynomial-time algorithms for the exact computation of lowest-energy (ground) states, worst margin violators, log partition functions, and marginal edge probabilities in certain binary undirected graphical models. Our approach provides an interesting alternative to the well-known graph cut paradigm in that it does not impose any submodularity constraints; instead we require planarity to establish a correspondence with perfect matchings (dimer coverings) in an expanded dual graph. We implement a unified framework while delegating complex but well-understood subproblems (planar embedding, maximum-weight perfect matching) to established algorithms for which efficient implementations are freely available. Unlike graph cut methods, we can perform penalized maximum-likelihood as well as maximum-margin parameter estimation in the associated conditional random fields (CRFs), and employ marginal posterior probabilities as well as maximum a posteriori (MAP) states for prediction. Maximum-margin CRF parameter estimation on image denoising and segmentation problems shows our approach to be efficient and effective. A C++ implementation is available from http://nic.schraudolph.org/isinf/<|reference_end|> | arxiv | @article{schraudolph2008efficient,
title={Efficient Exact Inference in Planar Ising Models},
author={Nicol N. Schraudolph and Dmitry Kamenetsky},
journal={arXiv preprint arXiv:0810.4401},
year={2008},
archivePrefix={arXiv},
eprint={0810.4401},
primaryClass={cs.LG cs.CV stat.ML}
} | schraudolph2008efficient |
arxiv-5260 | 0810.4404 | Non binary LDPC codes over the binary erasure channel: density evolution analysis | <|reference_start|>Non binary LDPC codes over the binary erasure channel: density evolution analysis: In this paper we present a thorough analysis of non binary LDPC codes over the binary erasure channel. First, the decoding of non binary LDPC codes is investigated. The proposed algorithm performs on-the-fly decoding, i.e. it starts decoding as soon as the first symbols are received, which generalizes the erasure decoding of binary LDPC codes. Next, we evaluate the asymptotical performance of ensembles of non binary LDPC codes, by using the density evolution method. Density evolution equations are derived by taking into consideration both the irregularity of the bipartite graph and the probability distribution of the graph edge labels. Finally, infinite-length performance of some ensembles of non binary LDPC codes for different edge label distributions are shown.<|reference_end|> | arxiv | @article{savin2008non,
title={Non binary LDPC codes over the binary erasure channel: density evolution
analysis},
author={Valentin Savin},
journal={arXiv preprint arXiv:0810.4404},
year={2008},
archivePrefix={arXiv},
eprint={0810.4404},
primaryClass={cs.IT math.IT}
} | savin2008non |
arxiv-5261 | 0810.4419 | Binding bigraphs as symmetric monoidal closed theories | <|reference_start|>Binding bigraphs as symmetric monoidal closed theories: Milner's bigraphs are a general framework for reasoning about distributed and concurrent programming languages. Notably, it has been designed to encompass both the pi-calculus and the Ambient calculus. This paper is only concerned with bigraphical syntax: given what we here call a bigraphical signature K, Milner constructs a (pre-) category of bigraphs BBig(K), whose main features are (1) the presence of relative pushouts (RPOs), which makes them well-behaved w.r.t. bisimulations, and that (2) the so-called structural equations become equalities. Examples of the latter include, e.g., in pi and Ambient, renaming of bound variables, associativity and commutativity of parallel composition, or scope extrusion for restricted names. Also, bigraphs follow a scoping discipline ensuring that, roughly, bound variables never escape their scope. Here, we reconstruct bigraphs using a standard categorical tool: symmetric monoidal closed (SMC) theories. Our theory enforces the same scoping discipline as bigraphs, as a direct property of SMC structure. Furthermore, it elucidates the slightly mysterious status of so-called links in bigraphs. Finally, our category is also considerably larger than the category of bigraphs, notably encompassing in the same framework terms and a flexible form of higher-order contexts.<|reference_end|> | arxiv | @article{hirschowitz2008binding,
title={Binding bigraphs as symmetric monoidal closed theories},
author={Tom Hirschowitz (LAMA) and Aur{\'e}lien Pardon (LIP)},
journal={arXiv preprint arXiv:0810.4419},
year={2008},
archivePrefix={arXiv},
eprint={0810.4419},
primaryClass={cs.LO cs.PL}
} | hirschowitz2008binding |
arxiv-5262 | 0810.4420 | Graphical Presentations of Symmetric Monoidal Closed Theories | <|reference_start|>Graphical Presentations of Symmetric Monoidal Closed Theories: We define a notion of symmetric monoidal closed (SMC) theory, consisting of a SMC signature augmented with equations, and describe the classifying categories of such theories in terms of proof nets.<|reference_end|> | arxiv | @article{garner2008graphical,
title={Graphical Presentations of Symmetric Monoidal Closed Theories},
author={Richard Garner and Tom Hirschowitz (LAMA) and Aur{\'e}lien Pardon (LIP)},
journal={arXiv preprint arXiv:0810.4420},
year={2008},
archivePrefix={arXiv},
eprint={0810.4420},
primaryClass={cs.LO math.CT}
} | garner2008graphical |
arxiv-5263 | 0810.4423 | Efficient Algorithmic Techniques for Several Multidimensional Geometric Data Management and Analysis Problems | <|reference_start|>Efficient Algorithmic Techniques for Several Multidimensional Geometric Data Management and Analysis Problems: In this paper I present several novel, efficient, algorithmic techniques for solving some multidimensional geometric data management and analysis problems. The techniques are based on several data structures from computational geometry (e.g. segment tree and range tree) and on the well-known sweep-line method.<|reference_end|> | arxiv | @article{andreica2008efficient,
title={Efficient Algorithmic Techniques for Several Multidimensional Geometric
Data Management and Analysis Problems},
author={Mugurel Ionut Andreica},
journal={arXiv preprint arXiv:0810.4423},
year={2008},
archivePrefix={arXiv},
eprint={0810.4423},
primaryClass={cs.CG cs.DM cs.DS}
} | andreica2008efficient |
arxiv-5264 | 0810.4426 | Camera distortion self-calibration using the plumb-line constraint and minimal Hough entropy | <|reference_start|>Camera distortion self-calibration using the plumb-line constraint and minimal Hough entropy: In this paper we present a simple and robust method for self-correction of camera distortion using single images of scenes which contain straight lines. Since the most common distortion can be modelled as radial distortion, we illustrate the method using the Harris radial distortion model, but the method is applicable to any distortion model. The method is based on transforming the edgels of the distorted image to a 1-D angular Hough space, and optimizing the distortion correction parameters which minimize the entropy of the corresponding normalized histogram. Properly corrected imagery will have fewer curved lines, and therefore less spread in Hough space. Since the method does not rely on any image structure beyond the existence of edgels sharing some common orientations and does not use edge fitting, it is applicable to a wide variety of image types. For instance, it can be applied equally well to images of texture with weak but dominant orientations, or images with strong vanishing points. Finally, the method is performed on both synthetic and real data revealing that it is particularly robust to noise.<|reference_end|> | arxiv | @article{rosten2008camera,
title={Camera distortion self-calibration using the plumb-line constraint and
minimal Hough entropy},
author={Edward Rosten and Rohan Loveland},
journal={arXiv preprint arXiv:0810.4426},
year={2008},
doi={10.1007/s00138-009-0196-9},
number={08-2665},
archivePrefix={arXiv},
eprint={0810.4426},
primaryClass={cs.CV}
} | rosten2008camera |
arxiv-5265 | 0810.4431 | An Eye Tracking Study into the Effects of Graph Layout | <|reference_start|>An Eye Tracking Study into the Effects of Graph Layout: Graphs are typically visualized as node-link diagrams. Although there is a fair amount of research focusing on crossing minimization to improve readability, little attention has been paid on how to handle crossings when they are an essential part of the final visualizations. This requires us to understand how people read graphs and how crossings affect reading performance. As an initial step to this end, a preliminary eye tracking experiment was conducted. The specific purpose of this experiment was to test the effects of crossing angles and geometric-path tendency on eye movements and performance. Sixteen subjects performed both path search and node locating tasks with six drawings. The results showed that small angles can slow down and trigger extra eye movements, causing delays for path search tasks, whereas crossings have little impact on node locating tasks. Geometric-path tendency indicates that a path between two nodes can become harder to follow when many branches of the path go toward the target node. The insights obtained are discussed with a view to further confirmation in future work.<|reference_end|> | arxiv | @article{huang2008an,
title={An Eye Tracking Study into the Effects of Graph Layout},
author={Weidong Huang},
journal={arXiv preprint arXiv:0810.4431},
year={2008},
archivePrefix={arXiv},
eprint={0810.4431},
primaryClass={cs.HC}
} | huang2008an |
arxiv-5266 | 0810.4440 | Randomization Adaptive Self-Stabilization | <|reference_start|>Randomization Adaptive Self-Stabilization: We present a scheme to convert self-stabilizing algorithms that use randomization during and following convergence to self-stabilizing algorithms that use randomization only during convergence. We thus reduce the number of random bits from an infinite number to a bounded number. The scheme is applicable to the cases in which there exits a local predicate for each node, such that global consistency is implied by the union of the local predicates. We demonstrate our scheme over the token circulation algorithm of Herman and the recent constant time Byzantine self-stabilizing clock synchronization algorithm by Ben-Or, Dolev and Hoch. The application of our scheme results in the first constant time Byzantine self-stabilizing clock synchronization algorithm that uses a bounded number of random bits.<|reference_end|> | arxiv | @article{dolev2008randomization,
title={Randomization Adaptive Self-Stabilization},
author={Shlomi Dolev and Nir Tzachar},
journal={arXiv preprint arXiv:0810.4440},
year={2008},
doi={10.1007/978-3-642-05118-0_57},
archivePrefix={arXiv},
eprint={0810.4440},
primaryClass={cs.DC}
} | dolev2008randomization |
arxiv-5267 | 0810.4442 | Message passing resource allocation for the uplink of multicarrier systems | <|reference_start|>Message passing resource allocation for the uplink of multicarrier systems: We propose a novel distributed resource allocation scheme for the up-link of a cellular multi-carrier system based on the message passing (MP) algorithm. In the proposed approach each transmitter iteratively sends and receives information messages to/from the base station with the goal of achieving an optimal resource allocation strategy. The exchanged messages are the solution of small distributed allocation problems. To reduce the computational load, the MP problems at the terminals follow a dynamic programming formulation. The advantage of the proposed scheme is that it distributes the computational effort among all the transmitters in the cell and it does not require the presence of a central controller that takes all the decisions. Numerical results show that the proposed approach is an excellent solution to the resource allocation problem for cellular multi-carrier systems.<|reference_end|> | arxiv | @article{abrardo2008message,
title={Message passing resource allocation for the uplink of multicarrier
systems},
author={Andrea Abrardo and Paolo Detti and Marco Moretti},
journal={arXiv preprint arXiv:0810.4442},
year={2008},
archivePrefix={arXiv},
eprint={0810.4442},
primaryClass={cs.IT math.IT}
} | abrardo2008message |
arxiv-5268 | 0810.4451 | The Mob core language and abstract machine (rev 02) | <|reference_start|>The Mob core language and abstract machine (rev 02): Most current mobile agent systems are based on programming languages whose semantics are difficult to prove correct as they lack an adequate underlying formal theory. In recent years, the development of the theory of concurrent systems, namely of process calculi, has allowed for the first time the modeling of mobile agent systems.Languages directly based on process calculi are, however, very low-level and it is desirable to provide the programmer with higher level abstractions, while keeping the semantics of the base calculus. In this technical report we present the syntax and the semantics of a scripting language for programming mobile agents called Mob. We describe the language's syntax and semantics. Mob is service-oriented, meaning that agents act both as servers and as clients of services and that this coupling is done dynamically at run-time. The language is implemented on top of a process calculus which allows us to prove that the framework is sound by encoding its semantics into the underlying calculus. This provides a form of language security not available to other mobile agent languages developed using a more ah-doc approach.<|reference_end|> | arxiv | @article{paulino2008the,
title={The Mob core language and abstract machine (rev 0.2)},
author={Herve Paulino and Luis Lopes},
journal={arXiv preprint arXiv:0810.4451},
year={2008},
archivePrefix={arXiv},
eprint={0810.4451},
primaryClass={cs.PL cs.DC}
} | paulino2008the |
arxiv-5269 | 0810.4460 | Logics for XML | <|reference_start|>Logics for XML: This thesis describes the theoretical and practical foundations of a system for the static analysis of XML processing languages. The system relies on a fixpoint temporal logic with converse, derived from the mu-calculus, where models are finite trees. This calculus is expressive enough to capture regular tree types along with multi-directional navigation in trees, while having a single exponential time complexity. Specifically the decidability of the logic is proved in time 2^O(n) where n is the size of the input formula. Major XML concepts are linearly translated into the logic: XPath navigation and node selection semantics, and regular tree languages (which include DTDs and XML Schemas). Based on these embeddings, several problems of major importance in XML applications are reduced to satisfiability of the logic. These problems include XPath containment, emptiness, equivalence, overlap, coverage, in the presence or absence of regular tree type constraints, and the static type-checking of an annotated query. The focus is then given to a sound and complete algorithm for deciding the logic, along with a detailed complexity analysis, and crucial implementation techniques for building an effective solver. Practical experiments using a full implementation of the system are presented. The system appears to be efficient in practice for several realistic scenarios. The main application of this work is a new class of static analyzers for programming languages using both XPath expressions and XML type annotations (input and output). Such analyzers allow to ensure at compile-time valuable properties such as type-safety and optimizations, for safer and more efficient XML processing.<|reference_end|> | arxiv | @article{geneves2008logics,
title={Logics for XML},
author={Pierre Geneves},
journal={arXiv preprint arXiv:0810.4460},
year={2008},
archivePrefix={arXiv},
eprint={0810.4460},
primaryClass={cs.PL cs.DB cs.LO}
} | geneves2008logics |
arxiv-5270 | 0810.4576 | New Constructions for Query-Efficient Locally Decodable Codes of Subexponential Length | <|reference_start|>New Constructions for Query-Efficient Locally Decodable Codes of Subexponential Length: A $(k,\delta,\epsilon)$-locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$ is an error-correcting code that encodes each message $\vec{x}=(x_{1},x_{2},...,x_{n}) \in F_{q}^{n}$ to $C(\vec{x}) \in F_{q}^{N}$ and has the following property: For any $\vec{y} \in {\bf F}_{q}^{N}$ such that $d(\vec{y},C(\vec{x})) \leq \delta N$ and each $1 \leq i \leq n$, the symbol $x_{i}$ of $\vec{x}$ can be recovered with probability at least $1-\epsilon$ by a randomized decoding algorithm looking only at $k$ coordinates of $\vec{y}$. The efficiency of a $(k,\delta,\epsilon)$-locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$ is measured by the code length $N$ and the number $k$ of queries. For any $k$-query locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$, the code length $N$ is conjectured to be exponential of $n$, however, this was disproved. Yekhanin [In Proc. of STOC, 2007] showed that there exists a 3-query locally decodable code $C: F_{2}^{n} \to F_{2}^{N}$ such that $N=\exp(n^{(1/\log \log n)})$ assuming that the number of Mersenne primes is infinite. For a 3-query locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$, Efremenko [ECCC Report No.69, 2008] reduced the code length further to $N=\exp(n^{O((\log \log n/ \log n)^{1/2})})$, and also showed that for any integer $r>1$, there exists a $k$-query locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$ such that $k \leq 2^{r}$ and $N=\exp(n^{O((\log \log n/ \log n)^{1-1/r})})$. In this paper, we present a query-efficient locally decodable code and show that for any integer $r>1$, there exists a $k$-query locally decodable code $C: F_{q}^{n} \to F_{q}^{N}$ such that $k \leq 3 \cdot 2^{r-2}$ and $N=\exp(n^{O((\log \log n/ \log n)^{1-1/r})})$.<|reference_end|> | arxiv | @article{itoh2008new,
title={New Constructions for Query-Efficient Locally Decodable Codes of
Subexponential Length},
author={Toshiya Itoh and Yasuhiro Suzuki},
journal={IEICE Trans. on Inf. and Syst. E93-D(2), pp.263-270, 2010},
year={2008},
doi={10.1587/transinf.E93.D.263},
archivePrefix={arXiv},
eprint={0810.4576},
primaryClass={cs.CC cs.CR}
} | itoh2008new |
arxiv-5271 | 0810.4611 | Learning Isometric Separation Maps | <|reference_start|>Learning Isometric Separation Maps: Maximum Variance Unfolding (MVU) and its variants have been very successful in embedding data-manifolds in lower dimensional spaces, often revealing the true intrinsic dimension. In this paper we show how to also incorporate supervised class information into an MVU-like method without breaking its convexity. We call this method the Isometric Separation Map and we show that the resulting kernel matrix can be used as a binary/multiclass Support Vector Machine-like method in a semi-supervised (transductive) framework. We also show that the method always finds a kernel matrix that linearly separates the training data exactly without projecting them in infinite dimensional spaces. In traditional SVMs we choose a kernel and hope that the data become linearly separable in the kernel space. In this paper we show how the hyperplane can be chosen ad-hoc and the kernel is trained so that data are always linearly separable. Comparisons with Large Margin SVMs show comparable performance.<|reference_end|> | arxiv | @article{vasiloglou2008learning,
title={Learning Isometric Separation Maps},
author={Nikolaos Vasiloglou and Alexander G. Gray and David V. Anderson},
journal={arXiv preprint arXiv:0810.4611},
year={2008},
archivePrefix={arXiv},
eprint={0810.4611},
primaryClass={cs.LG}
} | vasiloglou2008learning |
arxiv-5272 | 0810.4616 | Assembling Actor-based Mind-Maps from Text Stream | <|reference_start|>Assembling Actor-based Mind-Maps from Text Stream: For human beings, the processing of text streams of unknown size leads generally to problems because e.g. noise must be selected out, information be tested for its relevance or redundancy, and linguistic phenomenon like ambiguity or the resolution of pronouns be advanced. Putting this into simulation by using an artificial mind-map is a challenge, which offers the gate for a wide field of applications like automatic text summarization or punctual retrieval. In this work we present a framework that is a first step towards an automatic intellect. It aims at assembling a mind-map based on incoming text streams and on a subject-verb-object strategy, having the verb as an interconnection between the adjacent nouns. The mind-map's performance is enriched by a pronoun resolution engine that bases on the work of D. Klein, and C. D. Manning.<|reference_end|> | arxiv | @article{brucks2008assembling,
title={Assembling Actor-based Mind-Maps from Text Stream},
author={Claudine Brucks and Christoph Schommer},
journal={Summary of the Master Thesis ``Actor-based Mind-map learning from
Text Streams''. Dept. of Computer Science and Communication, University of
Luxembourg, 2008},
year={2008},
archivePrefix={arXiv},
eprint={0810.4616},
primaryClass={cs.CL cs.DL}
} | brucks2008assembling |
arxiv-5273 | 0810.4617 | Graph-based classification of multiple observation sets | <|reference_start|>Graph-based classification of multiple observation sets: We consider the problem of classification of an object given multiple observations that possibly include different transformations. The possible transformations of the object generally span a low-dimensional manifold in the original signal space. We propose to take advantage of this manifold structure for the effective classification of the object represented by the observation set. In particular, we design a low complexity solution that is able to exploit the properties of the data manifolds with a graph-based algorithm. Hence, we formulate the computation of the unknown label matrix as a smoothing process on the manifold under the constraint that all observations represent an object of one single class. It results into a discrete optimization problem, which can be solved by an efficient and low complexity algorithm. We demonstrate the performance of the proposed graph-based algorithm in the classification of sets of multiple images. Moreover, we show its high potential in video-based face recognition, where it outperforms state-of-the-art solutions that fall short of exploiting the manifold structure of the face image data sets.<|reference_end|> | arxiv | @article{kokiopoulou2008graph-based,
title={Graph-based classification of multiple observation sets},
author={Effrosyni Kokiopoulou and Pascal Frossard},
journal={arXiv preprint arXiv:0810.4617},
year={2008},
archivePrefix={arXiv},
eprint={0810.4617},
primaryClass={cs.CV}
} | kokiopoulou2008graph-based |
arxiv-5274 | 0810.4657 | Cooperative Strategies for the Half-Duplex Gaussian Parallel Relay Channel: Simultaneous Relaying versus Successive Relaying | <|reference_start|>Cooperative Strategies for the Half-Duplex Gaussian Parallel Relay Channel: Simultaneous Relaying versus Successive Relaying: This study investigates the problem of communication for a network composed of two half-duplex parallel relays with additive white Gaussian noise. Two protocols, i.e., \emph{Simultaneous} and \emph{Successive} relaying, associated with two possible relay orderings are proposed. The simultaneous relaying protocol is based on \emph{Dynamic Decode and Forward (DDF)} scheme. For the successive relaying protocol: (i) a \emph{Non-Cooperative} scheme based on the \emph{Dirty Paper Coding (DPC)}, and (ii) a \emph{Cooperative} scheme based on the \emph{Block Markov Encoding (BME)} are considered. Furthermore, the composite scheme of employing BME at one relay and DPC at another always achieves a better rate when compared to the \emph{Cooperative} scheme. A \emph{"Simultaneous-Successive Relaying based on Dirty paper coding scheme" (SSRD)} is also proposed. The optimum ordering of the relays and hence the capacity of the half-duplex Gaussian parallel relay channel in the low and high signal-to-noise ratio (SNR) scenarios is derived. In the low SNR scenario, it is revealed that under certain conditions for the channel coefficients, the ratio of the achievable rate of the simultaneous relaying based on DDF to the cut-set bound tends to be 1. On the other hand, as SNR goes to infinity, it is proved that successive relaying, based on the DPC, asymptotically achieves the capacity of the network.<|reference_end|> | arxiv | @article{rezaei2008cooperative,
title={Cooperative Strategies for the Half-Duplex {Gaussian} Parallel Relay
Channel: Simultaneous Relaying versus Successive Relaying},
author={Seyed Saeed Changiz Rezaei and Shahab Oveis Gharan and Amir K. Khandani},
journal={arXiv preprint arXiv:0810.4657},
year={2008},
doi={10.1109/ALLERTON.2008.4797712},
archivePrefix={arXiv},
eprint={0810.4657},
primaryClass={cs.IT math.IT}
} | rezaei2008cooperative |
arxiv-5275 | 0810.4658 | Indexability of Restless Bandit Problems and Optimality of Whittle's Index for Dynamic Multichannel Access | <|reference_start|>Indexability of Restless Bandit Problems and Optimality of Whittle's Index for Dynamic Multichannel Access: We consider a class of restless multi-armed bandit problems (RMBP) that arises in dynamic multichannel access, user/server scheduling, and optimal activation in multi-agent systems. For this class of RMBP, we establish the indexability and obtain Whittle's index in closed-form for both discounted and average reward criteria. These results lead to a direct implementation of Whittle's index policy with remarkably low complexity. When these Markov chains are stochastically identical, we show that Whittle's index policy is optimal under certain conditions. Furthermore, it has a semi-universal structure that obviates the need to know the Markov transition probabilities. The optimality and the semi-universal structure result from the equivalency between Whittle's index policy and the myopic policy established in this work. For non-identical channels, we develop efficient algorithms for computing a performance upper bound given by Lagrangian relaxation. The tightness of the upper bound and the near-optimal performance of Whittle's index policy are illustrated with simulation examples.<|reference_end|> | arxiv | @article{liu2008indexability,
title={Indexability of Restless Bandit Problems and Optimality of {Whittle's}
Index for Dynamic Multichannel Access},
author={Keqin Liu and Qing Zhao},
journal={arXiv preprint arXiv:0810.4658},
year={2008},
archivePrefix={arXiv},
eprint={0810.4658},
primaryClass={cs.IT math.IT}
} | liu2008indexability |
arxiv-5276 | 0810.4668 | On Granular Knowledge Structures | <|reference_start|>On Granular Knowledge Structures: Knowledge plays a central role in human and artificial intelligence. One of the key characteristics of knowledge is its structured organization. Knowledge can be and should be presented in multiple levels and multiple views to meet people's needs in different levels of granularities and from different perspectives. In this paper, we stand on the view point of granular computing and provide our understanding on multi-level and multi-view of knowledge through granular knowledge structures (GKS). Representation of granular knowledge structures, operations for building granular knowledge structures and how to use them are investigated. As an illustration, we provide some examples through results from an analysis of proceeding papers. Results show that granular knowledge structures could help users get better understanding of the knowledge source from set theoretical, logical and visual point of views. One may consider using them to meet specific needs or solve certain kinds of problems.<|reference_end|> | arxiv | @article{zeng2008on,
title={On Granular Knowledge Structures},
author={Yi Zeng and Ning Zhong},
journal={arXiv preprint arXiv:0810.4668},
year={2008},
archivePrefix={arXiv},
eprint={0810.4668},
primaryClass={cs.AI cs.DL}
} | zeng2008on |
arxiv-5277 | 0810.4711 | A topological chaos framework for hash functions | <|reference_start|>A topological chaos framework for hash functions: This paper presents a new procedure of generating hash functions which can be evaluated using some mathematical tools. This procedure is based on discrete chaotic iterations. First, it is mathematically proven, that these discrete chaotic iterations can be considered as a \linebreak particular case of topological chaos. Then, the process of generating hash function based on the \linebreak topological chaos is detailed. Finally it is shown how some tools coming from the domain of \linebreak topological chaos can be used to measure quantitatively and qualitatively some desirable properties for hash functions. An illustration example is detailed in order to show how one can create hash functions using our theoretical study. Key-words : Discrete chaotic iterations. Topological chaos. Hash function<|reference_end|> | arxiv | @article{bahi2008a,
title={A topological chaos framework for hash functions},
author={Jacques M. Bahi and Christophe Guyeux},
journal={arXiv preprint arXiv:0810.4711},
year={2008},
archivePrefix={arXiv},
eprint={0810.4711},
primaryClass={cs.CR}
} | bahi2008a |
arxiv-5278 | 0810.4713 | A watermarking algorithm satisfying topological chaos properties | <|reference_start|>A watermarking algorithm satisfying topological chaos properties: A new watermarking algorithm is given, it is based on the so-called chaotic iterations and on the choice of some coefficients which are deduced from the description of the carrier medium. After defining these coefficients, chaotic discrete iterations are used to encrypt the watermark and to embed it in the carrier medium. This procedure generates a topological chaos and ensures that the required properties of a watermarking algorithm are satisfied. Key-words: Watermarking, Encryption, Chaotic iterations, Topological chaos, Information hiding<|reference_end|> | arxiv | @article{bahi2008a,
title={A watermarking algorithm satisfying topological chaos properties},
author={Jacques M. Bahi and Christophe Guyeux},
journal={arXiv preprint arXiv:0810.4713},
year={2008},
archivePrefix={arXiv},
eprint={0810.4713},
primaryClass={cs.CR}
} | bahi2008a |
arxiv-5279 | 0810.4727 | Robust Estimation of Mean Values | <|reference_start|>Robust Estimation of Mean Values: In this paper, we develop a computational approach for estimating the mean value of a quantity in the presence of uncertainty. We demonstrate that, under some mild assumptions, the upper and lower bounds of the mean value are efficiently computable via a sample reuse technique, of which the computational complexity is shown to posses a Poisson distribution.<|reference_end|> | arxiv | @article{chen2008robust,
title={Robust Estimation of Mean Values},
author={Xinjia Chen},
journal={arXiv preprint arXiv:0810.4727},
year={2008},
archivePrefix={arXiv},
eprint={0810.4727},
primaryClass={math.ST cs.SY math.PR stat.CO stat.TH}
} | chen2008robust |
arxiv-5280 | 0810.4741 | On the Capacity and Generalized Degrees of Freedom of the X Channel | <|reference_start|>On the Capacity and Generalized Degrees of Freedom of the X Channel: We explore the capacity and generalized degrees of freedom of the two-user Gaussian X channel, i.e. a generalization of the 2 user interference channel where there is an independent message from each transmitter to each receiver. There are three main results in this paper. First, we characterize the sum capacity of the deterministic X channel model under a symmetric setting. Second, we characterize the generalized degrees of freedom of the Gaussian X channel under a similar symmetric model. Third, we extend the noisy interference capacity characterization previously obtained for the interference channel to the X channel. Specifically, we show that the X channel associated with noisy (very weak) interference channel has the same sum capacity as the noisy interference channel.<|reference_end|> | arxiv | @article{huang2008on,
title={On the Capacity and Generalized Degrees of Freedom of the X Channel},
author={Chiachi Huang and Viveck R. Cadambe and Syed A. Jafar},
journal={arXiv preprint arXiv:0810.4741},
year={2008},
archivePrefix={arXiv},
eprint={0810.4741},
primaryClass={cs.IT math.IT}
} | huang2008on |
arxiv-5281 | 0810.4796 | Kernel(s) for Problems With no Kernel: On Out-Trees With Many Leaves | <|reference_start|>Kernel(s) for Problems With no Kernel: On Out-Trees With Many Leaves: The {\sc $k$-Leaf Out-Branching} problem is to find an out-branching (i.e. a rooted oriented spanning tree) with at least $k$ leaves in a given digraph. The problem has recently received much attention from the viewpoint of parameterized algorithms {alonLNCS4596,AlonFGKS07fsttcs,BoDo2,KnLaRo}. In this paper we step aside and take a kernelization based approach to the {\sc $k$-Leaf-Out-Branching} problem. We give the first polynomial kernel for {\sc Rooted $k$-Leaf-Out-Branching}, a variant of {\sc $k$-Leaf-Out-Branching} where the root of the tree searched for is also a part of the input. Our kernel has cubic size and is obtained using extremal combinatorics. For the {\sc $k$-Leaf-Out-Branching} problem we show that no polynomial kernel is possible unless polynomial hierarchy collapses to third level %$PH=\Sigma_p^3$ by applying a recent breakthrough result by Bodlaender et al. {BDFH08} in a non-trivial fashion. However our positive results for {\sc Rooted $k$-Leaf-Out-Branching} immediately imply that the seemingly intractable the {\sc $k$-Leaf-Out-Branching} problem admits a data reduction to $n$ independent $O(k^3)$ kernels. These two results, tractability and intractability side by side, are the first separating {\it many-to-one kernelization} from {\it Turing kernelization}. This answers affirmatively an open problem regarding "cheat kernelization" raised in {IWPECOPEN08}.<|reference_end|> | arxiv | @article{fernau2008kernel(s),
title={Kernel(s) for Problems With no Kernel: On Out-Trees With Many Leaves},
author={Henning Fernau and Fedor V. Fomin and Daniel Lokshtanov and Daniel
Raible and Saket Saurabh and Yngve Villanger},
journal={arXiv preprint arXiv:0810.4796},
year={2008},
archivePrefix={arXiv},
eprint={0810.4796},
primaryClass={cs.DS cs.CC}
} | fernau2008kernel(s) |
arxiv-5282 | 0810.4809 | XQuery Join Graph Isolation | <|reference_start|>XQuery Join Graph Isolation: A purely relational account of the true XQuery semantics can turn any relational database system into an XQuery processor. Compiling nested expressions of the fully compositional XQuery language, however, yields odd algebraic plan shapes featuring scattered distributions of join operators that currently overwhelm commercial SQL query optimizers. This work rewrites such plans before submission to the relational database back-end. Once cast into the shape of join graphs, we have found off-the-shelf relational query optimizers--the B-tree indexing subsystem and join tree planner, in particular--to cope and even be autonomously capable of "reinventing" advanced processing strategies that have originally been devised specifically for the XQuery domain, e.g., XPath step reordering, axis reversal, and path stitching. Performance assessments provide evidence that relational query engines are among the most versatile and efficient XQuery processors readily available today.<|reference_end|> | arxiv | @article{grust2008xquery,
title={{XQuery} Join Graph Isolation},
author={T. Grust and M. Mayr and J. Rittinger},
journal={arXiv preprint arXiv:0810.4809},
year={2008},
archivePrefix={arXiv},
eprint={0810.4809},
primaryClass={cs.DB}
} | grust2008xquery |
arxiv-5283 | 0810.4812 | A constructive proof of the Lovasz Local Lemma | <|reference_start|>A constructive proof of the Lovasz Local Lemma: The Lovasz Local Lemma [EL75] is a powerful tool to prove the existence of combinatorial objects meeting a prescribed collection of criteria. The technique can directly be applied to the satisfiability problem, yielding that a k-CNF formula in which each clause has common variables with at most 2^(k-2) other clauses is always satisfiable. All hitherto known proofs of the Local Lemma are non-constructive and do thus not provide a recipe as to how a satisfying assignment to such a formula can be efficiently found. In his breakthrough paper [Bec91], Beck demonstrated that if the neighbourhood of each clause be restricted to O(2^(k/48)), a polynomial time algorithm for the search problem exists. Alon simplified and randomized his procedure and improved the bound to O(2^(k/8)) [Alo91]. Srinivasan presented in [Sri08] a variant that achieves a bound of essentially O(2^(k/4)). In [Mos08], we improved this to O(2^(k/2)). In the present paper, we give a randomized algorithm that finds a satisfying assignment to every k-CNF formula in which each clause has a neighbourhood of at most the asymptotic optimum of 2^(k-5)-1 other clauses and that runs in expected time polynomial in the size of the formula, irrespective of k. If k is considered a constant, we can also give a deterministic variant. In contrast to all previous approaches, our analysis does not anymore invoke the standard non-constructive versions of the Local Lemma and can therefore be considered an alternative, constructive proof of it.<|reference_end|> | arxiv | @article{moser2008a,
title={A constructive proof of the {Lovasz} Local Lemma},
author={Robin A. Moser},
journal={arXiv preprint arXiv:0810.4812},
year={2008},
archivePrefix={arXiv},
eprint={0810.4812},
primaryClass={cs.DS}
} | moser2008a |
arxiv-5284 | 0810.4840 | The Pursuit of Uniqueness: Extending Valiant-Vazirani Theorem to the Probabilistic and Quantum Settings | <|reference_start|>The Pursuit of Uniqueness: Extending Valiant-Vazirani Theorem to the Probabilistic and Quantum Settings: Valiant-Vazirani showed in 1985 [VV85] that solving NP with the promise that "yes" instances have only one witness is powerful enough to solve the entire NP class (under randomized reductions). We are interested in extending this result to the quantum setting. We prove extensions to the classes Merlin-Arthur MA and Quantum-Classical-Merlin-Arthur QCMA. Our results have implications for the complexity of approximating the ground state energy of a quantum local Hamiltonian with a unique ground state and an inverse polynomial spectral gap. We show that the estimation (to within polynomial accuracy) of the ground state energy of poly-gapped 1-D local Hamiltonians is QCMA-hard [AN02], under randomized reductions. This is in stark contrast to the case of constant gapped 1-D Hamiltonians, which is in NP [Has07]. Moreover, it shows that unless QCMA can be reduced to NP by randomized reductions, there is no classical description of the ground state of every poly-gapped local Hamiltonian that allows efficient calculation of expectation values. Finally, we discuss a few of the obstacles to the establishment of an analogous result to the class Quantum-Merlin-Arthur (QMA). In particular, we show that random projections fail to provide a polynomial gap between two witnesses.<|reference_end|> | arxiv | @article{aharonov2008the,
title={The Pursuit of Uniqueness: Extending {Valiant-Vazirani} Theorem to the
Probabilistic and Quantum Settings},
author={Dorit Aharonov and Michael Ben-Or and Fernando G.S.L. Brandao and Or Sattath},
journal={Quantum 6, 668 (2022)},
year={2008},
doi={10.22331/q-2022-03-17-668},
archivePrefix={arXiv},
eprint={0810.4840},
primaryClass={quant-ph cs.CC}
} | aharonov2008the |
arxiv-5285 | 0810.4884 | The adaptability of physiological systems optimizes performance: new directions in augmentation | <|reference_start|>The adaptability of physiological systems optimizes performance: new directions in augmentation: This paper contributes to the human-machine interface community in two ways: as a critique of the closed-loop AC (augmented cognition) approach, and as a way to introduce concepts from complex systems and systems physiology into the field. Of particular relevance is a comparison of the inverted-U (or Gaussian) model of optimal performance and multidimensional fitness landscape model. Hypothetical examples will be given from human physiology and learning and memory. In particular, a four-step model will be introduced that is proposed as a better means to characterize multivariate systems during behavioral processes with complex dynamics such as learning. Finally, the alternate approach presented herein is considered as a preferable design alternate in human-machine systems. It is within this context that future directions are discussed.<|reference_end|> | arxiv | @article{alicea2008the,
title={The adaptability of physiological systems optimizes performance: new
directions in augmentation},
author={Bradly Alicea},
journal={arXiv preprint arXiv:0810.4884},
year={2008},
archivePrefix={arXiv},
eprint={0810.4884},
primaryClass={cs.HC cs.NE}
} | alicea2008the |
arxiv-5286 | 0810.4904 | On Finite Bases for Weak Semantics: Failures versus Impossible Futures | <|reference_start|>On Finite Bases for Weak Semantics: Failures versus Impossible Futures: We provide a finite basis for the (in)equational theory of the process algebra BCCS modulo the weak failures preorder and equivalence. We also give positive and negative results regarding the axiomatizability of BCCS modulo weak impossible futures semantics.<|reference_end|> | arxiv | @article{chen2008on,
title={On Finite Bases for Weak Semantics: Failures versus Impossible Futures},
author={Taolue Chen and Wan Fokkink and Rob van Glabbeek},
journal={arXiv preprint arXiv:0810.4904},
year={2008},
archivePrefix={arXiv},
eprint={0810.4904},
primaryClass={cs.LO}
} | chen2008on |
arxiv-5287 | 0810.4916 | Sequential adaptive compressed sampling via Huffman codes | <|reference_start|>Sequential adaptive compressed sampling via Huffman codes: There are two main approaches in compressed sensing: the geometric approach and the combinatorial approach. In this paper we introduce an information theoretic approach and use results from the theory of Huffman codes to construct a sequence of binary sampling vectors to determine a sparse signal. Unlike other approaches, our approach is adaptive in the sense that each sampling vector depends on the previous sample. The number of measurements we need for a k-sparse vector in n-dimensional space is no more than O(k log n) and the reconstruction is O(k).<|reference_end|> | arxiv | @article{aldroubi2008sequential,
title={Sequential adaptive compressed sampling via {Huffman} codes},
author={Akram Aldroubi and Haichao Wang and Kourosh Zarringhalam},
journal={arXiv preprint arXiv:0810.4916},
year={2008},
archivePrefix={arXiv},
eprint={0810.4916},
primaryClass={cs.IT math.IT}
} | aldroubi2008sequential |
arxiv-5288 | 0810.4934 | Exponential-Time Approximation of Hard Problems | <|reference_start|>Exponential-Time Approximation of Hard Problems: We study optimization problems that are neither approximable in polynomial time (at least with a constant factor) nor fixed parameter tractable, under widely believed complexity assumptions. Specifically, we focus on Maximum Independent Set, Vertex Coloring, Set Cover, and Bandwidth. In recent years, many researchers design exact exponential-time algorithms for these and other hard problems. The goal is getting the time complexity still of order $O(c^n)$, but with the constant $c$ as small as possible. In this work we extend this line of research and we investigate whether the constant $c$ can be made even smaller when one allows constant factor approximation. In fact, we describe a kind of approximation schemes -- trade-offs between approximation factor and the time complexity. We study two natural approaches. The first approach consists of designing a backtracking algorithm with a small search tree. We present one result of that kind: a $(4r-1)$-approximation of Bandwidth in time $O^*(2^{n/r})$, for any positive integer $r$. The second approach uses general transformations from exponential-time exact algorithms to approximations that are faster but still exponential-time. For example, we show that for any reduction rate $r$, one can transform any $O^*(c^n)$-time algorithm for Set Cover into a $(1+\ln r)$-approximation algorithm running in time $O^*(c^{n/r})$. We believe that results of that kind extend the applicability of exact algorithms for NP-hard problems.<|reference_end|> | arxiv | @article{cygan2008exponential-time,
title={Exponential-Time Approximation of Hard Problems},
author={Marek Cygan and Lukasz Kowalik and Marcin Pilipczuk and Mateusz Wykurz},
journal={arXiv preprint arXiv:0810.4934},
year={2008},
archivePrefix={arXiv},
eprint={0810.4934},
primaryClass={cs.DS}
} | cygan2008exponential-time |
arxiv-5289 | 0810.4946 | FPT Algorithms and Kernels for the Directed $k$-Leaf Problem | <|reference_start|>FPT Algorithms and Kernels for the Directed $k$-Leaf Problem: A subgraph $T$ of a digraph $D$ is an {\em out-branching} if $T$ is an oriented spanning tree with only one vertex of in-degree zero (called the {\em root}). The vertices of $T$ of out-degree zero are {\em leaves}. In the {\sc Directed $k$-Leaf} Problem, we are given a digraph $D$ and an integral parameter $k$, and we are to decide whether $D$ has an out-branching with at least $k$ leaves. Recently, Kneis et al. (2008) obtained an algorithm for the problem of running time $4^{k}\cdot n^{O(1)}$. We describe a new algorithm for the problem of running time $3.72^{k}\cdot n^{O(1)}$. In {\sc Rooted Directed $k$-Leaf} Problem, apart from $D$ and $k$, we are given a vertex $r$ of $D$ and we are to decide whether $D$ has an out-branching rooted at $r$ with at least $k$ leaves. Very recently, Fernau et al. (2008) found an $O(k^3)$-size kernel for {\sc Rooted Directed $k$-Leaf}. In this paper, we obtain an $O(k)$ kernel for {\sc Rooted Directed $k$-Leaf} restricted to acyclic digraphs.<|reference_end|> | arxiv | @article{daligault2008fpt,
title={{FPT} Algorithms and Kernels for the Directed $k$-Leaf Problem},
author={Jean Daligault and Gregory Gutin and Eun Jung Kim and Anders Yeo},
journal={arXiv preprint arXiv:0810.4946},
year={2008},
archivePrefix={arXiv},
eprint={0810.4946},
primaryClass={cs.DS cs.CC}
} | daligault2008fpt |
arxiv-5290 | 0810.4952 | Computational modelling of evolution: ecosystems and language | <|reference_start|>Computational modelling of evolution: ecosystems and language: Recently, computational modelling became a very important research tool that enables us to study problems that for decades evaded scientific analysis. Evolutionary systems are certainly examples of such problems: they are composed of many units that might reproduce, diffuse, mutate, die, or in some cases for example communicate. These processes might be of some adaptive value, they influence each other and occur on various time scales. That is why such systems are so difficult to study. In this paper we briefly review some computational approaches, as well as our contributions, to the evolution of ecosystems and language. We start from Lotka-Volterra equations and the modelling of simple two-species prey-predator systems. Such systems are canonical example for studying oscillatory behaviour in competitive populations. Then we describe various approaches to study long-term evolution of multi-species ecosystems. We emphasize the need to use models that take into account both ecological and evolutionary processes. Finally, we address the problem of the emergence and development of language. It is becoming more and more evident that any theory of language origin and development must be consistent with darwinian principles of evolution. Consequently, a number of techniques developed for modelling evolution of complex ecosystems are being applied to the problem of language. We briefly review some of these approaches.<|reference_end|> | arxiv | @article{lipowski2008computational,
title={Computational modelling of evolution: ecosystems and language},
author={Adam Lipowski and Dorota Lipowska},
journal={Series on Advances in Mathematics for Applied Sciences - vol.79
(2009)},
year={2008},
archivePrefix={arXiv},
eprint={0810.4952},
primaryClass={q-bio.PE cs.CL physics.soc-ph}
} | lipowski2008computational |
arxiv-5291 | 0810.4993 | New completely regular q-ary codes based on Kronecker products | <|reference_start|>New completely regular q-ary codes based on Kronecker products: For any integer $\rho \geq 1$ and for any prime power q, the explicit construction of a infinite family of completely regular (and completely transitive) q-ary codes with d=3 and with covering radius $\rho$ is given. The intersection array is also computed. Under the same conditions, the explicit construction of an infinite family of q-ary uniformly packed codes (in the wide sense) with covering radius $\rho$, which are not completely regular, is also given. In both constructions the Kronecker product is the basic tool that has been used.<|reference_end|> | arxiv | @article{rifa2008new,
title={New completely regular q-ary codes based on {Kronecker} products},
author={J. Rifa and V. A. Zinoviev},
journal={arXiv preprint arXiv:0810.4993},
year={2008},
archivePrefix={arXiv},
eprint={0810.4993},
primaryClass={cs.IT cs.DM math.CO math.IT}
} | rifa2008new |
arxiv-5292 | 0810.4998 | Automatic structures of bounded degree revisited | <|reference_start|>Automatic structures of bounded degree revisited: The first-order theory of a string automatic structure is known to be decidable, but there are examples of string automatic structures with nonelementary first-order theories. We prove that the first-order theory of a string automatic structure of bounded degree is decidable in doubly exponential space (for injective automatic presentations, this holds even uniformly). This result is shown to be optimal since we also present a string automatic structure of bounded degree whose first-order theory is hard for 2EXPSPACE. We prove similar results also for tree automatic structures. These findings close the gaps left open in a previous paper of the second author by improving both, the lower and the upper bounds.<|reference_end|> | arxiv | @article{kuske2008automatic,
title={Automatic structures of bounded degree revisited},
author={Dietrich Kuske and Markus Lohrey},
journal={arXiv preprint arXiv:0810.4998},
year={2008},
archivePrefix={arXiv},
eprint={0810.4998},
primaryClass={cs.LO cs.CC}
} | kuske2008automatic |
arxiv-5293 | 0810.5056 | P is not equal to NP | <|reference_start|>P is not equal to NP: SAT is not in P, is true and provable in a simply consistent extension B' of a first order theory B of computing, with a single finite axiom characterizing a universal Turing machine. Therefore, P is not equal to NP, is true and provable in a simply consistent extension B" of B.<|reference_end|> | arxiv | @article{tarnlund2008p,
title={P is not equal to NP},
author={Sten-Ake Tarnlund},
journal={arXiv preprint arXiv:0810.5056},
year={2008},
archivePrefix={arXiv},
eprint={0810.5056},
primaryClass={cs.CC cs.LO}
} | tarnlund2008p |
arxiv-5294 | 0810.5057 | Combining Advanced Visualization and Automatized Reasoning for Webometrics: A Test Study | <|reference_start|>Combining Advanced Visualization and Automatized Reasoning for Webometrics: A Test Study: This paper presents a first attempt at performing a precise and automatic identification of the linking behaviour in a scientific domain through the analysis of the communication of the related academic institutions on the web. The proposed approach is based on the paradigm of multiple viewpoint data analysis (MVDA) than can be fruitfully exploited to highlight relationships between data, like websites, carrying several kinds of description. It uses the MultiSOM clustering and mapping method. The domain that has been chosen for this study is the domain of Computer Science in Germany. The analysis is conduced on a set of 438 websites of this domain using all together, thematic, geographic and linking information. It highlights interesting results concerning both global and local linking behaviour.<|reference_end|> | arxiv | @article{françois2008combining,
title={Combining Advanced Visualization and Automatized Reasoning for
Webometrics: A Test Study},
  author={Claire Fran{\c{c}}ois (INIST) and Jean-Charles Lamirel (INRIA Lorraine -
 LORIA) and Shadi Al Shehabi (INRIA Lorraine - LORIA)},
journal={COLLNET 2006, France (2006)},
year={2008},
archivePrefix={arXiv},
eprint={0810.5057},
primaryClass={cs.IR cs.DL}
} | françois2008combining |
arxiv-5295 | 0810.5064 | A New Algorithm for Building Alphabetic Minimax Trees | <|reference_start|>A New Algorithm for Building Alphabetic Minimax Trees: We show how to build an alphabetic minimax tree for a sequence $W = w_1, \ldots, w_n$ of real weights in $O(n d \log \log n)$ time, where $d$ is the number of distinct integers $\lceil w_i \rceil$. We apply this algorithm to building an alphabetic prefix code given a sample.<|reference_end|> | arxiv | @article{gagie2008a,
title={A New Algorithm for Building Alphabetic Minimax Trees},
author={Travis Gagie},
journal={arXiv preprint arXiv:0810.5064},
year={2008},
archivePrefix={arXiv},
eprint={0810.5064},
primaryClass={cs.IT cs.DS math.IT}
} | gagie2008a |
arxiv-5296 | 0810.5090 | Power-Bandwidth Tradeoff in Multiuser Relay Channels with Opportunistic Scheduling | <|reference_start|>Power-Bandwidth Tradeoff in Multiuser Relay Channels with Opportunistic Scheduling: The goal of this paper is to understand the key merits of multihop relaying techniques jointly in terms of their energy efficiency and spectral efficiency advantages in the presence of multiuser diversity gains from opportunistic (i.e., channel-aware) scheduling and identify the regimes and conditions in which relay-assisted multiuser communication provides a clear advantage over direct multiuser communication. For this purpose, we use Shannon-theoretic tools to analyze the tradeoff between energy efficiency and spectral efficiency (known as the power-bandwidth tradeoff) over a fading multiuser relay channel with $K$ users in the asymptotic regime of large (but finite) number of users (i.e., dense network). Benefiting from the extreme-value theoretic results of \cite{Oyman_isit07}, we characterize the power-bandwidth tradeoff and the associated energy and spectral efficiency measures of the bandwidth-limited high signal-to-noise ratio (SNR) and power-limited low SNR regimes, and utilize them in investigating the large system behavior of the multiuser relay channel as a function of the number of users and physical channel SNRs. Our analysis results in very accurate closed-form formulas in the large (but finite) $K$ regime that quantify energy and spectral efficiency performance, and provides insights on the impact of multihop relaying and multiuser diversity techniques on the power-bandwidth tradeoff.<|reference_end|> | arxiv | @article{oyman2008power-bandwidth,
title={Power-Bandwidth Tradeoff in Multiuser Relay Channels with Opportunistic
Scheduling},
  author={Ozgur Oyman and Moe Z. Win},
journal={arXiv preprint arXiv:0810.5090},
year={2008},
archivePrefix={arXiv},
eprint={0810.5090},
primaryClass={cs.IT math.IT}
} | oyman2008power-bandwidth |
arxiv-5297 | 0810.5098 | Reliability Bounds for Delay-Constrained Multi-hop Networks | <|reference_start|>Reliability Bounds for Delay-Constrained Multi-hop Networks: We consider a linear multi-hop network composed of multi-state discrete-time memoryless channels over each hop, with orthogonal time-sharing across hops under a half-duplex relaying protocol. We analyze the probability of error and associated reliability function \cite{Gallager68} over the multi-hop network; with emphasis on random coding and sphere packing bounds, under the assumption of point-to-point coding over each hop. In particular, we define the system reliability function for the multi-hop network and derive lower and upper bounds on this function to specify the reliability-optimal operating conditions of the network under an end-to-end constraint on the total number of channel uses. Moreover, we apply the reliability analysis to bound the expected end-to-end latency of multi-hop communication under the support of an automatic repeat request (ARQ) protocol. Considering an additive white Gaussian noise (AWGN) channel model over each hop, we evaluate and compare these bounds to draw insights on the role of multi-hopping toward enhancing the end-to-end rate-reliability-delay tradeoff.<|reference_end|> | arxiv | @article{oyman2008reliability,
title={Reliability Bounds for Delay-Constrained Multi-hop Networks},
author={Ozgur Oyman},
journal={Proc. of 44th Annual Allerton Conference on Communication, Control
and Computing, Monticello, IL, U.S.A., Sep. 2006},
year={2008},
archivePrefix={arXiv},
eprint={0810.5098},
primaryClass={cs.IT math.IT}
} | oyman2008reliability |
arxiv-5298 | 0810.5148 | Scheduling Kalman Filters in Continuous Time | <|reference_start|>Scheduling Kalman Filters in Continuous Time: A set of N independent Gaussian linear time invariant systems is observed by M sensors whose task is to provide the best possible steady-state causal minimum mean square estimate of the state of the systems, in addition to minimizing a steady-state measurement cost. The sensors can switch between systems instantaneously, and there are additional resource constraints, for example on the number of sensors which can observe a given system simultaneously. We first derive a tractable relaxation of the problem, which provides a bound on the achievable performance. This bound can be computed by solving a convex program involving linear matrix inequalities. Exploiting the additional structure of the sites evolving independently, we can decompose this program into coupled smaller dimensional problems. In the scalar case with identical sensors, we give an analytical expression of an index policy proposed in a more general context by Whittle. In the general case, we develop open-loop periodic switching policies whose performance matches the bound arbitrarily closely.<|reference_end|> | arxiv | @article{ny2008scheduling,
title={Scheduling Kalman Filters in Continuous Time},
  author={Jerome Le Ny and Eric Feron and Munther A. Dahleh},
journal={arXiv preprint arXiv:0810.5148},
year={2008},
archivePrefix={arXiv},
eprint={0810.5148},
primaryClass={math.OC cs.IT math.IT}
} | ny2008scheduling |
arxiv-5299 | 0810.5157 | Anomaly Detection in Streaming Sensor Data | <|reference_start|>Anomaly Detection in Streaming Sensor Data: In this chapter we consider a cell phone network as a set of automatically deployed sensors that records movement and interaction patterns of the population. We discuss methods for detecting anomalies in the streaming data produced by the cell phone network. We motivate this discussion by describing the Wireless Phone Based Emergency Response (WIPER) system, a proof-of-concept decision support system for emergency response managers. We also discuss some of the scientific work enabled by this type of sensor data and the related privacy issues. We describe scientific studies that use the cell phone data set and steps we have taken to ensure the security of the data. We describe the overall decision support system and discuss three methods of anomaly detection that we have applied to the data.<|reference_end|> | arxiv | @article{pawling2008anomaly,
title={Anomaly Detection in Streaming Sensor Data},
  author={Alec Pawling and Ping Yan and Juli{\'a}n Candia and Tim Schoenharl and Greg
 Madey},
journal={arXiv preprint arXiv:0810.5157},
year={2008},
doi={10.4018/978-1-60566-328-9},
archivePrefix={arXiv},
eprint={0810.5157},
primaryClass={physics.data-an cs.NI physics.comp-ph}
} | pawling2008anomaly |
arxiv-5300 | 0810.5203 | Monotonic Convergence in an Information-Theoretic Law of Small Numbers | <|reference_start|>Monotonic Convergence in an Information-Theoretic Law of Small Numbers: An "entropy increasing to the maximum" result analogous to the entropic central limit theorem (Barron 1986; Artstein et al. 2004) is obtained in the discrete setting. This involves the thinning operation and a Poisson limit. Monotonic convergence in relative entropy is established for general discrete distributions, while monotonic increase of Shannon entropy is proved for the special class of ultra-log-concave distributions. Overall we extend the parallel between the information-theoretic central limit theorem and law of small numbers explored by Kontoyiannis et al. (2005) and Harremo\"es et al.\ (2007, 2008). Ingredients in the proofs include convexity, majorization, and stochastic orders.<|reference_end|> | arxiv | @article{yu2008monotonic,
title={Monotonic Convergence in an Information-Theoretic Law of Small Numbers},
author={Yaming Yu},
journal={IEEE Transactions on Information Theory 55 (2009) 5412--5422},
year={2008},
doi={10.1109/TIT.2009.2032727},
archivePrefix={arXiv},
eprint={0810.5203},
primaryClass={cs.IT math.IT math.PR}
} | yu2008monotonic |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.